mirror of
https://github.com/optim-enterprises-bv/vault.git
synced 2025-10-30 02:02:43 +00:00
Chroot Listener (#22304)
* Initial oss-patch apply
* Added changelog
* Renamed changelog txt
* Added the imports to the handler file
* Added a check that no two ports are the same, and modified changelog
* Edited go sum entry
* Tidy up using go mod
* Use strutil instead
* Revert go sum and go mod
* Revert sdk go sum
* Edited go.sum to before
* Edited go.sum again to initial
* Revert changes
This commit is contained in:
@@ -30,6 +30,7 @@ require (
|
||||
github.com/hashicorp/go-secure-stdlib/password v0.1.1
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2
|
||||
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2
|
||||
github.com/hashicorp/go-set v0.1.13
|
||||
github.com/hashicorp/go-sockaddr v1.0.2
|
||||
github.com/hashicorp/go-uuid v1.0.3
|
||||
github.com/hashicorp/go-version v1.6.0
|
||||
|
||||
@@ -119,6 +119,8 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
|
||||
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY=
|
||||
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
|
||||
github.com/hashicorp/go-set v0.1.13 h1:k1B5goY3c7OKEzpK+gwAhJexxzAJwDN8kId8YvWrihA=
|
||||
github.com/hashicorp/go-set v0.1.13/go.mod h1:0/D+R4MFUzJ6XmvjU7liXtznF1eQDxh84GJlhXw+lvo=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
@@ -231,6 +233,7 @@ github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
|
||||
@@ -39,6 +39,7 @@ import (
|
||||
"github.com/hashicorp/vault/api"
|
||||
dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/helper/strutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/testcluster"
|
||||
uberAtomic "go.uber.org/atomic"
|
||||
"golang.org/x/net/http2"
|
||||
@@ -479,6 +480,7 @@ type DockerClusterNode struct {
|
||||
ImageTag string
|
||||
DataVolumeName string
|
||||
cleanupVolume func()
|
||||
AllClients []*api.Client
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) TLSConfig() *tls.Config {
|
||||
@@ -506,6 +508,30 @@ func (n *DockerClusterNode) APIClient() *api.Client {
|
||||
return client
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) APIClientN(listenerNumber int) (*api.Client, error) {
|
||||
// We clone to ensure that whenever this method is called, the caller gets
|
||||
// back a pristine client, without e.g. any namespace or token changes that
|
||||
// might pollute a shared client. We clone the config instead of the
|
||||
// client because (1) Client.clone propagates the replicationStateStore and
|
||||
// the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and
|
||||
// (3) if clone returns an error, it doesn't feel as appropriate to panic
|
||||
// below. Who knows why clone might return an error?
|
||||
if listenerNumber >= len(n.AllClients) {
|
||||
return nil, fmt.Errorf("invalid listener number %d", listenerNumber)
|
||||
}
|
||||
cfg := n.AllClients[listenerNumber].CloneConfig()
|
||||
client, err := api.NewClient(cfg)
|
||||
if err != nil {
|
||||
// It seems fine to panic here, since this should be the same input
|
||||
// we provided to NewClient when we were setup, and we didn't panic then.
|
||||
// Better not to completely ignore the error though, suppose there's a
|
||||
// bug in CloneConfig?
|
||||
panic(fmt.Sprintf("NewClient error on cloned config: %v", err))
|
||||
}
|
||||
client.SetToken(n.Cluster.rootToken)
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewAPIClient creates and configures a Vault API client to communicate with
|
||||
// the running Vault Cluster for this DockerClusterNode
|
||||
func (n *DockerClusterNode) apiConfig() (*api.Config, error) {
|
||||
@@ -544,6 +570,20 @@ func (n *DockerClusterNode) newAPIClient() (*api.Client, error) {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) newAPIClientForAddress(address string) (*api.Client, error) {
|
||||
config, err := n.apiConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Address = fmt.Sprintf("https://%s", address)
|
||||
client, err := api.NewClient(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SetToken(n.Cluster.GetRootToken())
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Cleanup kills the container of the node and deletes its data volume
|
||||
func (n *DockerClusterNode) Cleanup() {
|
||||
n.cleanup()
|
||||
@@ -563,6 +603,17 @@ func (n *DockerClusterNode) cleanup() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) createDefaultListenerConfig() map[string]interface{} {
|
||||
return map[string]interface{}{"tcp": map[string]interface{}{
|
||||
"address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200),
|
||||
"tls_cert_file": "/vault/config/cert.pem",
|
||||
"tls_key_file": "/vault/config/key.pem",
|
||||
"telemetry": map[string]interface{}{
|
||||
"unauthenticated_metrics_access": true,
|
||||
},
|
||||
}}
|
||||
}
|
||||
|
||||
func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
|
||||
if n.DataVolumeName == "" {
|
||||
vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{})
|
||||
@@ -575,16 +626,25 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
}
|
||||
}
|
||||
vaultCfg := map[string]interface{}{}
|
||||
vaultCfg["listener"] = map[string]interface{}{
|
||||
"tcp": map[string]interface{}{
|
||||
"address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200),
|
||||
"tls_cert_file": "/vault/config/cert.pem",
|
||||
"tls_key_file": "/vault/config/key.pem",
|
||||
"telemetry": map[string]interface{}{
|
||||
"unauthenticated_metrics_access": true,
|
||||
},
|
||||
},
|
||||
var listenerConfig []map[string]interface{}
|
||||
listenerConfig = append(listenerConfig, n.createDefaultListenerConfig())
|
||||
ports := []string{"8200/tcp", "8201/tcp"}
|
||||
|
||||
if opts.VaultNodeConfig != nil && opts.VaultNodeConfig.AdditionalListeners != nil {
|
||||
for _, config := range opts.VaultNodeConfig.AdditionalListeners {
|
||||
cfg := n.createDefaultListenerConfig()
|
||||
listener := cfg["tcp"].(map[string]interface{})
|
||||
listener["address"] = fmt.Sprintf("%s:%d", "0.0.0.0", config.Port)
|
||||
listener["chroot_namespace"] = config.ChrootNamespace
|
||||
listenerConfig = append(listenerConfig, cfg)
|
||||
portStr := fmt.Sprintf("%d/tcp", config.Port)
|
||||
if strutil.StrListContains(ports, portStr) {
|
||||
return fmt.Errorf("duplicate port %d specified", config.Port)
|
||||
}
|
||||
ports = append(ports, portStr)
|
||||
}
|
||||
}
|
||||
vaultCfg["listener"] = listenerConfig
|
||||
vaultCfg["telemetry"] = map[string]interface{}{
|
||||
"disable_hostname": true,
|
||||
}
|
||||
@@ -675,6 +735,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
}
|
||||
testcluster.JSONLogNoTimestamp(n.Logger, s)
|
||||
}}
|
||||
|
||||
r, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{
|
||||
ImageRepo: n.ImageRepo,
|
||||
ImageTag: n.ImageTag,
|
||||
@@ -689,7 +750,7 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
"VAULT_LOG_FORMAT=json",
|
||||
"VAULT_LICENSE=" + opts.VaultLicense,
|
||||
},
|
||||
Ports: []string{"8200/tcp", "8201/tcp"},
|
||||
Ports: ports,
|
||||
ContainerName: n.Name(),
|
||||
NetworkName: opts.NetworkName,
|
||||
CopyFromTo: copyFromTo,
|
||||
@@ -772,6 +833,19 @@ func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOption
|
||||
}
|
||||
client.SetToken(n.Cluster.rootToken)
|
||||
n.client = client
|
||||
|
||||
n.AllClients = append(n.AllClients, client)
|
||||
|
||||
for _, addr := range svc.StartResult.Addrs[2:] {
|
||||
// The second element of this list of addresses is the cluster address
|
||||
// We do not want to create a client for the cluster address mapping
|
||||
client, err := n.newAPIClientForAddress(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client.SetToken(n.Cluster.rootToken)
|
||||
n.AllClients = append(n.AllClients, client)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -53,7 +53,8 @@ type VaultNodeConfig struct {
|
||||
// ServiceRegistrationType string
|
||||
// ServiceRegistrationOptions map[string]string
|
||||
|
||||
StorageOptions map[string]string
|
||||
StorageOptions map[string]string
|
||||
AdditionalListeners []VaultNodeListenerConfig
|
||||
|
||||
DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"`
|
||||
LogFormat string `json:"log_format"`
|
||||
@@ -102,6 +103,11 @@ type ClusterOptions struct {
|
||||
AdministrativeNamespacePath string
|
||||
}
|
||||
|
||||
// VaultNodeListenerConfig describes one additional TCP listener to configure
// on a cluster node beyond the default 8200/8201 ports.
type VaultNodeListenerConfig struct {
	// Port is the container port the listener binds to. It must not collide
	// with 8200, 8201, or any other configured listener's port.
	Port int
	// ChrootNamespace, if non-empty, is set as the listener's
	// chroot_namespace in the generated Vault config.
	ChrootNamespace string
}
|
||||
|
||||
type CA struct {
|
||||
CACert *x509.Certificate
|
||||
CACertBytes []byte
|
||||
|
||||
Reference in New Issue
Block a user