Fix radiusd network connection limitations (#17049)

* Allow exposing access to the underlying container

This exposes the Container response from the Docker API, allowing
consumers of the testhelper to interact with the newly started
container instance. This will be useful for two reasons:

 1. Allowing the radiusd container to start its own daemon after
    modifying its configuration (as sketched below).
 2. Loading certificates into a future, similar integration test
    using the PKI secrets engine.
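
A minimal sketch of the resulting pattern, using the testhelper API as
changed in this commit (NewServiceRunner, StartService, DockerAPI, and
the new Entrypoint option); the test name, image, and exec command here
are illustrative only:

package radius_test

import (
	"context"
	"testing"

	"github.com/docker/docker/api/types"
	"github.com/hashicorp/vault/helper/testhelpers/docker"
)

func TestContainerHandleSketch(t *testing.T) {
	// Start the container with a no-op entrypoint; the real daemon is
	// launched manually once its configuration has been rewritten.
	runner, err := docker.NewServiceRunner(docker.RunOptions{
		ImageRepo:  "jumanjiman/radiusd",
		ImageTag:   "latest",
		Entrypoint: []string{"sleep", "3600"},
		Ports:      []string{"1812/udp"},
	})
	if err != nil {
		t.Fatal(err)
	}

	svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
		return docker.NewServiceHostPort(host, port), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	defer svc.Cleanup()

	// svc.Container is the inspect response for the running container;
	// its ID is what exec-style Docker API calls need.
	exec, err := runner.DockerAPI.ContainerExecCreate(context.Background(), svc.Container.ID, types.ExecConfig{
		Cmd: []string{"radiusd", "-f", "-l", "stdout"},
	})
	if err != nil {
		t.Fatal(err)
	}
	if err := runner.DockerAPI.ContainerExecStart(context.Background(), exec.ID, types.ExecStartCheck{}); err != nil {
		t.Fatal(err)
	}
}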

Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

* Allow any client to connect to test radiusd daemon

This fixes test failures of the following form:

> 2022-09-07T10:46:19.332-0400 [TRACE] core: adding local paths: paths=[]
> 2022-09-07T10:46:19.333-0400 [INFO]  core: enabled credential backend: path=mnt/ type=test
> 2022-09-07T10:46:19.334-0400 [WARN]  Executing test step: step_number=1
> 2022-09-07T10:46:19.334-0400 [WARN]  Executing test step: step_number=2
> 2022-09-07T10:46:29.334-0400 [WARN]  Executing test step: step_number=3
> 2022-09-07T10:46:29.335-0400 [WARN]  Executing test step: step_number=4
> 2022-09-07T10:46:39.336-0400 [WARN]  Requesting RollbackOperation
> --- FAIL: TestBackend_acceptance (28.56s)
>     testing.go:364: Failed step 4: erroneous response:
>
>         &logical.Response{Secret:<nil>, Auth:<nil>, Data:map[string]interface {}{"error":"context deadline exceeded"}, Redirect:"", Warnings:[]string(nil), WrapInfo:(*wrapping.ResponseWrapInfo)(nil), Headers:map[string][]string(nil)}
> FAIL
> FAIL	github.com/hashicorp/vault/builtin/credential/radius	29.238s

In particular, the radiusd container ships with a default clients.conf
that restricts connections to the IP ranges associated with the Docker
daemon. When creating new networks (such as in CircleCI) or when running
via Podman (which has its own set of default network ranges), this
initial config no longer applies. We thus need to write a new config
into the container; while we could do this by rebuilding a new image on
top of the existing layers (provisioning our config at build time), we
would then need to manage those image builds and give the service setup
hooks to trigger them.

Thus, post-startup modification of the running container is probably
easier to execute in our case.
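
For reference, the two /1 networks written into the new clients.conf
jointly cover the entire IPv4 space, so any client address is accepted.
A standalone check of that claim (illustrative only, not part of the
commit):

package main

import (
	"fmt"
	"net"
)

func main() {
	// The two halves of the IPv4 address space, as used in clients.conf.
	_, lower, _ := net.ParseCIDR("0.0.0.0/1")   // 0.0.0.0 - 127.255.255.255
	_, upper, _ := net.ParseCIDR("128.0.0.0/1") // 128.0.0.0 - 255.255.255.255

	// Sample source addresses from Docker-, Podman-, and CI-style networks.
	for _, addr := range []string{"172.17.0.1", "10.88.0.14", "192.168.1.50"} {
		ip := net.ParseIP(addr)
		fmt.Printf("%s allowed: %v\n", addr, lower.Contains(ip) || upper.Contains(ip))
	}
}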

Signed-off-by: Alexander Scheel <alex.scheel@hashicorp.com>

2 changed files with 86 additions and 9 deletions

@@ -3,6 +3,7 @@ package radius
 import (
 	"context"
 	"fmt"
+	"io"
 	"os"
 	"reflect"
 	"strconv"
@@ -13,6 +14,8 @@ import (
 	"github.com/hashicorp/vault/helper/testhelpers/docker"
 	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
 	"github.com/hashicorp/vault/sdk/logical"
+
+	"github.com/docker/docker/api/types"
 )
 
 const (
@@ -32,27 +35,94 @@ func prepareRadiusTestContainer(t *testing.T) (func(), string, int) {
 		return func() {}, os.Getenv(envRadiusRadiusHost), port
 	}
 
+	radiusdOptions := []string{"radiusd", "-f", "-l", "stdout", "-X"}
+
 	runner, err := docker.NewServiceRunner(docker.RunOptions{
 		ImageRepo:     "jumanjiman/radiusd",
 		ImageTag:      "latest",
 		ContainerName: "radiusd",
-		Cmd:           []string{"-f", "-l", "stdout"},
-		Ports:         []string{"1812/udp"},
+		// Switch the entry point for this operation; we want to sleep
+		// instead of exec'ing radiusd, as we first need to write a new
+		// client configuration. radiusd's SIGHUP handler does not reload
+		// this config file, hence we choose to manually start radiusd
+		// below.
+		Entrypoint: []string{"sleep", "3600"},
+		Ports:      []string{"1812/udp"},
 		LogConsumer: func(s string) {
 			if t.Failed() {
 				t.Logf("container logs: %s", s)
 			}
 		},
 	})
 	if err != nil {
 		t.Fatalf("Could not start docker radiusd: %s", err)
 	}
 
 	svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
-		// There's no straightfoward way to check the state, but the server starts
-		// up quick so a 2 second sleep should be enough.
-		time.Sleep(2 * time.Second)
 		return docker.NewServiceHostPort(host, port), nil
 	})
 	if err != nil {
 		t.Fatalf("Could not start docker radiusd: %s", err)
 	}
 
+	// Now allow any client to connect to this radiusd instance by writing our
+	// own clients.conf file.
+	//
+	// This is necessary because we lack control over the container's network
+	// IPs. We might be running in Circle CI (with variable IPs per new
+	// network) or in Podman (which uses an entirely different set of default
+	// ranges than Docker).
+	//
+	// See also: https://freeradius.org/radiusd/man/clients.conf.html
+	ctx := context.Background()
+	clientsConfig := `client 0.0.0.0/1 {
+	ipaddr = 0.0.0.0/1
+	secret = testing123
+	shortname = all-clients-first
+}
+
+client 128.0.0.0/1 {
+	ipaddr = 128.0.0.0/1
+	secret = testing123
+	shortname = all-clients-second
+}`
+	ret, err := runner.DockerAPI.ContainerExecCreate(ctx, svc.Container.ID, types.ExecConfig{
+		User:         "0",
+		AttachStderr: true,
+		AttachStdout: true,
+		// Hack: write this via echo, since it exists in the container.
+		Cmd: []string{"sh", "-c", "echo '" + clientsConfig + "' > /etc/raddb/clients.conf"},
+	})
+	if err != nil {
+		t.Fatalf("Failed to update radiusd client config: error creating command: %v", err)
+	}
+
+	resp, err := runner.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		t.Fatalf("Failed to update radiusd client config: error attaching command: %v", err)
+	}
+
+	read, err := io.ReadAll(resp.Reader)
+	t.Logf("Command Output (%v):\n%v", err, string(read))
+
+	ret, err = runner.DockerAPI.ContainerExecCreate(ctx, svc.Container.ID, types.ExecConfig{
+		User:         "0",
+		AttachStderr: true,
+		AttachStdout: true,
+		// As noted above, we need to start radiusd manually now.
+		Cmd: radiusdOptions,
+	})
+	if err != nil {
+		t.Fatalf("Failed to start radiusd service: error creating command: %v", err)
+	}
+
+	err = runner.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		t.Fatalf("Failed to start radiusd service: error starting command: %v", err)
+	}
+
+	// Give radiusd time to start...
+	//
+	// There's no straightforward way to check the state, but the server starts
+	// up quickly, so a 2 second sleep should be enough.
+	time.Sleep(2 * time.Second)
+
 	pieces := strings.Split(svc.Config.Address(), ":")
 	port, _ := strconv.Atoi(pieces[1])
 	return svc.Cleanup, pieces[0], port
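
As a hypothetical illustration of reason (2) from the first commit
message, the exposed handle would also let a future PKI integration test
copy certificates into its container before starting the service. This
sketch continues from a runner/svc pair prepared as above; the paths and
archive layout are invented for illustration:

	// Hypothetical: package the test's certificates and copy them into
	// the running container. archive and CopyToContainer come from the
	// Docker client packages the testhelper already imports.
	tarball, err := archive.Tar("testdata/certs", archive.Uncompressed)
	if err != nil {
		t.Fatalf("failed to tar certificates: %v", err)
	}
	defer tarball.Close()

	if err := runner.DockerAPI.CopyToContainer(ctx, svc.Container.ID, "/etc/ssl/certs",
		tarball, types.CopyToContainerOptions{}); err != nil {
		t.Fatalf("failed to copy certificates into container: %v", err)
	}

The second file in this commit is the docker testhelper itself, which
wires up the new Entrypoint option and the exposed Container field used
above.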


@@ -17,6 +17,7 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/go-connections/nat"
@@ -33,6 +34,7 @@ type RunOptions struct {
 	ImageTag      string
 	ContainerName string
 	Cmd           []string
+	Entrypoint    []string
 	Env           []string
 	NetworkID     string
 	CopyFromTo    map[string]string
@@ -193,14 +195,16 @@ func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) {
 	}
 
 	return &Service{
-		Config:  config,
-		Cleanup: cleanup,
+		Config:    config,
+		Cleanup:   cleanup,
+		Container: container,
 	}, nil
 }
 
 type Service struct {
-	Config  ServiceConfig
-	Cleanup func()
+	Config    ServiceConfig
+	Cleanup   func()
+	Container *types.ContainerJSON
 }
 
 func (d *Runner) Start(ctx context.Context) (*types.ContainerJSON, []string, error) {
@@ -222,6 +226,9 @@ func (d *Runner) Start(ctx context.Context) (*types.ContainerJSON, []string, error) {
 			cfg.ExposedPorts[nat.Port(p)] = struct{}{}
 		}
 	}
+	if len(d.RunOptions.Entrypoint) > 0 {
+		cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint)
+	}
 
 	hostConfig := &container.HostConfig{
 		AutoRemove: !d.RunOptions.DoNotAutoRemove,