refactor: tidy up docker-compose.yml (#10334)

Our `docker-compose.yml` file has grown to a degree where it is almost
unmanageable. Docker Compose offers several tools for dealing with
complex compose setups, such as include files and YAML anchors.

We refactor our setup using these tools to organise the services and
their configuration a bit better.
Author: Thomas Eizinger
Date: 2025-09-15 03:37:39 +00:00
Committed by: GitHub
Parent: a66a18782e
Commit: e2dce710f1
6 changed files with 361 additions and 495 deletions
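
For context, the include mechanism the commit message refers to lets a top-level compose file pull in per-concern fragment files like the ones below. A minimal sketch, assuming Docker Compose v2.20+ (only scripts/compose/relay.yml is named in this diff; the other path is illustrative):

# docker-compose.yml (top level)
include:
  - scripts/compose/relay.yml # relay fragment, shown later in this diff
  - scripts/compose/testing.yml # illustrative name for the httpbin/iperf3 fragment

Each included file is resolved on its own and then merged into the top-level model, so its services, networks, and volumes behave as if they were declared locally.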

@@ -0,0 +1,92 @@
# requires docker compose plugin (=v2)
#
# wget -q --no-cache -O - https://github.com/siemens/edgeshark/raw/main/deployments/wget/docker-compose.yaml | docker compose -f - up
name: "edgeshark"
services:
gostwire:
image: "ghcr.io/siemens/ghostwire"
restart: "unless-stopped"
read_only: true
entrypoint:
- "/gostwire"
- "--http=[::]:5000"
- "--brand=Edgeshark"
- "--brandicon=PHN2ZyB3aWR0aD0iMjQiIGhlaWdodD0iMjQiIHZpZXdCb3g9IjAgMCA2LjM1IDYuMzUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggZD0iTTUuNzMgNC41M2EuNC40IDAgMDEtLjA2LS4xMmwtLjAyLS4wNC0uMDMuMDNjLS4wNi4xMS0uMDcuMTItLjEuMS0uMDEtLjAyIDAtLjAzLjAzLS4xbC4wNi0uMS4wNC0uMDVjLjAzIDAgLjA0LjAxLjA2LjA5bC4wNC4xMmMuMDMuMDUuMDMuMDcgMCAuMDhoLS4wMnptLS43NS0uMDZjLS4wMi0uMDEtLjAxLS4wMy4wMS0uMDUuMDMtLjAyLjA1LS4wNi4wOC0uMTQuMDMtLjA4LjA1LS4xLjA4LS4wN2wuMDIuMDdjLjAyLjEuMDMuMTIuMDUuMTMuMDUuMDIuMDQuMDctLjAxLjA3LS4wNCAwLS4wNi0uMDMtLjA5LS4xMiAwLS4wMi0uMDEgMC0uMDQuMDRhLjM2LjM2IDAgMDEtLjA0LjA3Yy0uMDIuMDItLjA1LjAyLS4wNiAwem0tLjQ4LS4wM2MtLjAxLS4wMSAwLS4wMy4wMS0uMDZsLjA3LS4xM2MuMDYtLjEuMDctLjEzLjEtLjEyLjAyLjAyLjAzLjA0LjA0LjEuMDEuMDguMDIuMTEuMDQuMTR2LjA0Yy0uMDEuMDItLjA0LjAyLS4wNiAwbC0uMDMtLjEtLjAxLS4wNS0uMDYuMDlhLjQ2LjQ2IDAgMDEtLjA1LjA4Yy0uMDIuMDItLjA0LjAyLS4wNSAwem0uMDgtLjc1YS4wNS4wNSAwIDAxLS4wMS0uMDRMNC41IDMuNWwtLjAyLS4wNGguMDNjLjAzLS4wMi4wMy0uMDEuMDguMDdsLjAzLjA1LjA3LS4xM2MwLS4wMyAwLS4wMy4wMi0uMDQuMDMgMCAuMDQgMCAuMDQuMDJhLjYuNiAwIDAxLS4xMi4yNmMtLjAzLjAyLS4wMy4wMi0uMDUgMHptLjU0LS4xYy0uMDEtLjAyLS4wMi0uMDMtLjAyLS4wOGEuNDQuNDQgMCAwMC0uMDMtLjA4Yy0uMDItLjA1LS4wMi0uMDggMC0uMDguMDUtLjAxLjA2IDAgLjA2LjA0bC4wMy4wOGMuMDIgMCAuMDYtLjA4LjA3LS4xMmwuMDEtLjAyLjA2LS4wMS0uMTIuMjZoLS4wNnptLjQ3LS4wOGwtLjAyLS4wOWEuNTUuNTUgMCAwMC0uMDItLjEyYzAtLjAyIDAtLjAyLjAyLS4wMmguMDNsLjAyLjExdi4wM2MuMDEgMCAuMDQtLjAzLjA2LS4wN2wuMDUtLjA5Yy4wMi0uMDIuMDYtLjAyLjA2IDBsLS4xNC4yNWMtLjAxLjAxLS4wNS4wMi0uMDYgMHptLS43Ni0uNDFjLS4wNi0uMDMtLjA4LS4xNC0uMDUtLjJsLjA0LS4wM2MuMDMtLjAyLjAzLS4wMi4wNy0uMDIuMDYgMCAuMDkuMDEuMTIuMDUuMDMuMDUuMDIuMTItLjAzLjE2LS4wNS4wNS0uMS4wNi0uMTUuMDR6bS4zNS0uMDVBLjEzLjEzIDAgMDE1LjE0IDNsLS4wMS0uMDVjMC0uMDQgMC0uMDUuMDYtLjFsLjA2LS4wMmMuMDcgMCAuMTEuMDUuMS4xMmwtLjAxLjA2Yy0uMDQuMDQtLjEyLjA1LS4xNi4wM3oiLz48cGF0aCBkPSJNMy44NCA1LjQ4Yy0uMTctLjIxLS4wNC0uNS0uMDYtLjc0LjA1LS44My4xLTEuNjYuMDctMi41LjE3LS4xNC40NC4wOC41OS0uMDItLjIyLS4xMy0uNTQtLjEzLS44LS4xNC0uMTQuMDktLjUuMDItLjUuMTcuMi4wOC41My0uMTUuNjQuMDItLjAxLjgxLS4wMyAxLjYyLS4xIDIuNDItLjA1LjIzLjA3LjU2LS4xNC43MmE5LjUxIDkuNTEgMCAwMS0uNjYtLjUzYy4xMy0uMDQuMzItLjQuMS0uMi0uMjEuMjItLjUxLjI3LS43OS4zNS0uMTIuMDgtLjU0LjEyLS4zMi0uMTEuMi0uMjIuNDUtLjQyLjUtLjcyLS4xMy0uMTItLjEuMy0uMjUuMDgtLjIzLS4xNi0uNDMtLjM1LS42Ny0uNS0uMjEuMTItLjQ1LjIzLS43LjI5LS4xNy4xLS42MS4xNC0uMzItLjE0LjItLjIuNDMtLjQyLjQtLjcyLjAzLS4zMS0uMDgtLjYxLS4yLS44OWEyLjA3IDIuMDcgMCAwMC0uNDMtLjY0Yy0uMTgtLjE2LS4wMi0uMy4xNi0uMTkuMzcuMTcuNy40Ljk4LjcuMDkuMTcuMTMuNC4zNi4yNS40NC0uMDguOS0uMSAxLjM0LS4yMS4xMy0uMy4wNC0uNjQtLjEyLS45MSAwLS4xNy0uNDItLjM4LS4yMi0uNDYuMzguMS43Ny4yMyAxLjA4LjQ4LjMyLjE5LjY0LjQ1LjY5Ljg0LjEuMTguNS4xMi43MS4xOS4zMy4wNC42Ni4wOSAxIC4wOS4xNS4xMi0uMDguNDcgMCAuNjgtLjA4LjE1LS40LjA3LS41OS4xNC0uMTIuMTMtLjE0LjQzLS4xNS42NC0uMDUuMTUuMTguNDguMDYuNWExLjQgMS40IDAgMDEwLTEuMWMtLjItLjA2LS4zMy4wNi0uNTMuMDQtLjIzLjA0LS40NS4xLS42OC4xMi0uMTMuMi0uMjMuNTQtLjA2Ljc1LjEzLjE5LjMuMi40OC4yLjE4LjAzLjM2LjA1LjU1LjA0LjE4LS4wMS4zNi4wNy41Mi4wNS4yLjAxLjEuNC4wNy41Mi0uMzguMDctLjc2LjE2LTEuMTQuMjUtLjMuMDQtLjU4LjE0LS44Ny4xOXptLjQyLS4zOGMuMDgtLjEuMDItLjU0LS4wNC0uMjQgMCAuMDYtLjEuMy4wNC4yNHptLjU4LS4xYy4wOS0uMTItLjA0LS40Mi0uMDctLjE0LS4wMi4wNi0uMDIuMi4wNy4xNHptLjU2LS4wNmMuMTItLjE0LS4wOC0uMzQtLjA4LS4wOC0uMDEuMDQuMDIuMTMuMDguMDh6bS0yLjE3LS42Yy4wMy0uMjUuMDItLjUyLjA2LS43OS4wMi0uMjUuMDUtLjUgMC0uNzUtLjA5LjItLjAzLjQ4LS4wOS43LS4wMi4yOC0uMDguNTctLjA0Ljg1LjAyLjAyLjA1LjAyLjA3IDB6bS0uNjctLjI3Yy4wOS0uMy4wNy0uNjMuMDgtLjk0LS4wMS0uMTIuMDEtLjQ3LS4xLS40LjAzLjQzLjAzLjg4LS4wNiAxLjMyIDAgLjAzLjA1LjA1LjA4LjAyem0tLjYtLjVjLjEtLjI0LjE0LS41My4xLS43OS0uMTQgMC0uMDMuMzYtLjEuNS4wMS4wNi0uMTMuMzIgMCAuM3ptLS40My0uMmMuMDQtLjE2LjE1LS41
IDAtLjU3LjA1LjE4LS4xMi40NS0uMDMuNThsLjAzLS4wMXptMy40My0uMjJjLjIyLS4xMyAwLS42Ni0uMjItLjM1LS4xLjE1IDAgLjQyLjIyLjM1em0uMzQtLjA2Yy4yOCAwIC4xOC0uNTMtLjA5LS4zNC0uMTIuMDctLjEyLjQyLjA5LjM0em0uNDQtLjAxYy0uMDEtLjEuMTItLjM4LS4wMS0uNC0uMDIuMS0uMTcuNDEgMCAuNHptLTEuMjYtLjAyYzAtLjEzLjAyLS41NS0uMTQtLjUuMDguMTItLjAyLjUxLjE0LjV6Ii8+PHBhdGggZD0iTTUuNTMgMy4yN2wtLjI1LjAzYS41Ny41NyAwIDAxLS4xMy4yNGwtLjA2LS4yLS4zNy4wNGMwIC4xLS4wMy4xOS0uMS4yOGwtLjEzLS4yNC0uMjIuMDNzLS4xNS4yOC0uMTUuNDYuMDcuNDYuMzcuNTNoLjAzbC4xNS0uMjUuMDguMjguMjUuMDIuMTItLjJjLjA1LjEuMS4yLjEyLjJsLjMuMDJ2LS4wOHMtLjEyLS4yNS0uMTItLjM5Yy0uMDItLjI3LjExLS43Ny4xMS0uNzd6IiBmaWxsLW9wYWNpdHk9Ii41Ii8+PC9zdmc+"
user: "65534"
# To grant exactly the set of capabilities listed below, without any of
# Docker's default container capabilities, we need to drop "all"
# capabilities. Regardless of the order of the YAML dictionary keys
# (there ain't one), Docker always drops capabilities first and only
# then adds the requested ones. See also:
# https://stackoverflow.com/a/63219871.
cap_drop:
- ALL
cap_add:
- CAP_SYS_ADMIN # change namespaces
- CAP_SYS_CHROOT # change mount namespaces
- CAP_SYS_PTRACE # access nsfs namespace information
- CAP_DAC_READ_SEARCH # access/scan /proc/[$PID]/fd itself
- CAP_DAC_OVERRIDE # access container engine unix domain sockets without being rude, erm, root.
- CAP_NET_RAW # pingin' 'round
- CAP_NET_ADMIN # 'nuff tables
security_opt:
# The default Docker container AppArmor profile blocks namespace
# discovery, because reading from /proc/$PID/ns/* is considered to be a
# ptrace read/ready operation.
- apparmor:unconfined
# Essential since we need full PID view.
pid: "host"
cgroup: host
networks:
99-ghost-in-da-edge:
priority: 100
edgeshark:
image: "ghcr.io/siemens/packetflix"
read_only: true
restart: "unless-stopped"
entrypoint:
- "/packetflix"
- "--port=5001"
- "--discovery-service=gostwire.ghost-in-da-edge"
- "--gw-port=5000"
- "--proxy-discovery"
- "--debug"
ports:
- "127.0.0.1:5001:5001"
# Run as non-root user (baked into the metadata of the image anyway).
user: "65534"
# To grant exactly the set of capabilities listed below, without any of
# Docker's default container capabilities, we need to drop "all"
# capabilities. Regardless of the order of the YAML dictionary keys
# (there ain't one), Docker always drops capabilities first and only
# then adds the requested ones. See also:
# https://stackoverflow.com/a/63219871.
cap_drop:
- ALL
cap_add:
- CAP_SYS_ADMIN # change namespaces
- CAP_SYS_CHROOT # change mount namespaces
- CAP_SYS_PTRACE # access nsfs namespace information
- CAP_NET_ADMIN # allow dumpcap to control promisc. mode
- CAP_NET_RAW # capture raw packets, and not that totally burnt stuff
security_opt:
# The default Docker container AppArmor profile blocks namespace
# discovery, because reading from /proc/$PID/ns/* is considered to be a
# ptrace read/ready operation.
- apparmor:unconfined
# Essential since we need full PID view.
pid: "host"
networks:
99-ghost-in-da-edge:
priority: 100
networks:
99-ghost-in-da-edge:
name: ghost-in-da-edge
internal: false
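
The gostwire and edgeshark services above duplicate the same hardening block (non-root user, read-only rootfs, dropped capabilities, AppArmor override, host PID namespace). This is exactly the kind of repetition the YAML anchors from the commit message can remove. A minimal sketch using a hypothetical x-hardened extension field (the cap_add lists stay per-service because they differ):

x-hardened: &hardened
  user: "65534"
  read_only: true
  restart: "unless-stopped"
  cap_drop:
    - ALL
  security_opt:
    - apparmor:unconfined
  pid: "host"

services:
  gostwire:
    <<: *hardened # splice in the shared hardening keys
    cap_add:
      - CAP_SYS_ADMIN # change namespaces
      # ... remaining gostwire-specific capabilities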

@@ -0,0 +1,95 @@
services:
# Dependencies
postgres:
# TODO: Enable pgaudit on dev instance. See https://github.com/pgaudit/pgaudit/issues/44#issuecomment-455090262
image: postgres:17
command:
["postgres", "-c", "wal_level=logical", "-c", "max_connections=200"]
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: firezone_dev
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
ports:
- 5432:5432/tcp
networks:
- app-internal
vault:
image: vault:1.13.3
environment:
VAULT_ADDR: "http://127.0.0.1:8200"
VAULT_DEV_ROOT_TOKEN_ID: "firezone"
VAULT_LOG_LEVEL: "debug"
ports:
- 8200:8200/tcp
cap_add:
- IPC_LOCK
networks:
- app-internal
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"--proxy",
"off",
"http://127.0.0.1:8200/v1/sys/health?standbyok=true",
]
interval: 10s
timeout: 3s
retries: 10
start_period: 5s
common:
image: alpine:latest # Dummy, will always be overridden.
environment:
# Database
RUN_CONDITIONAL_MIGRATIONS: "true"
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Feature flags
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
# Seeds
STATIC_SEEDS: "true"
## Warning: This token is for a blackhole Postmark server created in a separate, isolated account.
## It WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
networks:
- app-internal
volumes:
postgres-data:
elixir-build-cache:
assets-build-cache:
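
The common service uses a dummy image and exists only to carry shared configuration, which suggests concrete services override it, for example via extends. A minimal sketch of a consumer, assuming this fragment were saved as scripts/compose/components.yml (the path and the api service are illustrative, not confirmed by this diff):

services:
  api:
    extends:
      file: scripts/compose/components.yml
      service: common # inherit the shared environment block
    image: ghcr.io/firezone/api:main # replaces the dummy alpine:latest image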

scripts/compose/relay.yml (new file)

@@ -0,0 +1,33 @@
services:
relay:
environment:
FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo"
RUST_LOG: ${RUST_LOG:-debug}
RUST_BACKTRACE: 1
FIREZONE_API_URL: ws://api:8081
OTLP_GRPC_ENDPOINT: otel:4317
EBPF_OFFLOADING: eth0
privileged: true
init: true
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: firezone-relay
image: ${RELAY_IMAGE:-ghcr.io/firezone/debug/relay}:${RELAY_TAG:-main}
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.default.disable_ipv6=0
healthcheck:
test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
api:
condition: "service_healthy"
extra_hosts:
- "api:203.0.113.10"
- "api:203:0:113::10"

@@ -0,0 +1,60 @@
services:
httpbin:
image: kennethreitz/httpbin
# Needed to bind to IPv6
command: ["gunicorn", "-b", "[::]:80", "httpbin:app"]
healthcheck:
test: ["CMD-SHELL", "ps -C gunicorn"]
networks:
resources:
ipv4_address: 172.20.0.100
ipv6_address: 172:20:0::100
download.httpbin: # Named after `httpbin` because that is how DNS resources are configured for the test setup.
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: http-test-server
image: ${HTTP_TEST_SERVER_IMAGE:-ghcr.io/firezone/debug/http-test-server}:${HTTP_TEST_SERVER_TAG:-main}
environment:
PORT: 80
networks:
dns_resources:
ipv4_address: 172.21.0.101
dns.httpbin.search.test:
image: kennethreitz/httpbin
healthcheck:
test: ["CMD-SHELL", "ps -C gunicorn"]
networks:
dns_resources:
ipv4_address: 172.21.0.100
iperf3:
image: mlabbe/iperf3
healthcheck:
test:
[
"CMD-SHELL",
"(cat /proc/net/tcp | grep 5201) && (cat /proc/net/udp | grep 5201)",
]
command: -s -V
networks:
resources:
ipv4_address: 172.20.0.110
ipv6_address: 172:20:0::110
networks:
dns_resources:
ipam:
config:
- subnet: 172.21.0.0/24
resources:
enable_ipv6: true
ipam:
config:
- subnet: 172.20.0.0/24
- subnet: 172:20:0::/64

@@ -0,0 +1,13 @@
services:
router:
build:
context: ../router
cap_add:
- NET_ADMIN
privileged: true
sysctls:
- net.ipv4.ip_forward=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.default.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
- net.ipv6.conf.default.forwarding=1