firezone/docker-compose.yml
Thomas Eizinger · d1d46fdfb4 · ci: create a more realistic network setup (#10301)
Currently, the docker-compose setup does not reflect real-world scenarios very
well because most components share the same subnet. In reality, Clients,
Gateways, Relays and the backend all live in separate subnets, connected via
multiple routers across the Internet.

The current setup makes it hard to properly test relayed connections. To fix
this, we move every component into its own subnet behind a dedicated router
container that performs source and destination NAT and also acts as a firewall,
so that no unsolicited inbound traffic reaches the Client and Gateway
containers.

This setup will make it easier to test #10286, which requires port
randomization for outgoing traffic on the Client and Gateway side.
2025-09-10 23:37:16 +00:00
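
The routers mentioned above are built from scripts/router (see the *-router services in the file below). As a rough, hypothetical sketch only (the actual rules live in scripts/router and are not reproduced here), such a router could be configured with nftables along the following lines, where "internal" and "internet" correspond to the interface_name values used in the compose file:

# Hypothetical sketch: masquerade traffic leaving towards the internet and drop
# any forwarded traffic that is neither a reply nor explicitly allowed.
nft add table inet router
nft 'add chain inet router postrouting { type nat hook postrouting priority 100 ; }'
nft add rule inet router postrouting oifname "internet" masquerade
nft 'add chain inet router forward { type filter hook forward priority 0 ; policy drop ; }'
nft add rule inet router forward iifname "internal" oifname "internet" accept
nft add rule inet router forward ct state established,related accept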


# Run with DOCKER_BUILD_TARGET=dev to build Rust inside Docker
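# For instance, one might bring the Rust components up with an in-Docker build like this
# (a hypothetical invocation; the service names are the ones defined below):
#   DOCKER_BUILD_TARGET=dev docker compose up --build client gateway relay-1 relay-2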
x-ip-forwarding: &ip-forwarding
- net.ipv4.ip_forward=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.default.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
- net.ipv6.conf.default.forwarding=1
services:
# Dependencies
postgres:
# TODO: Enable pgaudit on dev instance. See https://github.com/pgaudit/pgaudit/issues/44#issuecomment-455090262
image: postgres:17
command:
["postgres", "-c", "wal_level=logical", "-c", "max_connections=200"]
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: firezone_dev
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
ports:
- 5432:5432/tcp
networks:
- app-internal
vault:
image: vault:1.13.3
environment:
VAULT_ADDR: "http://127.0.0.1:8200"
VAULT_DEV_ROOT_TOKEN_ID: "firezone"
VAULT_LOG_LEVEL: "debug"
ports:
- 8200:8200/tcp
cap_add:
- IPC_LOCK
networks:
- app-internal
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"--proxy",
"off",
"http://127.0.0.1:8200/v1/sys/health?standbyok=true",
]
interval: 10s
timeout: 3s
retries: 10
start_period: 5s
web:
build:
context: elixir
args:
APPLICATION_NAME: web
image: ${WEB_IMAGE:-ghcr.io/firezone/web}:${WEB_TAG:-main}
hostname: web.cluster.local
ports:
- 8080:8080/tcp
environment:
# Web Server
WEB_EXTERNAL_URL: http://localhost:8080/
API_EXTERNAL_URL: http://localhost:8081/
PHOENIX_HTTP_WEB_PORT: "8080"
PHOENIX_HTTP_API_PORT: "8081"
PHOENIX_SECURE_COOKIES: "false"
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "web.cluster.local"
RELEASE_NAME: "web"
# Database
RUN_CONDITIONAL_MIGRATIONS: "true"
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: This token is for a blackhole Postmark server created in a separate, isolated account.
## It WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
healthcheck:
test: ["CMD-SHELL", "curl -f localhost:8080/healthz"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
vault:
condition: "service_healthy"
postgres:
condition: "service_healthy"
api-router:
condition: "service_healthy"
networks:
- app-internal
api:
build:
context: elixir
args:
APPLICATION_NAME: api
image: ${API_IMAGE:-ghcr.io/firezone/api}:${API_TAG:-main}
hostname: api.cluster.local
ports:
- 8081:8081/tcp
environment:
# Web Server
WEB_EXTERNAL_URL: http://localhost:8080/
API_EXTERNAL_URL: http://localhost:8081/
PHOENIX_HTTP_WEB_PORT: "8080"
PHOENIX_HTTP_API_PORT: "8081"
PHOENIX_SECURE_COOKIES: "false"
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "api.cluster.local"
RELEASE_NAME: "api"
# Database
RUN_CONDITIONAL_MIGRATIONS: "true"
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: This token is for a blackhole Postmark server created in a separate, isolated account.
## It WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
user: root # Needed to run `ip route` commands
cap_add:
- NET_ADMIN # Needed to run `tc` commands to add simulated delay
command:
- sh
- -c
- |
set -e
# Add static route to internet subnet via router
ip -4 route add 203.0.113.0/24 via 172.28.0.254
ip -6 route add 203:0:113::/64 via 172:28:0::254
exec su default -c "bin/server"
depends_on:
vault:
condition: "service_healthy"
postgres:
condition: "service_healthy"
api-router:
condition: "service_healthy"
healthcheck:
test: ["CMD-SHELL", "curl -f localhost:8081/healthz"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
networks:
app-internal:
ipv4_address: 172.28.0.100
ipv6_address: 172:28:0::100
api-router:
build:
context: scripts/router
cap_add:
- NET_ADMIN
sysctls: *ip-forwarding
environment:
PORT_FORWARDS: |
8081 172.28.0.100 tcp
8081 172:28:0::100 tcp
MASQUERADE_TYPE: ""
networks:
app-internal:
ipv4_address: 172.28.0.254
ipv6_address: 172:28:0::254
interface_name: internal
internet:
ipv4_address: 203.0.113.10
ipv6_address: 203:0:113::10
interface_name: internet
domain:
build:
context: elixir
args:
APPLICATION_NAME: domain
image: ${DOMAIN_IMAGE:-ghcr.io/firezone/domain}:${DOMAIN_TAG:-main}
hostname: domain.cluster.local
environment:
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "domain.cluster.local"
RELEASE_NAME: "domain"
# Database
RUN_CONDITIONAL_MIGRATIONS: "true"
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: This token is for a blackhole Postmark server created in a separate, isolated account.
## It WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
healthcheck:
test: ["CMD-SHELL", "curl -f localhost:4000/healthz"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
vault:
condition: "service_healthy"
postgres:
condition: "service_healthy"
networks:
- app-internal
# This is a service container that lets you run mix tasks for local development
# without having to install Elixir and Erlang on the host machine.
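# For example, one might run a mix task against the dev database like this
# (hypothetical invocation; it assumes `mix` is on PATH in the compiler image and
# that the working directory is the Elixir project):
#   docker compose run --rm elixir mix ecto.migrate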
elixir:
build:
context: elixir
target: compiler
args:
APPLICATION_NAME: api
image: ${ELIXIR_IMAGE:-ghcr.io/firezone/elixir}:${ELIXIR_TAG:-main}
hostname: elixir
environment:
# Web Server
WEB_EXTERNAL_URL: http://localhost:8080/
API_EXTERNAL_URL: http://localhost:8081/
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "mix.cluster.local"
RELEASE_NAME: "mix"
# Database
RUN_CONDITIONAL_MIGRATIONS: "true"
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Higher log level so that the seeds output doesn't get too verbose
LOG_LEVEL: "info"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: This token is for a blackhole Postmark server created in a separate, isolated account.
## It WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# MIX_ENV must be set to prod so that the secrets declared above are used;
# otherwise the seeds will generate invalid tokens
MIX_ENV: "prod"
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
depends_on:
postgres:
condition: "service_healthy"
networks:
- app-internal
# Run with DOCKER_BUILD_TARGET=dev to build Rust inside Docker
client:
environment:
FIREZONE_DNS_CONTROL: "${FIREZONE_DNS_CONTROL:-etc-resolv-conf}"
FIREZONE_TOKEN: "n.SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkN2RhN2QxY2QtMTExYy00NGE3LWI1YWMtNDAyN2I5ZDIzMGU1bQAAACtBaUl5XzZwQmstV0xlUkFQenprQ0ZYTnFJWktXQnMyRGR3XzJ2Z0lRdkZnbgYAR_ywiZQBYgABUYA.PLNlzyqMSgZlbQb1QX5EzZgYNuY9oeOddP0qDkTwtGg"
RUST_LOG: ${RUST_LOG:-firezone_linux_client=trace,wire=trace,connlib_client_shared=trace,firezone_tunnel=trace,connlib_shared=trace,boringtun=debug,snownet=debug,str0m=debug,phoenix_channel=debug,info}
FIREZONE_API_URL: ws://api:8081
FIREZONE_ID: EFC7A9E3-3576-4633-B633-7D47BA9E14AC
command:
- sh
- -c
- |
set -e
# Add static route to internet subnet via router
ip -4 route add 203.0.113.0/24 via 172.30.0.254
ip -6 route add 203:0:113::/64 via 172:30:0::254
# Disable checksum offloading so that checksums are correct when they reach the relay
apk add --no-cache ethtool
ethtool -K eth0 tx off
firezone-headless-client
init: true
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: firezone-headless-client
image: ${CLIENT_IMAGE:-ghcr.io/firezone/debug/client}:${CLIENT_TAG:-main}
privileged: true # Needed to tune `sysctl` inside the container.
cap_add:
- NET_ADMIN
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.default.disable_ipv6=0
devices:
- "/dev/net/tun:/dev/net/tun"
depends_on:
client-router:
condition: "service_healthy"
api:
condition: "service_healthy"
networks:
client-internal:
ipv4_address: 172.30.0.100
ipv6_address: 172:30:0::100
extra_hosts:
- "api:203.0.113.10"
- "api:203:0:113::10"
# Client Router (NAT only, symmetric NAT)
client-router:
build:
context: scripts/router
cap_add:
- NET_ADMIN
sysctls: *ip-forwarding
environment:
MASQUERADE_TYPE: ${CLIENT_MASQUERADE:-}
networks:
client-internal:
ipv4_address: 172.30.0.254
ipv6_address: 172:30:0::254
interface_name: internal
internet:
interface_name: internet
gateway:
healthcheck:
test: ["CMD-SHELL", "ip link | grep tun-firezone"]
environment:
FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkMjI3NDU2MGItZTk3Yi00NWU0LThiMzQtNjc5Yzc2MTdlOThkbQAAADhPMDJMN1VTMkozVklOT01QUjlKNklMODhRSVFQNlVPOEFRVk82VTVJUEwwVkpDMjJKR0gwPT09PW4GAAH8sImUAWIAAVGA.tAm2O9FcyF67VAF3rZdwQpeADrYOIs3S2l2K51G26OM"
RUST_LOG: ${RUST_LOG:-phoenix_channel=trace,firezone_gateway=trace,wire=trace,connlib_gateway_shared=trace,firezone_tunnel=trace,connlib_shared=trace,phoenix_channel=debug,boringtun=debug,snownet=debug,str0m=debug,info}
FIREZONE_API_URL: ws://api:8081
FIREZONE_ID: 4694E56C-7643-4A15-9DF3-638E5B05F570
command:
- sh
- -c
- |
set -e
# Add static route to internet subnet via router
ip -4 route add 203.0.113.0/24 via 172.31.0.254
ip -6 route add 203:0:113::/64 via 172:31:0::254
# Disable checksum offloading so that checksums are correct when they reach the relay
apk add --no-cache ethtool
ethtool -K eth0 tx off
ethtool -K eth1 tx off
ethtool -K eth2 tx off
firezone-gateway
init: true
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: firezone-gateway
image: ${GATEWAY_IMAGE:-ghcr.io/firezone/debug/gateway}:${GATEWAY_TAG:-main}
cap_add:
- NET_ADMIN
sysctls: *ip-forwarding
devices:
- "/dev/net/tun:/dev/net/tun"
depends_on:
gateway-router:
condition: "service_healthy"
api:
condition: "service_healthy"
networks:
gateway-internal:
ipv4_address: 172.31.0.100
ipv6_address: 172:31:0::100
dns_resources:
resources:
extra_hosts:
- "api:203.0.113.10"
- "api:203:0:113::10"
# Gateway Router (NAT only, cone NAT)
gateway-router:
build:
context: scripts/router
cap_add:
- NET_ADMIN
sysctls: *ip-forwarding
environment:
MASQUERADE_TYPE: ${GATEWAY_MASQUERADE:-}
networks:
gateway-internal:
ipv4_address: 172.31.0.254
ipv6_address: 172:31:0::254
interface_name: internal
internet:
interface_name: internet
httpbin:
image: kennethreitz/httpbin
# Needed to bind to IPv6
command: ["gunicorn", "-b", "[::]:80", "httpbin:app"]
healthcheck:
test: ["CMD-SHELL", "ps -C gunicorn"]
networks:
resources:
ipv4_address: 172.20.0.100
ipv6_address: 172:20:0::100
download.httpbin: # Named after `httpbin` because that is how DNS resources are configured for the test setup.
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: http-test-server
image: ${HTTP_TEST_SERVER_IMAGE:-ghcr.io/firezone/debug/http-test-server}:${HTTP_TEST_SERVER_TAG:-main}
environment:
PORT: 80
networks:
dns_resources:
ipv4_address: 172.21.0.101
dns.httpbin.search.test:
image: kennethreitz/httpbin
healthcheck:
test: ["CMD-SHELL", "ps -C gunicorn"]
networks:
dns_resources:
ipv4_address: 172.21.0.100
iperf3:
image: mlabbe/iperf3
healthcheck:
test:
[
"CMD-SHELL",
"(cat /proc/net/tcp | grep 5201) && (cat /proc/net/udp | grep 5201)",
]
command: -s -V
networks:
resources:
ipv4_address: 172.20.0.110
ipv6_address: 172:20:0::110
relay-1:
environment:
PUBLIC_IP4_ADDR: 203.0.113.101
PUBLIC_IP6_ADDR: 203:0:113::101
# LOWEST_PORT: 55555
# HIGHEST_PORT: 55666
# Token for self-hosted Relay
# FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAEngLBONAWIAAVGA.E-f2MFdGMX7JTL2jwoHBdWcUd2G3UNz2JRZLbQrlf0k"
# Token for global Relay
FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo"
RUST_LOG: ${RUST_LOG:-debug}
RUST_BACKTRACE: 1
FIREZONE_API_URL: ws://api:8081
OTLP_GRPC_ENDPOINT: otel:4317
EBPF_OFFLOADING: eth0
command:
- sh
- -c
- |
set -e
# Add static route to internet subnet via router
ip -4 route add 203.0.113.0/24 via 172.29.1.254
ip -6 route add 203:0:113::/64 via 172:29:1::254
firezone-relay
privileged: true
init: true
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: firezone-relay
image: ${RELAY_IMAGE:-ghcr.io/firezone/debug/relay}:${RELAY_TAG:-main}
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.default.disable_ipv6=0
healthcheck:
test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
relay-1-router:
condition: "service_healthy"
api:
condition: "service_healthy"
# ports:
# NOTE: Only 111 ports are used for local dev / testing because Docker Desktop
# allocates a userland proxy process for each forwarded port X_X.
#
# Large ranges here will bring your machine to its knees.
# - "55555-55666:55555-55666/udp"
# - 3478:3478/udp
networks:
relay-1-internal:
ipv4_address: 172.29.1.100
ipv6_address: 172:29:1::100
extra_hosts:
- "api:203.0.113.10"
- "api:203:0:113::10"
relay-1-router:
build:
context: scripts/router
cap_add:
- NET_ADMIN
sysctls: *ip-forwarding
environment:
PORT_FORWARDS: |
3478 172.29.1.100 udp
49152-65535 172.29.1.100 udp
3478 172:29:1::100 udp
49152-65535 172:29:1::100 udp
networks:
relay-1-internal:
ipv4_address: 172.29.1.254
ipv6_address: 172:29:1::254
interface_name: internal
internet:
ipv4_address: 203.0.113.101
ipv6_address: 203:0:113::101
interface_name: internet
relay-2:
environment:
PUBLIC_IP4_ADDR: 203.0.113.102
PUBLIC_IP6_ADDR: 203:0:113::102
# Token for self-hosted Relay
# FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAEngLBONAWIAAVGA.E-f2MFdGMX7JTL2jwoHBdWcUd2G3UNz2JRZLbQrlf0k"
# Token for global Relay
FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo"
RUST_LOG: ${RUST_LOG:-debug}
RUST_BACKTRACE: 1
FIREZONE_API_URL: ws://api:8081
OTLP_GRPC_ENDPOINT: otel:4317
EBPF_OFFLOADING: eth0
command:
- sh
- -c
- |
set -e
# Add static route to internet subnet via router
ip -4 route add 203.0.113.0/24 via 172.29.2.254
ip -6 route add 203:0:113::/64 via 172:29:2::254
firezone-relay
privileged: true
init: true
build:
target: ${DOCKER_BUILD_TARGET:-debug}
context: rust
dockerfile: Dockerfile
args:
PACKAGE: firezone-relay
image: ${RELAY_IMAGE:-ghcr.io/firezone/debug/relay}:${RELAY_TAG:-main}
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.default.disable_ipv6=0
healthcheck:
test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
relay-2-router:
condition: "service_healthy"
api:
condition: "service_healthy"
networks:
relay-2-internal:
ipv4_address: 172.29.2.100
ipv6_address: 172:29:2::100
extra_hosts:
- "api:203.0.113.10"
- "api:203:0:113::10"
relay-2-router:
build:
context: scripts/router
cap_add:
- NET_ADMIN
sysctls: *ip-forwarding
environment:
PORT_FORWARDS: |
3478 172.29.2.100 udp
49152-65535 172.29.2.100 udp
3478 172:29:2::100 udp
49152-65535 172:29:2::100 udp
networks:
relay-2-internal:
ipv4_address: 172.29.2.254
ipv6_address: 172:29:2::254
interface_name: internal
internet:
ipv4_address: 203.0.113.102
ipv6_address: 203:0:113::102
interface_name: internet
# The veth driver uses a pair of interfaces to connect the docker bridge to the container namespace.
# For containers that have an eBPF program attached and do XDP_TX, we need to attach a dummy program
# to the corresponding veth interface on the host so that the XDP_TX traffic is received and passed
# up to the docker bridge successfully.
#
# The "recommended" way to do this is to either turn on GRO for both veth interfaces or attach an XDP
# program that does XDP_PASS to the host-side veth interface. The GRO method is not reliable and was
# shown to only pass packets in large bursts every 15-20 seconds, which breaks ICE setup, so we use
# the XDP method.
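# As a quick sanity check (plain iproute2 usage, shown here only as a hint), attached
# programs show up in the `ip link` output once veth-config has run:
#   ip link show type veth | grep xdp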
veth-config:
image: ghcr.io/firezone/xdp-pass
pid: host
network_mode: host
privileged: true
command:
- sh
- -c
- |
set -e
VETHS=$$(ip link show type veth | grep '^[0-9]' | awk '{print $$2}' | cut -d: -f1 | cut -d@ -f1)
# Safe to attach to all veth interfaces on the host
for dev in $$VETHS; do
echo "Attaching XDP to: $$dev"
ip link set dev $$dev xdpdrv obj /xdp/xdp_pass.o sec xdp 2>/dev/null
done
echo "Done configuring $$(echo "$$VETHS" | wc -w) veth interfaces"
depends_on:
relay-1:
condition: "service_started"
relay-2:
condition: "service_started"
otel:
image: otel/opentelemetry-collector:latest
networks:
app-internal:
# EdgeShark is useful for attaching Wireshark to TUN devices within containers. It is reachable at http://localhost:5001
# You'll also need the extcap plugin: https://github.com/siemens/cshargextcap
gostwire:
image: "ghcr.io/siemens/ghostwire"
restart: "unless-stopped"
read_only: true
entrypoint:
- "/gostwire"
- "--http=[::]:5000"
- "--brand=Edgeshark"
- "--brandicon=PHN2ZyB3aWR0aD0iMjQiIGhlaWdodD0iMjQiIHZpZXdCb3g9IjAgMCA2LjM1IDYuMzUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggZD0iTTUuNzMgNC41M2EuNC40IDAgMDEtLjA2LS4xMmwtLjAyLS4wNC0uMDMuMDNjLS4wNi4xMS0uMDcuMTItLjEuMS0uMDEtLjAyIDAtLjAzLjAzLS4xbC4wNi0uMS4wNC0uMDVjLjAzIDAgLjA0LjAxLjA2LjA5bC4wNC4xMmMuMDMuMDUuMDMuMDcgMCAuMDhoLS4wMnptLS43NS0uMDZjLS4wMi0uMDEtLjAxLS4wMy4wMS0uMDUuMDMtLjAyLjA1LS4wNi4wOC0uMTQuMDMtLjA4LjA1LS4xLjA4LS4wN2wuMDIuMDdjLjAyLjEuMDMuMTIuMDUuMTMuMDUuMDIuMDQuMDctLjAxLjA3LS4wNCAwLS4wNi0uMDMtLjA5LS4xMiAwLS4wMi0uMDEgMC0uMDQuMDRhLjM2LjM2IDAgMDEtLjA0LjA3Yy0uMDIuMDItLjA1LjAyLS4wNiAwem0tLjQ4LS4wM2MtLjAxLS4wMSAwLS4wMy4wMS0uMDZsLjA3LS4xM2MuMDYtLjEuMDctLjEzLjEtLjEyLjAyLjAyLjAzLjA0LjA0LjEuMDEuMDguMDIuMTEuMDQuMTR2LjA0Yy0uMDEuMDItLjA0LjAyLS4wNiAwbC0uMDMtLjEtLjAxLS4wNS0uMDYuMDlhLjQ2LjQ2IDAgMDEtLjA1LjA4Yy0uMDIuMDItLjA0LjAyLS4wNSAwem0uMDgtLjc1YS4wNS4wNSAwIDAxLS4wMS0uMDRMNC41IDMuNWwtLjAyLS4wNGguMDNjLjAzLS4wMi4wMy0uMDEuMDguMDdsLjAzLjA1LjA3LS4xM2MwLS4wMyAwLS4wMy4wMi0uMDQuMDMgMCAuMDQgMCAuMDQuMDJhLjYuNiAwIDAxLS4xMi4yNmMtLjAzLjAyLS4wMy4wMi0uMDUgMHptLjU0LS4xYy0uMDEtLjAyLS4wMi0uMDMtLjAyLS4wOGEuNDQuNDQgMCAwMC0uMDMtLjA4Yy0uMDItLjA1LS4wMi0uMDggMC0uMDguMDUtLjAxLjA2IDAgLjA2LjA0bC4wMy4wOGMuMDIgMCAuMDYtLjA4LjA3LS4xMmwuMDEtLjAyLjA2LS4wMS0uMTIuMjZoLS4wNnptLjQ3LS4wOGwtLjAyLS4wOWEuNTUuNTUgMCAwMC0uMDItLjEyYzAtLjAyIDAtLjAyLjAyLS4wMmguMDNsLjAyLjExdi4wM2MuMDEgMCAuMDQtLjAzLjA2LS4wN2wuMDUtLjA5Yy4wMi0uMDIuMDYtLjAyLjA2IDBsLS4xNC4yNWMtLjAxLjAxLS4wNS4wMi0uMDYgMHptLS43Ni0uNDFjLS4wNi0uMDMtLjA4LS4xNC0uMDUtLjJsLjA0LS4wM2MuMDMtLjAyLjAzLS4wMi4wNy0uMDIuMDYgMCAuMDkuMDEuMTIuMDUuMDMuMDUuMDIuMTItLjAzLjE2LS4wNS4wNS0uMS4wNi0uMTUuMDR6bS4zNS0uMDVBLjEzLjEzIDAgMDE1LjE0IDNsLS4wMS0uMDVjMC0uMDQgMC0uMDUuMDYtLjFsLjA2LS4wMmMuMDcgMCAuMTEuMDUuMS4xMmwtLjAxLjA2Yy0uMDQuMDQtLjEyLjA1LS4xNi4wM3oiLz48cGF0aCBkPSJNMy44NCA1LjQ4Yy0uMTctLjIxLS4wNC0uNS0uMDYtLjc0LjA1LS44My4xLTEuNjYuMDctMi41LjE3LS4xNC40NC4wOC41OS0uMDItLjIyLS4xMy0uNTQtLjEzLS44LS4xNC0uMTQuMDktLjUuMDItLjUuMTcuMi4wOC41My0uMTUuNjQuMDItLjAxLjgxLS4wMyAxLjYyLS4xIDIuNDItLjA1LjIzLjA3LjU2LS4xNC43MmE5LjUxIDkuNTEgMCAwMS0uNjYtLjUzYy4xMy0uMDQuMzItLjQuMS0uMi0uMjEuMjItLjUxLjI3LS43OS4zNS0uMTIuMDgtLjU0LjEyLS4zMi0uMTEuMi0uMjIuNDUtLjQyLjUtLjcyLS4xMy0uMTItLjEuMy0uMjUuMDgtLjIzLS4xNi0uNDMtLjM1LS42Ny0uNS0uMjEuMTItLjQ1LjIzLS43LjI5LS4xNy4xLS42MS4xNC0uMzItLjE0LjItLjIuNDMtLjQyLjQtLjcyLjAzLS4zMS0uMDgtLjYxLS4yLS44OWEyLjA3IDIuMDcgMCAwMC0uNDMtLjY0Yy0uMTgtLjE2LS4wMi0uMy4xNi0uMTkuMzcuMTcuNy40Ljk4LjcuMDkuMTcuMTMuNC4zNi4yNS40NC0uMDguOS0uMSAxLjM0LS4yMS4xMy0uMy4wNC0uNjQtLjEyLS45MSAwLS4xNy0uNDItLjM4LS4yMi0uNDYuMzguMS43Ny4yMyAxLjA4LjQ4LjMyLjE5LjY0LjQ1LjY5Ljg0LjEuMTguNS4xMi43MS4xOS4zMy4wNC42Ni4wOSAxIC4wOS4xNS4xMi0uMDguNDcgMCAuNjgtLjA4LjE1LS40LjA3LS41OS4xNC0uMTIuMTMtLjE0LjQzLS4xNS42NC0uMDUuMTUuMTguNDguMDYuNWExLjQgMS40IDAgMDEwLTEuMWMtLjItLjA2LS4zMy4wNi0uNTMuMDQtLjIzLjA0LS40NS4xLS42OC4xMi0uMTMuMi0uMjMuNTQtLjA2Ljc1LjEzLjE5LjMuMi40OC4yLjE4LjAzLjM2LjA1LjU1LjA0LjE4LS4wMS4zNi4wNy41Mi4wNS4yLjAxLjEuNC4wNy41Mi0uMzguMDctLjc2LjE2LTEuMTQuMjUtLjMuMDQtLjU4LjE0LS44Ny4xOXptLjQyLS4zOGMuMDgtLjEuMDItLjU0LS4wNC0uMjQgMCAuMDYtLjEuMy4wNC4yNHptLjU4LS4xYy4wOS0uMTItLjA0LS40Mi0uMDctLjE0LS4wMi4wNi0uMDIuMi4wNy4xNHptLjU2LS4wNmMuMTItLjE0LS4wOC0uMzQtLjA4LS4wOC0uMDEuMDQuMDIuMTMuMDguMDh6bS0yLjE3LS42Yy4wMy0uMjUuMDItLjUyLjA2LS43OS4wMi0uMjUuMDUtLjUgMC0uNzUtLjA5LjItLjAzLjQ4LS4wOS43LS4wMi4yOC0uMDguNTctLjA0Ljg1LjAyLjAyLjA1LjAyLjA3IDB6bS0uNjctLjI3Yy4wOS0uMy4wNy0uNjMuMDgtLjk0LS4wMS0uMTIuMDEtLjQ3LS4xLS40LjAzLjQzLjAzLjg4LS4wNiAxLjMyIDAgLjAzLjA1LjA1LjA4LjAyem0tLjYtLjVjLjEtLjI0LjE0LS41My4xLS43OS0uMTQgMC0uMDMuMzYtLjEuNS4wMS4wNi0uMTMuMzIgMCAuM3ptLS40My0uMmMuMDQtLjE2LjE1LS41
IDAtLjU3LjA1LjE4LS4xMi40NS0uMDMuNThsLjAzLS4wMXptMy40My0uMjJjLjIyLS4xMyAwLS42Ni0uMjItLjM1LS4xLjE1IDAgLjQyLjIyLjM1em0uMzQtLjA2Yy4yOCAwIC4xOC0uNTMtLjA5LS4zNC0uMTIuMDctLjEyLjQyLjA5LjM0em0uNDQtLjAxYy0uMDEtLjEuMTItLjM4LS4wMS0uNC0uMDIuMS0uMTcuNDEgMCAuNHptLTEuMjYtLjAyYzAtLjEzLjAyLS41NS0uMTQtLjUuMDguMTItLjAyLjUxLjE0LjV6Ii8+PHBhdGggZD0iTTUuNTMgMy4yN2wtLjI1LjAzYS41Ny41NyAwIDAxLS4xMy4yNGwtLjA2LS4yLS4zNy4wNGMwIC4xLS4wMy4xOS0uMS4yOGwtLjEzLS4yNC0uMjIuMDNzLS4xNS4yOC0uMTUuNDYuMDcuNDYuMzcuNTNoLjAzbC4xNS0uMjUuMDguMjguMjUuMDIuMTItLjJjLjA1LjEuMS4yLjEyLjJsLjMuMDJ2LS4wOHMtLjEyLS4yNS0uMTItLjM5Yy0uMDItLjI3LjExLS43Ny4xMS0uNzd6IiBmaWxsLW9wYWNpdHk9Ii41Ii8+PC9zdmc+"
user: "65534"
# To grant exactly this specific set of capabilities and none of the
# additional Docker container default capabilities, we need to drop
# "all" capabilities. Regardless of the order (there ain't one) of YAML
# dictionary keys, Docker carries out dropping all capabilities first,
# and only then adds capabilities. See also:
# https://stackoverflow.com/a/63219871.
cap_drop:
- ALL
cap_add:
- CAP_SYS_ADMIN # change namespaces
- CAP_SYS_CHROOT # change mount namespaces
- CAP_SYS_PTRACE # access nsfs namespace information
- CAP_DAC_READ_SEARCH # access/scan /proc/[$PID]/fd itself
- CAP_DAC_OVERRIDE # access container engine unix domain sockets without being rude, erm, root.
- CAP_NET_RAW # pingin' 'round
- CAP_NET_ADMIN # 'nuff tables
security_opt:
# The default Docker container AppArmor profile blocks namespace
# discovery, because reading from /proc/$PID/ns/* is considered to be
# a ptrace read/ready operation.
- apparmor:unconfined
# Essential since we need full PID view.
pid: "host"
cgroup: host
networks:
99-ghost-in-da-edge:
priority: 100
edgeshark:
image: "ghcr.io/siemens/packetflix"
read_only: true
restart: "unless-stopped"
entrypoint:
- "/packetflix"
- "--port=5001"
- "--discovery-service=gostwire.ghost-in-da-edge"
- "--gw-port=5000"
- "--proxy-discovery"
- "--debug"
ports:
- "127.0.0.1:5001:5001"
# Run as non-root user (baked into the metadata of the image anyway).
user: "65534"
# To grant exactly this specific set of capabilities and none of the
# additional Docker container default capabilities, we need to drop
# "all" capabilities. Regardless of the order (there ain't one) of YAML
# dictionary keys, Docker carries out dropping all capabilities first,
# and only then adds capabilities. See also:
# https://stackoverflow.com/a/63219871.
cap_drop:
- ALL
cap_add:
- CAP_SYS_ADMIN # change namespaces
- CAP_SYS_CHROOT # change mount namespaces
- CAP_SYS_PTRACE # access nsfs namespace information
- CAP_NET_ADMIN # allow dumpcap to control promisc. mode
- CAP_NET_RAW # capture raw packets, and not that totally burnt stuff
security_opt:
# The default Docker container AppArmor profile blocks namespace
# discovery, because reading from /proc/$PID/ns/* is considered to be
# a ptrace read/ready operation.
- apparmor:unconfined
# Essential since we need full PID view.
pid: "host"
networks:
99-ghost-in-da-edge:
priority: 100
networks:
# Internet network - where all public IPs live
internet:
enable_ipv6: true
ipam:
config:
- subnet: 203.0.113.0/24
- subnet: 203:0:113::/64
app-internal:
enable_ipv6: true
ipam:
config:
- subnet: 172.28.0.0/24
- subnet: 172:28:0::/64
relay-1-internal:
enable_ipv6: true
ipam:
config:
- subnet: 172.29.1.0/24
- subnet: 172:29:1::/64
relay-2-internal:
enable_ipv6: true
ipam:
config:
- subnet: 172.29.2.0/24
- subnet: 172:29:2::/64
client-internal:
enable_ipv6: true
ipam:
config:
- subnet: 172.30.0.0/24
- subnet: 172:30:0::/64
gateway-internal:
enable_ipv6: true
ipam:
config:
- subnet: 172.31.0.0/24
- subnet: 172:31:0::/64
dns_resources:
ipam:
config:
- subnet: 172.21.0.0/24
resources:
enable_ipv6: true
ipam:
config:
- subnet: 172.20.0.0/24
- subnet: 172:20:0::/64
# Monitoring network
99-ghost-in-da-edge:
name: ghost-in-da-edge
internal: false
volumes:
postgres-data:
elixir-build-cache:
assets-build-cache: