firezone/docker-compose.yml
Jamil 968db2ae39 feat(portal): Receive WAL events (#8909)
Firezone's control plane is a realtime, distributed system that relies
on a broadcast/subscribe mechanism to function. In many cases, events
are broadcast whenever relevant data in the DB changes, such as an
actor losing access to a policy, a membership being deleted, and so
forth.

Today, this is handled in the application layer, typically at the place
where the relevant DB call is made (e.g. in an `after_commit`). While
this approach has worked thus far, it has several issues, illustrated by
the sketch after this list:

1. We have no guarantee that the DB change will issue a broadcast. If
the application is redeployed or the process crashes after the DB
changes are committed but before the broadcast happens, connected
clients and gateways may never receive the update.
2. We have no guarantee that broadcasts preserve the order of the DB
updates they correspond to. In other words, app server A could win the
race for its DB operation against app server B, but then lose the race
to broadcast first.
3. If the cluster is in a bad state where broadcasts may return an
error (e.g. https://github.com/firezone/firezone/issues/8660), we will
never retry the broadcast.
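
The sketch below illustrates the pattern (the module, `Repo`, and PubSub
names are hypothetical, not Firezone's actual code): the broadcast only
happens after the DB write, and nothing ties the two together.

```elixir
defmodule Example.Memberships do
  # Hypothetical illustration of the application-layer approach described above.
  alias Example.Repo

  def delete_membership(membership) do
    # 1. The DB change is made and committed.
    {:ok, membership} = Repo.delete(membership)

    # 2. If the node is redeployed or crashes right here, the DB change is durable
    #    but connected clients/gateways never hear about it (issue 1). A later
    #    commit on another app server may still broadcast before this one
    #    (issue 2), and a failed broadcast is never retried (issue 3).
    Phoenix.PubSub.broadcast(
      Example.PubSub,
      "memberships:#{membership.actor_id}",
      {:deleted, membership.id}
    )

    {:ok, membership}
  end
end
```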

To fix the above issues, we introduce a WAL logical decoder that
processes the event stream one message at a time and performs any
needed work.
Serializability is guaranteed since we only process the WAL in a single,
cluster-global process, `ReplicationConnection`. Durability is also
guaranteed since we only ACK WAL segments after we've successfully
ingested the event.

This means we will only advance the position of our WAL stream after
successfully broadcasting the event.
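
For reference, the sketch below shows roughly what such a consumer can
look like, loosely following the `Postgrex.ReplicationConnection`
documentation; the slot/publication names and `broadcast_event/1` are
placeholders, not the actual implementation:

```elixir
defmodule Example.WALConsumer do
  # Sketch of a single, cluster-global logical replication consumer that only
  # acknowledges WAL positions after the decoded event has been handled.
  use Postgrex.ReplicationConnection

  def start_link(opts) do
    # auto_reconnect resumes streaming from the last acknowledged LSN after a disconnect.
    Postgrex.ReplicationConnection.start_link(__MODULE__, :ok, [auto_reconnect: true] ++ opts)
  end

  @impl true
  def init(:ok), do: {:ok, %{last_handled_lsn: 0}}

  @impl true
  def handle_connect(state) do
    query =
      "START_REPLICATION SLOT example_slot LOGICAL 0/0 " <>
        "(proto_version '1', publication_names 'example_pub')"

    {:stream, query, [], state}
  end

  # XLogData ('w'): decode and handle the event *before* recording its LSN, so
  # the stream position only advances once the work has actually been done.
  @impl true
  def handle_data(<<?w, _start::64, wal_end::64, _clock::64, payload::binary>>, state) do
    :ok = broadcast_event(payload)
    {:noreply, %{state | last_handled_lsn: wal_end}}
  end

  # Primary keepalive ('k'): when a reply is requested, acknowledge only up to
  # the last successfully handled LSN.
  def handle_data(<<?k, _wal_end::64, _clock::64, reply>>, state) do
    messages =
      case reply do
        1 -> [standby_status_update(state.last_handled_lsn)]
        0 -> []
      end

    {:noreply, messages, state}
  end

  # Standby status update ('r'): written/flushed/applied LSNs plus a timestamp.
  defp standby_status_update(lsn) do
    <<?r, lsn + 1::64, lsn + 1::64, lsn + 1::64, now()::64, 0>>
  end

  @epoch DateTime.to_unix(~U[2000-01-01 00:00:00Z], :microsecond)
  defp now, do: System.os_time(:microsecond) - @epoch

  # Placeholder for whatever work needs to happen per event (e.g. a PubSub broadcast).
  defp broadcast_event(_payload), do: :ok
end
```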

This PR only introduces the WAL stream processing system but does not
introduce any changes to our current broadcasting behavior - that's
saved for another PR.
2025-04-29 23:53:06 -07:00


services:
# Dependencies
postgres:
# TODO: Enable pgaudit on dev instance. See https://github.com/pgaudit/pgaudit/issues/44#issuecomment-455090262
image: postgres:15
command: ["postgres", "-c", "wal_level=logical"]
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: firezone_dev
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
ports:
- 5432:5432/tcp
networks:
- app
vault:
image: vault:1.13.3
environment:
VAULT_ADDR: "http://127.0.0.1:8200"
VAULT_DEV_ROOT_TOKEN_ID: "firezone"
VAULT_LOG_LEVEL: "debug"
ports:
- 8200:8200/tcp
cap_add:
- IPC_LOCK
networks:
- app
healthcheck:
test:
[
"CMD",
"wget",
"--spider",
"--proxy",
"off",
"http://127.0.0.1:8200/v1/sys/health?standbyok=true",
]
interval: 10s
timeout: 3s
retries: 10
start_period: 5s
# Firezone Components
web:
build:
context: elixir
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/web:main
args:
APPLICATION_NAME: web
image: ${WEB_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/web}:${WEB_TAG:-main}
hostname: web.cluster.local
ports:
- 8080:8080/tcp
environment:
# Web Server
WEB_EXTERNAL_URL: http://localhost:8080/
API_EXTERNAL_URL: http://localhost:8081/
PHOENIX_HTTP_WEB_PORT: "8080"
PHOENIX_HTTP_API_PORT: "8081"
PHOENIX_SECURE_COOKIES: "false"
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "web.cluster.local"
RELEASE_NAME: "web"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
DATABASE_REPLICATION_USER: postgres
DATABASE_REPLICATION_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
      ## Warning: The token is for a blackhole Postmark server created in a separate, isolated account
      ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_FLOW_ACTIVITIES_ENABLED: "true"
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
healthcheck:
test: ["CMD-SHELL", "curl -f localhost:8080/healthz"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
vault:
condition: "service_healthy"
postgres:
condition: "service_healthy"
networks:
- app
api:
build:
context: elixir
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/api:main
args:
APPLICATION_NAME: api
image: ${API_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/api}:${API_TAG:-main}
hostname: api.cluster.local
ports:
- 8081:8081/tcp
environment:
# Web Server
WEB_EXTERNAL_URL: http://localhost:8080/
API_EXTERNAL_URL: http://localhost:8081/
PHOENIX_HTTP_WEB_PORT: "8080"
PHOENIX_HTTP_API_PORT: "8081"
PHOENIX_SECURE_COOKIES: "false"
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "api.cluster.local"
RELEASE_NAME: "api"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
DATABASE_REPLICATION_USER: postgres
DATABASE_REPLICATION_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
      ## Warning: The token is for a blackhole Postmark server created in a separate, isolated account
      ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_FLOW_ACTIVITIES_ENABLED: "true"
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
depends_on:
vault:
condition: "service_healthy"
postgres:
condition: "service_healthy"
healthcheck:
test: ["CMD-SHELL", "curl -f localhost:8081/healthz"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
networks:
- app
domain:
build:
context: elixir
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/domain:main
args:
APPLICATION_NAME: domain
image: ${DOMAIN_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/domain}:${DOMAIN_TAG:-main}
hostname: domain.cluster.local
environment:
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "domain.cluster.local"
RELEASE_NAME: "domain"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
DATABASE_REPLICATION_USER: postgres
DATABASE_REPLICATION_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
      ## Warning: The token is for a blackhole Postmark server created in a separate, isolated account
      ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_FLOW_ACTIVITIES_ENABLED: "true"
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
healthcheck:
test: ["CMD-SHELL", "curl -f localhost:4000/healthz"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
vault:
condition: "service_healthy"
postgres:
condition: "service_healthy"
networks:
- app
  # This is a service container that allows running mix tasks for local development
  # without having to install Elixir and Erlang on the host machine.
elixir:
build:
context: elixir
target: compiler
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/elixir:main
args:
APPLICATION_NAME: api
image: ${ELIXIR_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/elixir}:${ELIXIR_TAG:-main}
hostname: elixir
environment:
# Web Server
WEB_EXTERNAL_URL: http://localhost:8080/
API_EXTERNAL_URL: http://localhost:8081/
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "mix.cluster.local"
RELEASE_NAME: "mix"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
DATABASE_REPLICATION_USER: postgres
DATABASE_REPLICATION_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock"
# Secrets
TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
      # Use a higher log level so the seeds output isn't too verbose
LOG_LEVEL: "info"
# Emails
OUTBOUND_EMAIL_FROM: "public-noreply@firez.one"
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
      ## Warning: The token is for a blackhole Postmark server created in a separate, isolated account
      ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}'
      # MIX_ENV should be set to prod to use the secrets declared above;
      # otherwise the seeds will generate invalid tokens.
MIX_ENV: "prod"
# Seeds
STATIC_SEEDS: "true"
# Feature flags
FEATURE_FLOW_ACTIVITIES_ENABLED: "true"
FEATURE_POLICY_CONDITIONS_ENABLED: "true"
FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true"
FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true"
FEATURE_IDP_SYNC_ENABLED: "true"
FEATURE_REST_API_ENABLED: "true"
FEATURE_INTERNET_RESOURCE_ENABLED: "true"
depends_on:
postgres:
condition: "service_healthy"
networks:
- app
client:
environment:
FIREZONE_DNS_CONTROL: "${FIREZONE_DNS_CONTROL:-etc-resolv-conf}"
FIREZONE_TOKEN: "n.SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkN2RhN2QxY2QtMTExYy00NGE3LWI1YWMtNDAyN2I5ZDIzMGU1bQAAACtBaUl5XzZwQmstV0xlUkFQenprQ0ZYTnFJWktXQnMyRGR3XzJ2Z0lRdkZnbgYAR_ywiZQBYgABUYA.PLNlzyqMSgZlbQb1QX5EzZgYNuY9oeOddP0qDkTwtGg"
RUST_LOG: ${RUST_LOG:-firezone_linux_client=trace,wire=trace,connlib_client_shared=trace,firezone_tunnel=trace,connlib_shared=trace,boringtun=debug,snownet=debug,str0m=debug,phoenix_channel=debug,info}
FIREZONE_API_URL: ws://api:8081
init: true
build:
target: debug
context: rust
dockerfile: Dockerfile
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/client:main
args:
PACKAGE: firezone-headless-client
image: ${CLIENT_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/debug/client}:${CLIENT_TAG:-main}
cap_add:
- NET_ADMIN
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
devices:
- "/dev/net/tun:/dev/net/tun"
depends_on:
api:
condition: "service_healthy"
networks:
app:
ipv4_address: 172.28.0.100
gateway:
healthcheck:
test: ["CMD-SHELL", "ip link | grep tun-firezone"]
environment:
FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkMjI3NDU2MGItZTk3Yi00NWU0LThiMzQtNjc5Yzc2MTdlOThkbQAAADhPMDJMN1VTMkozVklOT01QUjlKNklMODhRSVFQNlVPOEFRVk82VTVJUEwwVkpDMjJKR0gwPT09PW4GAAH8sImUAWIAAVGA.tAm2O9FcyF67VAF3rZdwQpeADrYOIs3S2l2K51G26OM"
RUST_LOG: ${RUST_LOG:-phoenix_channel=trace,firezone_gateway=trace,wire=trace,connlib_gateway_shared=trace,firezone_tunnel=trace,connlib_shared=trace,phoenix_channel=debug,boringtun=debug,snownet=debug,str0m=debug,info}
FIREZONE_ENABLE_MASQUERADE: 1 # FIXME: NOOP in latest version. Remove after next release.
FIREZONE_API_URL: ws://api:8081
FIREZONE_ID: 4694E56C-7643-4A15-9DF3-638E5B05F570
init: true
build:
target: debug
context: rust
dockerfile: Dockerfile
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/gateway:main
args:
PACKAGE: firezone-gateway
image: ${GATEWAY_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/debug/gateway}:${GATEWAY_TAG:-main}
cap_add:
- NET_ADMIN
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv6.conf.all.forwarding=1
- net.ipv6.conf.default.forwarding=1
devices:
- "/dev/net/tun:/dev/net/tun"
depends_on:
api:
condition: "service_healthy"
networks:
app:
ipv4_address: 172.28.0.105
dns_resources:
resources:
httpbin:
image: kennethreitz/httpbin
healthcheck:
test: ["CMD-SHELL", "ps -C gunicorn"]
networks:
resources:
ipv4_address: 172.20.0.100
download.httpbin: # Named after `httpbin` because that is how DNS resources are configured for the test setup.
build:
target: dev
context: rust
dockerfile: Dockerfile
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/http-test-server:main
args:
PACKAGE: http-test-server
image: ${HTTP_TEST_SERVER_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/debug/http-test-server}:${HTTP_TEST_SERVER_TAG:-main}
environment:
PORT: 80
networks:
dns_resources:
ipv4_address: 172.21.0.101
dns.httpbin.search.test:
image: kennethreitz/httpbin
healthcheck:
test: ["CMD-SHELL", "ps -C gunicorn"]
networks:
dns_resources:
ipv4_address: 172.21.0.100
iperf3:
image: mlabbe/iperf3
healthcheck:
test:
[
"CMD-SHELL",
"(cat /proc/net/tcp | grep 5201) && (cat /proc/net/udp | grep 5201)",
]
command: -s -V
networks:
resources:
ipv4_address: 172.20.0.110
relay-1:
environment:
PUBLIC_IP4_ADDR: ${RELAY_1_PUBLIC_IP4_ADDR:-172.28.0.101}
# PUBLIC_IP6_ADDR: fcff:3990:3990::101
# LOWEST_PORT: 55555
# HIGHEST_PORT: 55666
# Token for self-hosted Relay
# FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAEngLBONAWIAAVGA.E-f2MFdGMX7JTL2jwoHBdWcUd2G3UNz2JRZLbQrlf0k"
# Token for global Relay
FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo"
RUST_LOG: ${RUST_LOG:-debug}
RUST_BACKTRACE: 1
FIREZONE_API_URL: ws://api:8081
OTLP_GRPC_ENDPOINT: otel:4317
build:
target: debug
context: rust
dockerfile: Dockerfile
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/relay:main
args:
PACKAGE: firezone-relay
image: ${RELAY_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/debug/relay}:${RELAY_TAG:-main}
healthcheck:
test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
api:
condition: "service_healthy"
# ports:
# NOTE: Only 111 ports are used for local dev / testing because Docker Desktop
# allocates a userland proxy process for each forwarded port X_X.
#
# Large ranges here will bring your machine to its knees.
# - "55555-55666:55555-55666/udp"
# - 3478:3478/udp
networks:
app:
ipv4_address: ${RELAY_1_PUBLIC_IP4_ADDR:-172.28.0.101}
relay-2:
environment:
PUBLIC_IP4_ADDR: ${RELAY_2_PUBLIC_IP4_ADDR:-172.28.0.201}
# PUBLIC_IP6_ADDR: fcff:3990:3990::101
# Token for self-hosted Relay
# FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAEngLBONAWIAAVGA.E-f2MFdGMX7JTL2jwoHBdWcUd2G3UNz2JRZLbQrlf0k"
# Token for global Relay
FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo"
RUST_LOG: ${RUST_LOG:-debug}
RUST_BACKTRACE: 1
FIREZONE_API_URL: ws://api:8081
OTLP_GRPC_ENDPOINT: otel:4317
build:
target: debug
context: rust
dockerfile: Dockerfile
cache_from:
- type=registry,ref=us-east1-docker.pkg.dev/firezone-staging/cache/relay:main
args:
PACKAGE: firezone-relay
image: ${RELAY_IMAGE:-us-east1-docker.pkg.dev/firezone-staging/firezone/debug/relay}:${RELAY_TAG:-main}
healthcheck:
test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"]
start_period: 10s
interval: 30s
retries: 5
timeout: 5s
depends_on:
api:
condition: "service_healthy"
networks:
app:
ipv4_address: ${RELAY_2_PUBLIC_IP4_ADDR:-172.28.0.201}
otel:
image: otel/opentelemetry-collector:latest
networks:
app:
# EdgeShark is useful for attaching wireshark to TUN devices within containers. It is reachable at http://localhost:5001
# You'll also need the extcap plugin: https://github.com/siemens/cshargextcap
gostwire:
image: "ghcr.io/siemens/ghostwire"
pull_policy: always
restart: "unless-stopped"
read_only: true
entrypoint:
- "/gostwire"
- "--http=[::]:5000"
- "--brand=Edgeshark"
- "--brandicon=PHN2ZyB3aWR0aD0iMjQiIGhlaWdodD0iMjQiIHZpZXdCb3g9IjAgMCA2LjM1IDYuMzUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggZD0iTTUuNzMgNC41M2EuNC40IDAgMDEtLjA2LS4xMmwtLjAyLS4wNC0uMDMuMDNjLS4wNi4xMS0uMDcuMTItLjEuMS0uMDEtLjAyIDAtLjAzLjAzLS4xbC4wNi0uMS4wNC0uMDVjLjAzIDAgLjA0LjAxLjA2LjA5bC4wNC4xMmMuMDMuMDUuMDMuMDcgMCAuMDhoLS4wMnptLS43NS0uMDZjLS4wMi0uMDEtLjAxLS4wMy4wMS0uMDUuMDMtLjAyLjA1LS4wNi4wOC0uMTQuMDMtLjA4LjA1LS4xLjA4LS4wN2wuMDIuMDdjLjAyLjEuMDMuMTIuMDUuMTMuMDUuMDIuMDQuMDctLjAxLjA3LS4wNCAwLS4wNi0uMDMtLjA5LS4xMiAwLS4wMi0uMDEgMC0uMDQuMDRhLjM2LjM2IDAgMDEtLjA0LjA3Yy0uMDIuMDItLjA1LjAyLS4wNiAwem0tLjQ4LS4wM2MtLjAxLS4wMSAwLS4wMy4wMS0uMDZsLjA3LS4xM2MuMDYtLjEuMDctLjEzLjEtLjEyLjAyLjAyLjAzLjA0LjA0LjEuMDEuMDguMDIuMTEuMDQuMTR2LjA0Yy0uMDEuMDItLjA0LjAyLS4wNiAwbC0uMDMtLjEtLjAxLS4wNS0uMDYuMDlhLjQ2LjQ2IDAgMDEtLjA1LjA4Yy0uMDIuMDItLjA0LjAyLS4wNSAwem0uMDgtLjc1YS4wNS4wNSAwIDAxLS4wMS0uMDRMNC41IDMuNWwtLjAyLS4wNGguMDNjLjAzLS4wMi4wMy0uMDEuMDguMDdsLjAzLjA1LjA3LS4xM2MwLS4wMyAwLS4wMy4wMi0uMDQuMDMgMCAuMDQgMCAuMDQuMDJhLjYuNiAwIDAxLS4xMi4yNmMtLjAzLjAyLS4wMy4wMi0uMDUgMHptLjU0LS4xYy0uMDEtLjAyLS4wMi0uMDMtLjAyLS4wOGEuNDQuNDQgMCAwMC0uMDMtLjA4Yy0uMDItLjA1LS4wMi0uMDggMC0uMDguMDUtLjAxLjA2IDAgLjA2LjA0bC4wMy4wOGMuMDIgMCAuMDYtLjA4LjA3LS4xMmwuMDEtLjAyLjA2LS4wMS0uMTIuMjZoLS4wNnptLjQ3LS4wOGwtLjAyLS4wOWEuNTUuNTUgMCAwMC0uMDItLjEyYzAtLjAyIDAtLjAyLjAyLS4wMmguMDNsLjAyLjExdi4wM2MuMDEgMCAuMDQtLjAzLjA2LS4wN2wuMDUtLjA5Yy4wMi0uMDIuMDYtLjAyLjA2IDBsLS4xNC4yNWMtLjAxLjAxLS4wNS4wMi0uMDYgMHptLS43Ni0uNDFjLS4wNi0uMDMtLjA4LS4xNC0uMDUtLjJsLjA0LS4wM2MuMDMtLjAyLjAzLS4wMi4wNy0uMDIuMDYgMCAuMDkuMDEuMTIuMDUuMDMuMDUuMDIuMTItLjAzLjE2LS4wNS4wNS0uMS4wNi0uMTUuMDR6bS4zNS0uMDVBLjEzLjEzIDAgMDE1LjE0IDNsLS4wMS0uMDVjMC0uMDQgMC0uMDUuMDYtLjFsLjA2LS4wMmMuMDcgMCAuMTEuMDUuMS4xMmwtLjAxLjA2Yy0uMDQuMDQtLjEyLjA1LS4xNi4wM3oiLz48cGF0aCBkPSJNMy44NCA1LjQ4Yy0uMTctLjIxLS4wNC0uNS0uMDYtLjc0LjA1LS44My4xLTEuNjYuMDctMi41LjE3LS4xNC40NC4wOC41OS0uMDItLjIyLS4xMy0uNTQtLjEzLS44LS4xNC0uMTQuMDktLjUuMDItLjUuMTcuMi4wOC41My0uMTUuNjQuMDItLjAxLjgxLS4wMyAxLjYyLS4xIDIuNDItLjA1LjIzLjA3LjU2LS4xNC43MmE5LjUxIDkuNTEgMCAwMS0uNjYtLjUzYy4xMy0uMDQuMzItLjQuMS0uMi0uMjEuMjItLjUxLjI3LS43OS4zNS0uMTIuMDgtLjU0LjEyLS4zMi0uMTEuMi0uMjIuNDUtLjQyLjUtLjcyLS4xMy0uMTItLjEuMy0uMjUuMDgtLjIzLS4xNi0uNDMtLjM1LS42Ny0uNS0uMjEuMTItLjQ1LjIzLS43LjI5LS4xNy4xLS42MS4xNC0uMzItLjE0LjItLjIuNDMtLjQyLjQtLjcyLjAzLS4zMS0uMDgtLjYxLS4yLS44OWEyLjA3IDIuMDcgMCAwMC0uNDMtLjY0Yy0uMTgtLjE2LS4wMi0uMy4xNi0uMTkuMzcuMTcuNy40Ljk4LjcuMDkuMTcuMTMuNC4zNi4yNS40NC0uMDguOS0uMSAxLjM0LS4yMS4xMy0uMy4wNC0uNjQtLjEyLS45MSAwLS4xNy0uNDItLjM4LS4yMi0uNDYuMzguMS43Ny4yMyAxLjA4LjQ4LjMyLjE5LjY0LjQ1LjY5Ljg0LjEuMTguNS4xMi43MS4xOS4zMy4wNC42Ni4wOSAxIC4wOS4xNS4xMi0uMDguNDcgMCAuNjgtLjA4LjE1LS40LjA3LS41OS4xNC0uMTIuMTMtLjE0LjQzLS4xNS42NC0uMDUuMTUuMTguNDguMDYuNWExLjQgMS40IDAgMDEwLTEuMWMtLjItLjA2LS4zMy4wNi0uNTMuMDQtLjIzLjA0LS40NS4xLS42OC4xMi0uMTMuMi0uMjMuNTQtLjA2Ljc1LjEzLjE5LjMuMi40OC4yLjE4LjAzLjM2LjA1LjU1LjA0LjE4LS4wMS4zNi4wNy41Mi4wNS4yLjAxLjEuNC4wNy41Mi0uMzguMDctLjc2LjE2LTEuMTQuMjUtLjMuMDQtLjU4LjE0LS44Ny4xOXptLjQyLS4zOGMuMDgtLjEuMDItLjU0LS4wNC0uMjQgMCAuMDYtLjEuMy4wNC4yNHptLjU4LS4xYy4wOS0uMTItLjA0LS40Mi0uMDctLjE0LS4wMi4wNi0uMDIuMi4wNy4xNHptLjU2LS4wNmMuMTItLjE0LS4wOC0uMzQtLjA4LS4wOC0uMDEuMDQuMDIuMTMuMDguMDh6bS0yLjE3LS42Yy4wMy0uMjUuMDItLjUyLjA2LS43OS4wMi0uMjUuMDUtLjUgMC0uNzUtLjA5LjItLjAzLjQ4LS4wOS43LS4wMi4yOC0uMDguNTctLjA0Ljg1LjAyLjAyLjA1LjAyLjA3IDB6bS0uNjctLjI3Yy4wOS0uMy4wNy0uNjMuMDgtLjk0LS4wMS0uMTIuMDEtLjQ3LS4xLS40LjAzLjQzLjAzLjg4LS4wNiAxLjMyIDAgLjAzLjA1LjA1LjA4LjAyem0tLjYtLjVjLjEtLjI0LjE0LS41My4xLS43OS0uMTQgMC0uMDMuMzYtLjEuNS4wMS4wNi0uMTMuMzIgMCAuM3ptLS40My0uMmMuMDQtLjE2LjE1LS41
IDAtLjU3LjA1LjE4LS4xMi40NS0uMDMuNThsLjAzLS4wMXptMy40My0uMjJjLjIyLS4xMyAwLS42Ni0uMjItLjM1LS4xLjE1IDAgLjQyLjIyLjM1em0uMzQtLjA2Yy4yOCAwIC4xOC0uNTMtLjA5LS4zNC0uMTIuMDctLjEyLjQyLjA5LjM0em0uNDQtLjAxYy0uMDEtLjEuMTItLjM4LS4wMS0uNC0uMDIuMS0uMTcuNDEgMCAuNHptLTEuMjYtLjAyYzAtLjEzLjAyLS41NS0uMTQtLjUuMDguMTItLjAyLjUxLjE0LjV6Ii8+PHBhdGggZD0iTTUuNTMgMy4yN2wtLjI1LjAzYS41Ny41NyAwIDAxLS4xMy4yNGwtLjA2LS4yLS4zNy4wNGMwIC4xLS4wMy4xOS0uMS4yOGwtLjEzLS4yNC0uMjIuMDNzLS4xNS4yOC0uMTUuNDYuMDcuNDYuMzcuNTNoLjAzbC4xNS0uMjUuMDguMjguMjUuMDIuMTItLjJjLjA1LjEuMS4yLjEyLjJsLjMuMDJ2LS4wOHMtLjEyLS4yNS0uMTItLjM5Yy0uMDItLjI3LjExLS43Ny4xMS0uNzd6IiBmaWxsLW9wYWNpdHk9Ii41Ii8+PC9zdmc+"
user: "65534"
    # To grant only a specific set of capabilities, without any of Docker's
    # default container capabilities, we need to drop "all" capabilities.
    # Regardless of the order of the YAML dictionary keys (there ain't one),
    # Docker drops all capabilities first and only then adds the listed ones.
    # See also: https://stackoverflow.com/a/63219871.
cap_drop:
- ALL
cap_add:
- CAP_SYS_ADMIN # change namespaces
- CAP_SYS_CHROOT # change mount namespaces
- CAP_SYS_PTRACE # access nsfs namespace information
- CAP_DAC_READ_SEARCH # access/scan /proc/[$PID]/fd itself
- CAP_DAC_OVERRIDE # access container engine unix domain sockets without being rude, erm, root.
- CAP_NET_RAW # pingin' 'round
- CAP_NET_ADMIN # 'nuff tables
security_opt:
      # The default Docker container AppArmor profile blocks namespace
      # discovery, because reading from /proc/$PID/ns/* is considered to be
      # a ptrace read/ready operation.
- apparmor:unconfined
# Essential since we need full PID view.
pid: "host"
cgroup: host
networks:
99-ghost-in-da-edge:
priority: 100
edgeshark:
image: "ghcr.io/siemens/packetflix"
pull_policy: "always"
read_only: true
restart: "unless-stopped"
entrypoint:
- "/packetflix"
- "--port=5001"
- "--discovery-service=gostwire.ghost-in-da-edge"
- "--gw-port=5000"
- "--proxy-discovery"
- "--debug"
ports:
- "127.0.0.1:5001:5001"
    # Run as a non-root user (baked into the metadata of the image anyway).
user: "65534"
    # To grant only a specific set of capabilities, without any of Docker's
    # default container capabilities, we need to drop "all" capabilities.
    # Regardless of the order of the YAML dictionary keys (there ain't one),
    # Docker drops all capabilities first and only then adds the listed ones.
    # See also: https://stackoverflow.com/a/63219871.
cap_drop:
- ALL
cap_add:
- CAP_SYS_ADMIN # change namespaces
- CAP_SYS_CHROOT # change mount namespaces
- CAP_SYS_PTRACE # access nsfs namespace information
- CAP_NET_ADMIN # allow dumpcap to control promisc. mode
- CAP_NET_RAW # capture raw packets, and not that totally burnt stuff
security_opt:
      # The default Docker container AppArmor profile blocks namespace
      # discovery, because reading from /proc/$PID/ns/* is considered to be
      # a ptrace read/ready operation.
- apparmor:unconfined
# Essential since we need full PID view.
pid: "host"
networks:
99-ghost-in-da-edge:
priority: 100
# IPv6 is currently causing flakiness with GH actions and on our testbed.
# Disabling until there's more time to debug.
networks:
# Using a separate subnet here so that the CIDR resource for 172.20.0.0 won't catch DNS resources
dns_resources:
ipam:
config:
- subnet: 172.21.0.0/24
resources:
# enable_ipv6: true
ipam:
config:
- subnet: 172.20.0.0/24
# - subnet: fc00:ff:1::/48
app:
# enable_ipv6: true
ipam:
config:
- subnet: 172.28.0.0/24
# Currently not working on testbed
# - subnet: fc00:ff:2::/48
99-ghost-in-da-edge:
name: ghost-in-da-edge
internal: false
volumes:
postgres-data:
elixir-build-cache:
assets-build-cache: