From e2dce710f17690c97940523861c1ebc30775f41c Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Mon, 15 Sep 2025 03:37:39 +0000 Subject: [PATCH] refactor: tidy up `docker-compose.yml` (#10334) Our `docker-compose.yml` file has grown to a degree where it is almost unmanageable. Docker compose offers several tools to deal with complex compose setups, like include files and yaml anchors. We refactor our setup using these tools to organise these services and their configuration a bit better. --- docker-compose.yml | 563 ++++------------------------------ scripts/compose/edgeshark.yml | 92 ++++++ scripts/compose/portal.yml | 95 ++++++ scripts/compose/relay.yml | 33 ++ scripts/compose/resources.yml | 60 ++++ scripts/compose/router.yml | 13 + 6 files changed, 361 insertions(+), 495 deletions(-) create mode 100644 scripts/compose/edgeshark.yml create mode 100644 scripts/compose/portal.yml create mode 100644 scripts/compose/relay.yml create mode 100644 scripts/compose/resources.yml create mode 100644 scripts/compose/router.yml diff --git a/docker-compose.yml b/docker-compose.yml index ab0e2480d..7b2d5e303 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,64 +1,32 @@ # Run with DOCKER_BUILD_TARGET=dev to build Rust inside Docker +include: + - scripts/compose/resources.yml + - scripts/compose/edgeshark.yml + - scripts/compose/portal.yml -x-ip-forwarding: &ip-forwarding - - net.ipv4.ip_forward=1 - - net.ipv6.conf.all.disable_ipv6=0 - - net.ipv6.conf.default.disable_ipv6=0 - - net.ipv6.conf.all.forwarding=1 - - net.ipv6.conf.default.forwarding=1 +x-erlang-cluster: &erlang-cluster + ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd" + ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}' + +x-portal-urls: &portal-urls + WEB_EXTERNAL_URL: http://localhost:8080/ + API_EXTERNAL_URL: http://localhost:8081/ + +x-phoenix-config: &phoenix-config + PHOENIX_HTTP_WEB_PORT: "8080" + PHOENIX_HTTP_API_PORT: "8081" + PHOENIX_SECURE_COOKIES: "false" + +x-health-check: &health-check + interval: 1s + retries: 10 + timeout: 1s services: - # Dependencies - postgres: - # TODO: Enable pgaudit on dev instance. 
See https://github.com/pgaudit/pgaudit/issues/44#issuecomment-455090262 - image: postgres:17 - command: - ["postgres", "-c", "wal_level=logical", "-c", "max_connections=200"] - volumes: - - postgres-data:/var/lib/postgresql/data - environment: - POSTGRES_USER: postgres - POSTGRES_PASSWORD: postgres - POSTGRES_DB: firezone_dev - healthcheck: - test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"] - start_period: 20s - interval: 30s - retries: 5 - timeout: 5s - ports: - - 5432:5432/tcp - networks: - - app-internal - - vault: - image: vault:1.13.3 - environment: - VAULT_ADDR: "http://127.0.0.1:8200" - VAULT_DEV_ROOT_TOKEN_ID: "firezone" - VAULT_LOG_LEVEL: "debug" - ports: - - 8200:8200/tcp - cap_add: - - IPC_LOCK - networks: - - app-internal - healthcheck: - test: - [ - "CMD", - "wget", - "--spider", - "--proxy", - "off", - "http://127.0.0.1:8200/v1/sys/health?standbyok=true", - ] - interval: 10s - timeout: 3s - retries: 10 - start_period: 5s - web: + extends: + file: scripts/compose/portal.yml + service: common build: context: elixir args: @@ -68,69 +36,23 @@ services: ports: - 8080:8080/tcp environment: - # Web Server - WEB_EXTERNAL_URL: http://localhost:8080/ - API_EXTERNAL_URL: http://localhost:8081/ - PHOENIX_HTTP_WEB_PORT: "8080" - PHOENIX_HTTP_API_PORT: "8081" - PHOENIX_SECURE_COOKIES: "false" - # Erlang - ERLANG_DISTRIBUTION_PORT: 9000 - ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd" - ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}' - RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV" + <<: [*portal-urls, *erlang-cluster, *phoenix-config] RELEASE_HOSTNAME: "web.cluster.local" RELEASE_NAME: "web" - # Database - RUN_CONDITIONAL_MIGRATIONS: "true" - DATABASE_HOST: postgres - DATABASE_PORT: 5432 - DATABASE_NAME: firezone_dev - DATABASE_USER: postgres - DATABASE_PASSWORD: postgres - # Auth - AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock" - # Secrets - TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - # Debugging LOG_LEVEL: "debug" - # Emails - OUTBOUND_EMAIL_FROM: "public-noreply@firez.one" - OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark" - ## Warning: The token is for the blackhole Postmark server created in a separate isolated account, - ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard. 
- OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}' - # Seeds - STATIC_SEEDS: "true" - # Feature flags - FEATURE_POLICY_CONDITIONS_ENABLED: "true" - FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true" - FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true" - FEATURE_IDP_SYNC_ENABLED: "true" - FEATURE_REST_API_ENABLED: "true" - FEATURE_INTERNET_RESOURCE_ENABLED: "true" healthcheck: test: ["CMD-SHELL", "curl -f localhost:8080/healthz"] - start_period: 10s - interval: 30s - retries: 5 - timeout: 5s + <<: *health-check depends_on: vault: condition: "service_healthy" postgres: condition: "service_healthy" - api-router: - condition: "service_healthy" - networks: - - app-internal api: + extends: + file: scripts/compose/portal.yml + service: common build: context: elixir args: @@ -140,52 +62,10 @@ services: ports: - 8081:8081/tcp environment: - # Web Server - WEB_EXTERNAL_URL: http://localhost:8080/ - API_EXTERNAL_URL: http://localhost:8081/ - PHOENIX_HTTP_WEB_PORT: "8080" - PHOENIX_HTTP_API_PORT: "8081" - PHOENIX_SECURE_COOKIES: "false" - # Erlang - ERLANG_DISTRIBUTION_PORT: 9000 - ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd" - ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}' - RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV" + <<: [*portal-urls, *erlang-cluster, *phoenix-config] RELEASE_HOSTNAME: "api.cluster.local" RELEASE_NAME: "api" - # Database - RUN_CONDITIONAL_MIGRATIONS: "true" - DATABASE_HOST: postgres - DATABASE_PORT: 5432 - DATABASE_NAME: firezone_dev - DATABASE_USER: postgres - DATABASE_PASSWORD: postgres - # Auth - AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock" - # Secrets - TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - # Debugging LOG_LEVEL: "debug" - # Emails - OUTBOUND_EMAIL_FROM: "public-noreply@firez.one" - OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark" - ## Warning: The token is for the blackhole Postmark server created in a separate isolated account, - ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard. 
- OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}' - # Seeds - STATIC_SEEDS: "true" - # Feature flags - FEATURE_POLICY_CONDITIONS_ENABLED: "true" - FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true" - FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true" - FEATURE_IDP_SYNC_ENABLED: "true" - FEATURE_REST_API_ENABLED: "true" - FEATURE_INTERNET_RESOURCE_ENABLED: "true" user: root # Needed to run `ip route` commands cap_add: - NET_ADMIN # Needed to run `tc` commands to add simulated delay @@ -209,22 +89,16 @@ services: condition: "service_healthy" healthcheck: test: ["CMD-SHELL", "curl -f localhost:8081/healthz"] - start_period: 10s - interval: 30s - retries: 5 - timeout: 5s + <<: *health-check networks: app-internal: ipv4_address: 172.28.0.100 ipv6_address: 172:28:0::100 api-router: - build: - context: scripts/router - cap_add: - - NET_ADMIN - sysctls: *ip-forwarding - privileged: true + extends: + file: scripts/compose/router.yml + service: router environment: PORT_FORWARDS: | 8081 172.28.0.100 tcp @@ -241,6 +115,9 @@ services: interface_name: internet domain: + extends: + file: scripts/compose/portal.yml + service: common build: context: elixir args: @@ -248,63 +125,25 @@ services: image: ${DOMAIN_IMAGE:-ghcr.io/firezone/domain}:${DOMAIN_TAG:-main} hostname: domain.cluster.local environment: - # Erlang - ERLANG_DISTRIBUTION_PORT: 9000 - ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd" - ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local","domain@domain.cluster.local"]}' - RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV" + <<: *erlang-cluster RELEASE_HOSTNAME: "domain.cluster.local" RELEASE_NAME: "domain" - # Database - RUN_CONDITIONAL_MIGRATIONS: "true" - DATABASE_HOST: postgres - DATABASE_PORT: 5432 - DATABASE_NAME: firezone_dev - DATABASE_USER: postgres - DATABASE_PASSWORD: postgres - # Auth - AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock" - # Secrets - TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - # Debugging LOG_LEVEL: "debug" - # Emails - OUTBOUND_EMAIL_FROM: "public-noreply@firez.one" - OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark" - ## Warning: The token is for the blackhole Postmark server created in a separate isolated account, - ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard. 
- OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}' - # Seeds - STATIC_SEEDS: "true" - # Feature flags - FEATURE_POLICY_CONDITIONS_ENABLED: "true" - FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true" - FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true" - FEATURE_IDP_SYNC_ENABLED: "true" - FEATURE_REST_API_ENABLED: "true" - FEATURE_INTERNET_RESOURCE_ENABLED: "true" healthcheck: test: ["CMD-SHELL", "curl -f localhost:4000/healthz"] - start_period: 10s - interval: 30s - retries: 5 - timeout: 5s + <<: *health-check depends_on: vault: condition: "service_healthy" postgres: condition: "service_healthy" - networks: - - app-internal # This is a service container which allows to run mix tasks for local development # without having to install Elixir and Erlang on the host machine. elixir: + extends: + file: scripts/compose/portal.yml + service: common build: context: elixir target: compiler @@ -313,55 +152,17 @@ services: image: ${ELIXIR_IMAGE:-ghcr.io/firezone/elixir}:${ELIXIR_TAG:-main} hostname: elixir environment: - # Web Server - WEB_EXTERNAL_URL: http://localhost:8080/ - API_EXTERNAL_URL: http://localhost:8081/ - # Erlang - ERLANG_DISTRIBUTION_PORT: 9000 - RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV" + <<: *portal-urls RELEASE_HOSTNAME: "mix.cluster.local" RELEASE_NAME: "mix" - # Database - RUN_CONDITIONAL_MIGRATIONS: "true" - DATABASE_HOST: postgres - DATABASE_PORT: 5432 - DATABASE_NAME: firezone_dev - DATABASE_USER: postgres - DATABASE_PASSWORD: postgres - # Auth - AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock" - # Secrets - TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" - LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" - COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" # Higher log level not to make seeds output too verbose LOG_LEVEL: "info" - # Emails - OUTBOUND_EMAIL_FROM: "public-noreply@firez.one" - OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark" - ## Warning: The token is for the blackhole Postmark server created in a separate isolated account, - ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard. 
- OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}' # Mix env should be set to prod to use secrets declared above, # otherwise seeds will generate invalid tokens MIX_ENV: "prod" - # Seeds - STATIC_SEEDS: "true" - # Feature flags - FEATURE_POLICY_CONDITIONS_ENABLED: "true" - FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true" - FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true" - FEATURE_IDP_SYNC_ENABLED: "true" - FEATURE_REST_API_ENABLED: "true" - FEATURE_INTERNET_RESOURCE_ENABLED: "true" depends_on: postgres: condition: "service_healthy" - networks: - - app-internal # Run with DOCKER_BUILD_TARGET=dev to build Rust inside Docker client: @@ -417,14 +218,10 @@ services: - "api:203.0.113.10" - "api:203:0:113::10" - # Client Router (NAT only, symmetric NAT) client-router: - build: - context: scripts/router - cap_add: - - NET_ADMIN - privileged: true - sysctls: *ip-forwarding + extends: + file: scripts/compose/router.yml + service: router environment: MASQUERADE_TYPE: ${CLIENT_MASQUERADE:-} networks: @@ -438,6 +235,7 @@ services: gateway: healthcheck: test: ["CMD-SHELL", "ip link | grep tun-firezone"] + <<: *health-check environment: FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkMjI3NDU2MGItZTk3Yi00NWU0LThiMzQtNjc5Yzc2MTdlOThkbQAAADhPMDJMN1VTMkozVklOT01QUjlKNklMODhRSVFQNlVPOEFRVk82VTVJUEwwVkpDMjJKR0gwPT09PW4GAAH8sImUAWIAAVGA.tAm2O9FcyF67VAF3rZdwQpeADrYOIs3S2l2K51G26OM" RUST_LOG: ${RUST_LOG:-phoenix_channel=trace,firezone_gateway=trace,wire=trace,connlib_gateway_shared=trace,firezone_tunnel=trace,connlib_shared=trace,phoenix_channel=debug,boringtun=debug,snownet=debug,str0m=debug,info} @@ -470,7 +268,12 @@ services: image: ${GATEWAY_IMAGE:-ghcr.io/firezone/debug/gateway}:${GATEWAY_TAG:-main} cap_add: - NET_ADMIN - sysctls: *ip-forwarding + sysctls: + - net.ipv4.ip_forward=1 + - net.ipv6.conf.all.disable_ipv6=0 + - net.ipv6.conf.default.disable_ipv6=0 + - net.ipv6.conf.all.forwarding=1 + - net.ipv6.conf.default.forwarding=1 devices: - "/dev/net/tun:/dev/net/tun" depends_on: @@ -488,14 +291,10 @@ services: - "api:203.0.113.10" - "api:203:0:113::10" - # Gateway Router (NAT only, cone NAT) gateway-router: - build: - context: scripts/router - cap_add: - - NET_ADMIN - privileged: true - sysctls: *ip-forwarding + extends: + file: scripts/compose/router.yml + service: router environment: MASQUERADE_TYPE: ${GATEWAY_MASQUERADE:-} networks: @@ -506,68 +305,13 @@ services: internet: interface_name: internet - httpbin: - image: kennethreitz/httpbin - # Needed to bind to IPv6 - command: ["gunicorn", "-b", "[::]:80", "httpbin:app"] - healthcheck: - test: ["CMD-SHELL", "ps -C gunicorn"] - networks: - resources: - ipv4_address: 172.20.0.100 - ipv6_address: 172:20:0::100 - - download.httpbin: # Named after `httpbin` because that is how DNS resources are configured for the test setup. 
- build: - target: ${DOCKER_BUILD_TARGET:-debug} - context: rust - dockerfile: Dockerfile - args: - PACKAGE: http-test-server - image: ${HTTP_TEST_SERVER_IMAGE:-ghcr.io/firezone/debug/http-test-server}:${HTTP_TEST_SERVER_TAG:-main} - environment: - PORT: 80 - networks: - dns_resources: - ipv4_address: 172.21.0.101 - - dns.httpbin.search.test: - image: kennethreitz/httpbin - healthcheck: - test: ["CMD-SHELL", "ps -C gunicorn"] - networks: - dns_resources: - ipv4_address: 172.21.0.100 - - iperf3: - image: mlabbe/iperf3 - healthcheck: - test: - [ - "CMD-SHELL", - "(cat /proc/net/tcp | grep 5201) && (cat /proc/net/udp | grep 5201)", - ] - command: -s -V - networks: - resources: - ipv4_address: 172.20.0.110 - ipv6_address: 172:20:0::110 - relay-1: + extends: + file: scripts/compose/relay.yml + service: relay environment: PUBLIC_IP4_ADDR: 203.0.113.101 PUBLIC_IP6_ADDR: 203:0:113::101 - # LOWEST_PORT: 55555 - # HIGHEST_PORT: 55666 - # Token for self-hosted Relay - # FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAEngLBONAWIAAVGA.E-f2MFdGMX7JTL2jwoHBdWcUd2G3UNz2JRZLbQrlf0k" - # Token for global Relay - FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo" - RUST_LOG: ${RUST_LOG:-debug} - RUST_BACKTRACE: 1 - FIREZONE_API_URL: ws://api:8081 - OTLP_GRPC_ENDPOINT: otel:4317 - EBPF_OFFLOADING: eth0 command: - sh - -c @@ -579,51 +323,18 @@ services: ip -6 route add 203:0:113::/64 via 172:29:1::254 firezone-relay - privileged: true - init: true - build: - target: ${DOCKER_BUILD_TARGET:-debug} - context: rust - dockerfile: Dockerfile - args: - PACKAGE: firezone-relay - image: ${RELAY_IMAGE:-ghcr.io/firezone/debug/relay}:${RELAY_TAG:-main} - sysctls: - - net.ipv6.conf.all.disable_ipv6=0 - - net.ipv6.conf.default.disable_ipv6=0 - healthcheck: - test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"] - start_period: 10s - interval: 30s - retries: 5 - timeout: 5s depends_on: relay-1-router: condition: "service_healthy" - api: - condition: "service_healthy" - # ports: - # NOTE: Only 111 ports are used for local dev / testing because Docker Desktop - # allocates a userland proxy process for each forwarded port X_X. - # - # Large ranges here will bring your machine to its knees. 
- # - "55555-55666:55555-55666/udp" - # - 3478:3478/udp networks: relay-1-internal: ipv4_address: 172.29.1.100 ipv6_address: 172:29:1::100 - extra_hosts: - - "api:203.0.113.10" - - "api:203:0:113::10" relay-1-router: - build: - context: scripts/router - cap_add: - - NET_ADMIN - privileged: true - sysctls: *ip-forwarding + extends: + file: scripts/compose/router.yml + service: router environment: PORT_FORWARDS: | 3478 172.29.1.100 udp @@ -641,18 +352,12 @@ services: interface_name: internet relay-2: + extends: + file: scripts/compose/relay.yml + service: relay environment: PUBLIC_IP4_ADDR: 203.0.113.102 PUBLIC_IP6_ADDR: 203:0:113::102 - # Token for self-hosted Relay - # FIREZONE_TOKEN: ".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAEngLBONAWIAAVGA.E-f2MFdGMX7JTL2jwoHBdWcUd2G3UNz2JRZLbQrlf0k" - # Token for global Relay - FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo" - RUST_LOG: ${RUST_LOG:-debug} - RUST_BACKTRACE: 1 - FIREZONE_API_URL: ws://api:8081 - OTLP_GRPC_ENDPOINT: otel:4317 - EBPF_OFFLOADING: eth0 command: - sh - -c @@ -664,44 +369,18 @@ services: ip -6 route add 203:0:113::/64 via 172:29:2::254 firezone-relay - privileged: true - init: true - build: - target: ${DOCKER_BUILD_TARGET:-debug} - context: rust - dockerfile: Dockerfile - args: - PACKAGE: firezone-relay - image: ${RELAY_IMAGE:-ghcr.io/firezone/debug/relay}:${RELAY_TAG:-main} - sysctls: - - net.ipv6.conf.all.disable_ipv6=0 - - net.ipv6.conf.default.disable_ipv6=0 - healthcheck: - test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"] - start_period: 10s - interval: 30s - retries: 5 - timeout: 5s depends_on: relay-2-router: condition: "service_healthy" - api: - condition: "service_healthy" networks: relay-2-internal: ipv4_address: 172.29.2.100 ipv6_address: 172:29:2::100 - extra_hosts: - - "api:203.0.113.10" - - "api:203:0:113::10" relay-2-router: - build: - context: scripts/router - cap_add: - - NET_ADMIN - privileged: true - sysctls: *ip-forwarding + extends: + file: scripts/compose/router.yml + service: router environment: PORT_FORWARDS: | 3478 172.29.2.100 udp @@ -769,90 +448,6 @@ services: networks: app-internal: - # EdgeShark is useful for attaching wireshark to TUN devices within containers. 
It is reachable at http://localhost:5001 - # You'll also need the extcap plugin: https://github.com/siemens/cshargextcap - gostwire: - image: "ghcr.io/siemens/ghostwire" - restart: "unless-stopped" - read_only: true - entrypoint: - - "/gostwire" - - "--http=[::]:5000" - - "--brand=Edgeshark" - - "--brandicon=PHN2ZyB3aWR0aD0iMjQiIGhlaWdodD0iMjQiIHZpZXdCb3g9IjAgMCA2LjM1IDYuMzUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggZD0iTTUuNzMgNC41M2EuNC40IDAgMDEtLjA2LS4xMmwtLjAyLS4wNC0uMDMuMDNjLS4wNi4xMS0uMDcuMTItLjEuMS0uMDEtLjAyIDAtLjAzLjAzLS4xbC4wNi0uMS4wNC0uMDVjLjAzIDAgLjA0LjAxLjA2LjA5bC4wNC4xMmMuMDMuMDUuMDMuMDcgMCAuMDhoLS4wMnptLS43NS0uMDZjLS4wMi0uMDEtLjAxLS4wMy4wMS0uMDUuMDMtLjAyLjA1LS4wNi4wOC0uMTQuMDMtLjA4LjA1LS4xLjA4LS4wN2wuMDIuMDdjLjAyLjEuMDMuMTIuMDUuMTMuMDUuMDIuMDQuMDctLjAxLjA3LS4wNCAwLS4wNi0uMDMtLjA5LS4xMiAwLS4wMi0uMDEgMC0uMDQuMDRhLjM2LjM2IDAgMDEtLjA0LjA3Yy0uMDIuMDItLjA1LjAyLS4wNiAwem0tLjQ4LS4wM2MtLjAxLS4wMSAwLS4wMy4wMS0uMDZsLjA3LS4xM2MuMDYtLjEuMDctLjEzLjEtLjEyLjAyLjAyLjAzLjA0LjA0LjEuMDEuMDguMDIuMTEuMDQuMTR2LjA0Yy0uMDEuMDItLjA0LjAyLS4wNiAwbC0uMDMtLjEtLjAxLS4wNS0uMDYuMDlhLjQ2LjQ2IDAgMDEtLjA1LjA4Yy0uMDIuMDItLjA0LjAyLS4wNSAwem0uMDgtLjc1YS4wNS4wNSAwIDAxLS4wMS0uMDRMNC41IDMuNWwtLjAyLS4wNGguMDNjLjAzLS4wMi4wMy0uMDEuMDguMDdsLjAzLjA1LjA3LS4xM2MwLS4wMyAwLS4wMy4wMi0uMDQuMDMgMCAuMDQgMCAuMDQuMDJhLjYuNiAwIDAxLS4xMi4yNmMtLjAzLjAyLS4wMy4wMi0uMDUgMHptLjU0LS4xYy0uMDEtLjAyLS4wMi0uMDMtLjAyLS4wOGEuNDQuNDQgMCAwMC0uMDMtLjA4Yy0uMDItLjA1LS4wMi0uMDggMC0uMDguMDUtLjAxLjA2IDAgLjA2LjA0bC4wMy4wOGMuMDIgMCAuMDYtLjA4LjA3LS4xMmwuMDEtLjAyLjA2LS4wMS0uMTIuMjZoLS4wNnptLjQ3LS4wOGwtLjAyLS4wOWEuNTUuNTUgMCAwMC0uMDItLjEyYzAtLjAyIDAtLjAyLjAyLS4wMmguMDNsLjAyLjExdi4wM2MuMDEgMCAuMDQtLjAzLjA2LS4wN2wuMDUtLjA5Yy4wMi0uMDIuMDYtLjAyLjA2IDBsLS4xNC4yNWMtLjAxLjAxLS4wNS4wMi0uMDYgMHptLS43Ni0uNDFjLS4wNi0uMDMtLjA4LS4xNC0uMDUtLjJsLjA0LS4wM2MuMDMtLjAyLjAzLS4wMi4wNy0uMDIuMDYgMCAuMDkuMDEuMTIuMDUuMDMuMDUuMDIuMTItLjAzLjE2LS4wNS4wNS0uMS4wNi0uMTUuMDR6bS4zNS0uMDVBLjEzLjEzIDAgMDE1LjE0IDNsLS4wMS0uMDVjMC0uMDQgMC0uMDUuMDYtLjFsLjA2LS4wMmMuMDcgMCAuMTEuMDUuMS4xMmwtLjAxLjA2Yy0uMDQuMDQtLjEyLjA1LS4xNi4wM3oiLz48cGF0aCBkPSJNMy44NCA1LjQ4Yy0uMTctLjIxLS4wNC0uNS0uMDYtLjc0LjA1LS44My4xLTEuNjYuMDctMi41LjE3LS4xNC40NC4wOC41OS0uMDItLjIyLS4xMy0uNTQtLjEzLS44LS4xNC0uMTQuMDktLjUuMDItLjUuMTcuMi4wOC41My0uMTUuNjQuMDItLjAxLjgxLS4wMyAxLjYyLS4xIDIuNDItLjA1LjIzLjA3LjU2LS4xNC43MmE5LjUxIDkuNTEgMCAwMS0uNjYtLjUzYy4xMy0uMDQuMzItLjQuMS0uMi0uMjEuMjItLjUxLjI3LS43OS4zNS0uMTIuMDgtLjU0LjEyLS4zMi0uMTEuMi0uMjIuNDUtLjQyLjUtLjcyLS4xMy0uMTItLjEuMy0uMjUuMDgtLjIzLS4xNi0uNDMtLjM1LS42Ny0uNS0uMjEuMTItLjQ1LjIzLS43LjI5LS4xNy4xLS42MS4xNC0uMzItLjE0LjItLjIuNDMtLjQyLjQtLjcyLjAzLS4zMS0uMDgtLjYxLS4yLS44OWEyLjA3IDIuMDcgMCAwMC0uNDMtLjY0Yy0uMTgtLjE2LS4wMi0uMy4xNi0uMTkuMzcuMTcuNy40Ljk4LjcuMDkuMTcuMTMuNC4zNi4yNS40NC0uMDguOS0uMSAxLjM0LS4yMS4xMy0uMy4wNC0uNjQtLjEyLS45MSAwLS4xNy0uNDItLjM4LS4yMi0uNDYuMzguMS43Ny4yMyAxLjA4LjQ4LjMyLjE5LjY0LjQ1LjY5Ljg0LjEuMTguNS4xMi43MS4xOS4zMy4wNC42Ni4wOSAxIC4wOS4xNS4xMi0uMDguNDcgMCAuNjgtLjA4LjE1LS40LjA3LS41OS4xNC0uMTIuMTMtLjE0LjQzLS4xNS42NC0uMDUuMTUuMTguNDguMDYuNWExLjQgMS40IDAgMDEwLTEuMWMtLjItLjA2LS4zMy4wNi0uNTMuMDQtLjIzLjA0LS40NS4xLS42OC4xMi0uMTMuMi0uMjMuNTQtLjA2Ljc1LjEzLjE5LjMuMi40OC4yLjE4LjAzLjM2LjA1LjU1LjA0LjE4LS4wMS4zNi4wNy41Mi4wNS4yLjAxLjEuNC4wNy41Mi0uMzguMDctLjc2LjE2LTEuMTQuMjUtLjMuMDQtLjU4LjE0LS44Ny4xOXptLjQyLS4zOGMuMDgtLjEuMDItLjU0LS4wNC0uMjQgMCAuMDYtLjEuMy4wNC4yNHptLjU4LS4xYy4wOS0uMTItLjA0LS40Mi0uMDctLjE0LS4wMi4wNi0uMDIuMi4wNy4xNHptLjU2LS4wNmMuMTItLjE0LS4wOC0uMzQtLjA4LS4wOC0uMDEuMDQuMDIuMTMuMDguMDh6bS0yLjE3LS42Yy4wMy0uMjUuMDItLjUyLjA2LS43OS4wMi0uMjUuMDUtLjUgMC0uNzUtLjA5LjItLjAzLjQ4L
S4wOS43LS4wMi4yOC0uMDguNTctLjA0Ljg1LjAyLjAyLjA1LjAyLjA3IDB6bS0uNjctLjI3Yy4wOS0uMy4wNy0uNjMuMDgtLjk0LS4wMS0uMTIuMDEtLjQ3LS4xLS40LjAzLjQzLjAzLjg4LS4wNiAxLjMyIDAgLjAzLjA1LjA1LjA4LjAyem0tLjYtLjVjLjEtLjI0LjE0LS41My4xLS43OS0uMTQgMC0uMDMuMzYtLjEuNS4wMS4wNi0uMTMuMzIgMCAuM3ptLS40My0uMmMuMDQtLjE2LjE1LS41IDAtLjU3LjA1LjE4LS4xMi40NS0uMDMuNThsLjAzLS4wMXptMy40My0uMjJjLjIyLS4xMyAwLS42Ni0uMjItLjM1LS4xLjE1IDAgLjQyLjIyLjM1em0uMzQtLjA2Yy4yOCAwIC4xOC0uNTMtLjA5LS4zNC0uMTIuMDctLjEyLjQyLjA5LjM0em0uNDQtLjAxYy0uMDEtLjEuMTItLjM4LS4wMS0uNC0uMDIuMS0uMTcuNDEgMCAuNHptLTEuMjYtLjAyYzAtLjEzLjAyLS41NS0uMTQtLjUuMDguMTItLjAyLjUxLjE0LjV6Ii8+PHBhdGggZD0iTTUuNTMgMy4yN2wtLjI1LjAzYS41Ny41NyAwIDAxLS4xMy4yNGwtLjA2LS4yLS4zNy4wNGMwIC4xLS4wMy4xOS0uMS4yOGwtLjEzLS4yNC0uMjIuMDNzLS4xNS4yOC0uMTUuNDYuMDcuNDYuMzcuNTNoLjAzbC4xNS0uMjUuMDguMjguMjUuMDIuMTItLjJjLjA1LjEuMS4yLjEyLjJsLjMuMDJ2LS4wOHMtLjEyLS4yNS0uMTItLjM5Yy0uMDItLjI3LjExLS43Ny4xMS0uNzd6IiBmaWxsLW9wYWNpdHk9Ii41Ii8+PC9zdmc+" - user: "65534" - # In order to set only exactly a specific set of capabilities without - # any additional Docker container default capabilities, we need to drop - # "all" capabilities. Regardless of the order (there ain't one) of YAML - # dictionary keys, Docker carries out dropping all capabilities first, - # and only then adds capabilities. See also: - # https://stackoverflow.com/a/63219871. - cap_drop: - - ALL - cap_add: - - CAP_SYS_ADMIN # change namespaces - - CAP_SYS_CHROOT # change mount namespaces - - CAP_SYS_PTRACE # access nsfs namespace information - - CAP_DAC_READ_SEARCH # access/scan /proc/[$PID]/fd itself - - CAP_DAC_OVERRIDE # access container engine unix domain sockets without being rude, erm, root. - - CAP_NET_RAW # pingin' 'round - - CAP_NET_ADMIN # 'nuff tables - security_opt: - # The default Docker container AppArmor profile blocks namespace - # discovery, due to reading from /proc/$PID/ns/* is considered to be - # ptrace read/ready operations. - - apparmor:unconfined - # Essential since we need full PID view. - pid: "host" - cgroup: host - networks: - 99-ghost-in-da-edge: - priority: 100 - - edgeshark: - image: "ghcr.io/siemens/packetflix" - read_only: true - restart: "unless-stopped" - entrypoint: - - "/packetflix" - - "--port=5001" - - "--discovery-service=gostwire.ghost-in-da-edge" - - "--gw-port=5000" - - "--proxy-discovery" - - "--debug" - - ports: - - "127.0.0.1:5001:5001" - - # Run as non-root user (baked into the meta data of the image anyway). - user: "65534" - - # In order to set only exactly a specific set of capabilities without - # any additional Docker container default capabilities, we need to drop - # "all" capabilities. Regardless of the order (there ain't one) of YAML - # dictionary keys, Docker carries out dropping all capabilities first, - # and only then adds capabilities. See also: - # https://stackoverflow.com/a/63219871. - cap_drop: - - ALL - cap_add: - - CAP_SYS_ADMIN # change namespaces - - CAP_SYS_CHROOT # change mount namespaces - - CAP_SYS_PTRACE # access nsfs namespace information - - CAP_NET_ADMIN # allow dumpcap to control promisc. mode - - CAP_NET_RAW # capture raw packets, and not that totally burnt stuff - security_opt: - # The default Docker container AppArmor profile blocks namespace - # discovery, due to reading from /proc/$PID/ns/* is considered to be - # ptrace read/ready operations. - - apparmor:unconfined - - # Essential since we need full PID view. 
- pid: "host" - networks: - 99-ghost-in-da-edge: - priority: 100 - networks: # Internet network - where all public IPs live internet: @@ -896,25 +491,3 @@ networks: config: - subnet: 172.31.0.0/24 - subnet: 172:31:0::/64 - - dns_resources: - ipam: - config: - - subnet: 172.21.0.0/24 - - resources: - enable_ipv6: true - ipam: - config: - - subnet: 172.20.0.0/24 - - subnet: 172:20:0::/64 - - # Monitoring network - 99-ghost-in-da-edge: - name: ghost-in-da-edge - internal: false - -volumes: - postgres-data: - elixir-build-cache: - assets-build-cache: diff --git a/scripts/compose/edgeshark.yml b/scripts/compose/edgeshark.yml new file mode 100644 index 000000000..a43940f44 --- /dev/null +++ b/scripts/compose/edgeshark.yml @@ -0,0 +1,92 @@ +# requires docker compose plugin (=v2) +# +# wget -q --no-cache -O - https://github.com/siemens/edgeshark/raw/main/deployments/wget/docker-compose.yaml | docker compose -f - up +name: "edgeshark" +services: + gostwire: + image: "ghcr.io/siemens/ghostwire" + restart: "unless-stopped" + read_only: true + entrypoint: + - "/gostwire" + - "--http=[::]:5000" + - "--brand=Edgeshark" + - "--brandicon=PHN2ZyB3aWR0aD0iMjQiIGhlaWdodD0iMjQiIHZpZXdCb3g9IjAgMCA2LjM1IDYuMzUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PHBhdGggZD0iTTUuNzMgNC41M2EuNC40IDAgMDEtLjA2LS4xMmwtLjAyLS4wNC0uMDMuMDNjLS4wNi4xMS0uMDcuMTItLjEuMS0uMDEtLjAyIDAtLjAzLjAzLS4xbC4wNi0uMS4wNC0uMDVjLjAzIDAgLjA0LjAxLjA2LjA5bC4wNC4xMmMuMDMuMDUuMDMuMDcgMCAuMDhoLS4wMnptLS43NS0uMDZjLS4wMi0uMDEtLjAxLS4wMy4wMS0uMDUuMDMtLjAyLjA1LS4wNi4wOC0uMTQuMDMtLjA4LjA1LS4xLjA4LS4wN2wuMDIuMDdjLjAyLjEuMDMuMTIuMDUuMTMuMDUuMDIuMDQuMDctLjAxLjA3LS4wNCAwLS4wNi0uMDMtLjA5LS4xMiAwLS4wMi0uMDEgMC0uMDQuMDRhLjM2LjM2IDAgMDEtLjA0LjA3Yy0uMDIuMDItLjA1LjAyLS4wNiAwem0tLjQ4LS4wM2MtLjAxLS4wMSAwLS4wMy4wMS0uMDZsLjA3LS4xM2MuMDYtLjEuMDctLjEzLjEtLjEyLjAyLjAyLjAzLjA0LjA0LjEuMDEuMDguMDIuMTEuMDQuMTR2LjA0Yy0uMDEuMDItLjA0LjAyLS4wNiAwbC0uMDMtLjEtLjAxLS4wNS0uMDYuMDlhLjQ2LjQ2IDAgMDEtLjA1LjA4Yy0uMDIuMDItLjA0LjAyLS4wNSAwem0uMDgtLjc1YS4wNS4wNSAwIDAxLS4wMS0uMDRMNC41IDMuNWwtLjAyLS4wNGguMDNjLjAzLS4wMi4wMy0uMDEuMDguMDdsLjAzLjA1LjA3LS4xM2MwLS4wMyAwLS4wMy4wMi0uMDQuMDMgMCAuMDQgMCAuMDQuMDJhLjYuNiAwIDAxLS4xMi4yNmMtLjAzLjAyLS4wMy4wMi0uMDUgMHptLjU0LS4xYy0uMDEtLjAyLS4wMi0uMDMtLjAyLS4wOGEuNDQuNDQgMCAwMC0uMDMtLjA4Yy0uMDItLjA1LS4wMi0uMDggMC0uMDguMDUtLjAxLjA2IDAgLjA2LjA0bC4wMy4wOGMuMDIgMCAuMDYtLjA4LjA3LS4xMmwuMDEtLjAyLjA2LS4wMS0uMTIuMjZoLS4wNnptLjQ3LS4wOGwtLjAyLS4wOWEuNTUuNTUgMCAwMC0uMDItLjEyYzAtLjAyIDAtLjAyLjAyLS4wMmguMDNsLjAyLjExdi4wM2MuMDEgMCAuMDQtLjAzLjA2LS4wN2wuMDUtLjA5Yy4wMi0uMDIuMDYtLjAyLjA2IDBsLS4xNC4yNWMtLjAxLjAxLS4wNS4wMi0uMDYgMHptLS43Ni0uNDFjLS4wNi0uMDMtLjA4LS4xNC0uMDUtLjJsLjA0LS4wM2MuMDMtLjAyLjAzLS4wMi4wNy0uMDIuMDYgMCAuMDkuMDEuMTIuMDUuMDMuMDUuMDIuMTItLjAzLjE2LS4wNS4wNS0uMS4wNi0uMTUuMDR6bS4zNS0uMDVBLjEzLjEzIDAgMDE1LjE0IDNsLS4wMS0uMDVjMC0uMDQgMC0uMDUuMDYtLjFsLjA2LS4wMmMuMDcgMCAuMTEuMDUuMS4xMmwtLjAxLjA2Yy0uMDQuMDQtLjEyLjA1LS4xNi4wM3oiLz48cGF0aCBkPSJNMy44NCA1LjQ4Yy0uMTctLjIxLS4wNC0uNS0uMDYtLjc0LjA1LS44My4xLTEuNjYuMDctMi41LjE3LS4xNC40NC4wOC41OS0uMDItLjIyLS4xMy0uNTQtLjEzLS44LS4xNC0uMTQuMDktLjUuMDItLjUuMTcuMi4wOC41My0uMTUuNjQuMDItLjAxLjgxLS4wMyAxLjYyLS4xIDIuNDItLjA1LjIzLjA3LjU2LS4xNC43MmE5LjUxIDkuNTEgMCAwMS0uNjYtLjUzYy4xMy0uMDQuMzItLjQuMS0uMi0uMjEuMjItLjUxLjI3LS43OS4zNS0uMTIuMDgtLjU0LjEyLS4zMi0uMTEuMi0uMjIuNDUtLjQyLjUtLjcyLS4xMy0uMTItLjEuMy0uMjUuMDgtLjIzLS4xNi0uNDMtLjM1LS42Ny0uNS0uMjEuMTItLjQ1LjIzLS43LjI5LS4xNy4xLS42MS4xNC0uMzItLjE0LjItLjIuNDMtLjQyLjQtLjcyLjAzLS4zMS0uMDgtLjYxLS4yLS44OWEyLjA3IDIuMDcgMCAwMC0uNDMtLjY0Yy0uMTgtLjE2LS4wMi0uMy4xNi0uMTkuMzcuMTcuNy40Ljk4LjcuMDkuMTcuMTMuNC4
zNi4yNS40NC0uMDguOS0uMSAxLjM0LS4yMS4xMy0uMy4wNC0uNjQtLjEyLS45MSAwLS4xNy0uNDItLjM4LS4yMi0uNDYuMzguMS43Ny4yMyAxLjA4LjQ4LjMyLjE5LjY0LjQ1LjY5Ljg0LjEuMTguNS4xMi43MS4xOS4zMy4wNC42Ni4wOSAxIC4wOS4xNS4xMi0uMDguNDcgMCAuNjgtLjA4LjE1LS40LjA3LS41OS4xNC0uMTIuMTMtLjE0LjQzLS4xNS42NC0uMDUuMTUuMTguNDguMDYuNWExLjQgMS40IDAgMDEwLTEuMWMtLjItLjA2LS4zMy4wNi0uNTMuMDQtLjIzLjA0LS40NS4xLS42OC4xMi0uMTMuMi0uMjMuNTQtLjA2Ljc1LjEzLjE5LjMuMi40OC4yLjE4LjAzLjM2LjA1LjU1LjA0LjE4LS4wMS4zNi4wNy41Mi4wNS4yLjAxLjEuNC4wNy41Mi0uMzguMDctLjc2LjE2LTEuMTQuMjUtLjMuMDQtLjU4LjE0LS44Ny4xOXptLjQyLS4zOGMuMDgtLjEuMDItLjU0LS4wNC0uMjQgMCAuMDYtLjEuMy4wNC4yNHptLjU4LS4xYy4wOS0uMTItLjA0LS40Mi0uMDctLjE0LS4wMi4wNi0uMDIuMi4wNy4xNHptLjU2LS4wNmMuMTItLjE0LS4wOC0uMzQtLjA4LS4wOC0uMDEuMDQuMDIuMTMuMDguMDh6bS0yLjE3LS42Yy4wMy0uMjUuMDItLjUyLjA2LS43OS4wMi0uMjUuMDUtLjUgMC0uNzUtLjA5LjItLjAzLjQ4LS4wOS43LS4wMi4yOC0uMDguNTctLjA0Ljg1LjAyLjAyLjA1LjAyLjA3IDB6bS0uNjctLjI3Yy4wOS0uMy4wNy0uNjMuMDgtLjk0LS4wMS0uMTIuMDEtLjQ3LS4xLS40LjAzLjQzLjAzLjg4LS4wNiAxLjMyIDAgLjAzLjA1LjA1LjA4LjAyem0tLjYtLjVjLjEtLjI0LjE0LS41My4xLS43OS0uMTQgMC0uMDMuMzYtLjEuNS4wMS4wNi0uMTMuMzIgMCAuM3ptLS40My0uMmMuMDQtLjE2LjE1LS41IDAtLjU3LjA1LjE4LS4xMi40NS0uMDMuNThsLjAzLS4wMXptMy40My0uMjJjLjIyLS4xMyAwLS42Ni0uMjItLjM1LS4xLjE1IDAgLjQyLjIyLjM1em0uMzQtLjA2Yy4yOCAwIC4xOC0uNTMtLjA5LS4zNC0uMTIuMDctLjEyLjQyLjA5LjM0em0uNDQtLjAxYy0uMDEtLjEuMTItLjM4LS4wMS0uNC0uMDIuMS0uMTcuNDEgMCAuNHptLTEuMjYtLjAyYzAtLjEzLjAyLS41NS0uMTQtLjUuMDguMTItLjAyLjUxLjE0LjV6Ii8+PHBhdGggZD0iTTUuNTMgMy4yN2wtLjI1LjAzYS41Ny41NyAwIDAxLS4xMy4yNGwtLjA2LS4yLS4zNy4wNGMwIC4xLS4wMy4xOS0uMS4yOGwtLjEzLS4yNC0uMjIuMDNzLS4xNS4yOC0uMTUuNDYuMDcuNDYuMzcuNTNoLjAzbC4xNS0uMjUuMDguMjguMjUuMDIuMTItLjJjLjA1LjEuMS4yLjEyLjJsLjMuMDJ2LS4wOHMtLjEyLS4yNS0uMTItLjM5Yy0uMDItLjI3LjExLS43Ny4xMS0uNzd6IiBmaWxsLW9wYWNpdHk9Ii41Ii8+PC9zdmc+" + user: "65534" + # In order to set only exactly a specific set of capabilities without + # any additional Docker container default capabilities, we need to drop + # "all" capabilities. Regardless of the order (there ain't one) of YAML + # dictionary keys, Docker carries out dropping all capabilities first, + # and only then adds capabilities. See also: + # https://stackoverflow.com/a/63219871. + cap_drop: + - ALL + cap_add: + - CAP_SYS_ADMIN # change namespaces + - CAP_SYS_CHROOT # change mount namespaces + - CAP_SYS_PTRACE # access nsfs namespace information + - CAP_DAC_READ_SEARCH # access/scan /proc/[$PID]/fd itself + - CAP_DAC_OVERRIDE # access container engine unix domain sockets without being rude, erm, root. + - CAP_NET_RAW # pingin' 'round + - CAP_NET_ADMIN # 'nuff tables + security_opt: + # The default Docker container AppArmor profile blocks namespace + # discovery, due to reading from /proc/$PID/ns/* is considered to be + # ptrace read/ready operations. + - apparmor:unconfined + # Essential since we need full PID view. + pid: "host" + cgroup: host + networks: + 99-ghost-in-da-edge: + priority: 100 + + edgeshark: + image: "ghcr.io/siemens/packetflix" + read_only: true + restart: "unless-stopped" + entrypoint: + - "/packetflix" + - "--port=5001" + - "--discovery-service=gostwire.ghost-in-da-edge" + - "--gw-port=5000" + - "--proxy-discovery" + - "--debug" + + ports: + - "127.0.0.1:5001:5001" + + # Run as non-root user (baked into the meta data of the image anyway). + user: "65534" + + # In order to set only exactly a specific set of capabilities without + # any additional Docker container default capabilities, we need to drop + # "all" capabilities. 
Regardless of the order (there ain't one) of YAML + # dictionary keys, Docker carries out dropping all capabilities first, + # and only then adds capabilities. See also: + # https://stackoverflow.com/a/63219871. + cap_drop: + - ALL + cap_add: + - CAP_SYS_ADMIN # change namespaces + - CAP_SYS_CHROOT # change mount namespaces + - CAP_SYS_PTRACE # access nsfs namespace information + - CAP_NET_ADMIN # allow dumpcap to control promisc. mode + - CAP_NET_RAW # capture raw packets, and not that totally burnt stuff + security_opt: + # The default Docker container AppArmor profile blocks namespace + # discovery, due to reading from /proc/$PID/ns/* is considered to be + # ptrace read/ready operations. + - apparmor:unconfined + + # Essential since we need full PID view. + pid: "host" + + networks: + 99-ghost-in-da-edge: + priority: 100 + +networks: + 99-ghost-in-da-edge: + name: ghost-in-da-edge + internal: false diff --git a/scripts/compose/portal.yml b/scripts/compose/portal.yml new file mode 100644 index 000000000..268897acd --- /dev/null +++ b/scripts/compose/portal.yml @@ -0,0 +1,95 @@ +services: + # Dependencies + postgres: + # TODO: Enable pgaudit on dev instance. See https://github.com/pgaudit/pgaudit/issues/44#issuecomment-455090262 + image: postgres:17 + command: + ["postgres", "-c", "wal_level=logical", "-c", "max_connections=200"] + volumes: + - postgres-data:/var/lib/postgresql/data + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: firezone_dev + healthcheck: + test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"] + start_period: 20s + interval: 30s + retries: 5 + timeout: 5s + ports: + - 5432:5432/tcp + networks: + - app-internal + + vault: + image: vault:1.13.3 + environment: + VAULT_ADDR: "http://127.0.0.1:8200" + VAULT_DEV_ROOT_TOKEN_ID: "firezone" + VAULT_LOG_LEVEL: "debug" + ports: + - 8200:8200/tcp + cap_add: + - IPC_LOCK + networks: + - app-internal + healthcheck: + test: + [ + "CMD", + "wget", + "--spider", + "--proxy", + "off", + "http://127.0.0.1:8200/v1/sys/health?standbyok=true", + ] + interval: 10s + timeout: 3s + retries: 10 + start_period: 5s + + common: + image: alpine:latest # Dummy, will always be overridden. + environment: + # Database + RUN_CONDITIONAL_MIGRATIONS: "true" + DATABASE_HOST: postgres + DATABASE_PORT: 5432 + DATABASE_NAME: firezone_dev + DATABASE_USER: postgres + DATABASE_PASSWORD: postgres + # Feature flags + FEATURE_POLICY_CONDITIONS_ENABLED: "true" + FEATURE_MULTI_SITE_RESOURCES_ENABLED: "true" + FEATURE_SELF_HOSTED_RELAYS_ENABLED: "true" + FEATURE_IDP_SYNC_ENABLED: "true" + FEATURE_REST_API_ENABLED: "true" + FEATURE_INTERNET_RESOURCE_ENABLED: "true" + # Seeds + STATIC_SEEDS: "true" + ## Warning: The token is for the blackhole Postmark server created in a separate isolated account, + ## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard. 
+ OUTBOUND_EMAIL_ADAPTER_OPTS: '{"api_key":"7da7d1cd-111c-44a7-b5ac-4027b9d230e5"}' + # Secrets + TOKENS_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" + TOKENS_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" + SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2" + LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" + COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" + COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2" + # Auth + AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token,google_workspace,microsoft_entra,okta,jumpcloud,mock" + # Erlang + ERLANG_DISTRIBUTION_PORT: 9000 + RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV" + # Emails + OUTBOUND_EMAIL_FROM: "public-noreply@firez.one" + OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark" + networks: + - app-internal + +volumes: + postgres-data: + elixir-build-cache: + assets-build-cache: diff --git a/scripts/compose/relay.yml b/scripts/compose/relay.yml new file mode 100644 index 000000000..02216e665 --- /dev/null +++ b/scripts/compose/relay.yml @@ -0,0 +1,33 @@ +services: + relay: + environment: + FIREZONE_TOKEN: ".SFMyNTY.g2gDaAN3A25pbG0AAAAkZTgyZmNkYzEtMDU3YS00MDE1LWI5MGItM2IxOGYwZjI4MDUzbQAAADhDMTROR0E4N0VKUlIwM0c0UVBSMDdBOUM2Rzc4NFRTU1RIU0Y0VEk1VDBHRDhENkwwVlJHPT09PW4GAOb7sImUAWIAAVGA.e_k2YXxBOSmqVSu5RRscjZJBkZ7OAGzkpr5X2ge1MNo" + RUST_LOG: ${RUST_LOG:-debug} + RUST_BACKTRACE: 1 + FIREZONE_API_URL: ws://api:8081 + OTLP_GRPC_ENDPOINT: otel:4317 + EBPF_OFFLOADING: eth0 + privileged: true + init: true + build: + target: ${DOCKER_BUILD_TARGET:-debug} + context: rust + dockerfile: Dockerfile + args: + PACKAGE: firezone-relay + image: ${RELAY_IMAGE:-ghcr.io/firezone/debug/relay}:${RELAY_TAG:-main} + sysctls: + - net.ipv6.conf.all.disable_ipv6=0 + - net.ipv6.conf.default.disable_ipv6=0 + healthcheck: + test: ["CMD-SHELL", "lsof -i UDP | grep firezone-relay"] + start_period: 10s + interval: 30s + retries: 5 + timeout: 5s + depends_on: + api: + condition: "service_healthy" + extra_hosts: + - "api:203.0.113.10" + - "api:203:0:113::10" diff --git a/scripts/compose/resources.yml b/scripts/compose/resources.yml new file mode 100644 index 000000000..4bc8817e3 --- /dev/null +++ b/scripts/compose/resources.yml @@ -0,0 +1,60 @@ +services: + httpbin: + image: kennethreitz/httpbin + # Needed to bind to IPv6 + command: ["gunicorn", "-b", "[::]:80", "httpbin:app"] + healthcheck: + test: ["CMD-SHELL", "ps -C gunicorn"] + networks: + resources: + ipv4_address: 172.20.0.100 + ipv6_address: 172:20:0::100 + + download.httpbin: # Named after `httpbin` because that is how DNS resources are configured for the test setup. 
+ build: + target: ${DOCKER_BUILD_TARGET:-debug} + context: rust + dockerfile: Dockerfile + args: + PACKAGE: http-test-server + image: ${HTTP_TEST_SERVER_IMAGE:-ghcr.io/firezone/debug/http-test-server}:${HTTP_TEST_SERVER_TAG:-main} + environment: + PORT: 80 + networks: + dns_resources: + ipv4_address: 172.21.0.101 + + dns.httpbin.search.test: + image: kennethreitz/httpbin + healthcheck: + test: ["CMD-SHELL", "ps -C gunicorn"] + networks: + dns_resources: + ipv4_address: 172.21.0.100 + + iperf3: + image: mlabbe/iperf3 + healthcheck: + test: + [ + "CMD-SHELL", + "(cat /proc/net/tcp | grep 5201) && (cat /proc/net/udp | grep 5201)", + ] + command: -s -V + networks: + resources: + ipv4_address: 172.20.0.110 + ipv6_address: 172:20:0::110 + +networks: + dns_resources: + ipam: + config: + - subnet: 172.21.0.0/24 + + resources: + enable_ipv6: true + ipam: + config: + - subnet: 172.20.0.0/24 + - subnet: 172:20:0::/64 diff --git a/scripts/compose/router.yml b/scripts/compose/router.yml new file mode 100644 index 000000000..e92e0a31a --- /dev/null +++ b/scripts/compose/router.yml @@ -0,0 +1,13 @@ +services: + router: + build: + context: ../router + cap_add: + - NET_ADMIN + privileged: true + sysctls: + - net.ipv4.ip_forward=1 + - net.ipv6.conf.all.disable_ipv6=0 + - net.ipv6.conf.default.disable_ipv6=0 + - net.ipv6.conf.all.forwarding=1 + - net.ipv6.conf.default.forwarding=1