Deployment for the cloud version (#1638)

TODO:
- [x] Cluster formation for all API and web nodes
- [x] Ingest Docker logs to Stackdriver
- [x] Fix assets building for prod

To finish later:
- [ ] Structured logging: https://issuetracker.google.com/issues/285950891
- [ ] Better networking policy (e.g. use public Postmark ranges and deny all unwanted egress)
- [ ] OpenTelemetry collector for Google Stackdriver
- [ ] LoggerJSON.Plug integration (see the sketch below)
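
A minimal sketch of what the LoggerJSON.Plug wiring could look like in `API.Endpoint`, assuming the `logger_json ~> 5.1` dependency pinned in `mix.exs` (the formatter choice is an assumption, not the shipped code):

```elixir
# Hypothetical sketch only: replace Plug.Telemetry request logging with
# structured JSON lines that Stackdriver can parse.
defmodule API.Endpoint do
  use Phoenix.Endpoint, otp_app: :api

  plug Plug.RequestId

  # logger_json ~> 5.1 ships a request-logging plug and a Google Cloud
  # metadata formatter.
  plug LoggerJSON.Plug,
    metadata_formatter: LoggerJSON.Plug.MetadataFormatters.GoogleCloudLogger
end
```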

---------

Signed-off-by: Andrew Dryga <andrew@dryga.com>
Co-authored-by: Jamil <jamilbk@users.noreply.github.com>
87 changed files with 4616 additions and 427 deletions

.dockerignore (new file)

@@ -0,0 +1,19 @@
# Documentation
docs
# Website
www
# MacOS
.DS_Store
# Git
.git
.gitignore
.gitmodules
.github
# Terraform
.terraform
*.tfstate.backup
terraform.tfstate.d

.github/workflows/elixir.yml

@@ -3,11 +3,14 @@ on:
pull_request:
paths:
- "elixir/**"
- ".github/workflows/elixir.yml"
push:
branches:
- master
- cloud
paths:
- "elixir/**"
- ".github/workflows/elixir.yml"
# Cancel old workflow runs if new code is pushed
concurrency:
@@ -392,34 +395,131 @@ jobs:
name: Elixir Acceptance Test Report
path: elixir/_build/test/lib/*/test-junit-report.xml
reporter: java-junit
container-build:
web-container-build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./elixir
permissions:
contents: read
id-token: "write"
needs:
- unit-test
- acceptance-test
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
APPLICATION_NAME: web
REGISTRY: us-east1-docker.pkg.dev
GCLOUD_PROJECT: firezone-staging
GOOGLE_CLOUD_PROJECT: firezone-staging
CLOUDSDK_PROJECT: firezone-staging
CLOUDSDK_CORE_PROJECT: firezone-staging
GCP_PROJECT: firezone-staging
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- uses: actions/checkout@v3
- id: auth
uses: google-github-actions/auth@v1
with:
token_format: "access_token"
workload_identity_provider: "projects/397012414171/locations/global/workloadIdentityPools/github-actions/providers/github-actions"
service_account: "github-actions@github-iam-387915.iam.gserviceaccount.com"
export_environment_variables: false
- name: Change current gcloud account
run: gcloud --quiet config set project ${GCLOUD_PROJECT}
- name: Login to Google Artifact Registry
uses: docker/login-action@v2
with:
registry: ${{ env.REGISTRY }}
username: oauth2accesstoken
password: ${{ steps.auth.outputs.access_token }}
- name: Build Tag and Version ID
id: vsn
env:
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
run: |
TAG=$(echo ${BRANCH_NAME} | sed 's/\//_/g' | sed 's/\:/_/g')
echo "TAG=branch-${TAG}" >> $GITHUB_ENV
- name: Pull cache layers
run: |
docker pull ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:master || true
docker pull ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:${{ env.TAG }} || true
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
platforms: linux/amd64
build-args: |
VERSION=0.0.0-dev.${{ github.sha }}
APPLICATION_NAME=${{ env.APPLICATION_NAME }}
APPLICATION_VERSION=0.0.0-sha.${{ github.sha }}
context: elixir/
file: elixir/Dockerfile
push: false
tags: ${{ github.ref_type }}-${{ github.ref_name }}
push: true
tags: ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:${{ env.TAG }} , ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:${{ github.sha }}
# TODO: add a sanity check to make sure the image is actually built
# and can be started
api-container-build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./elixir
permissions:
contents: read
id-token: "write"
needs:
- unit-test
- acceptance-test
env:
APPLICATION_NAME: api
REGISTRY: us-east1-docker.pkg.dev
GCLOUD_PROJECT: firezone-staging
GOOGLE_CLOUD_PROJECT: firezone-staging
CLOUDSDK_PROJECT: firezone-staging
CLOUDSDK_CORE_PROJECT: firezone-staging
GCP_PROJECT: firezone-staging
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- uses: actions/checkout@v3
- id: auth
uses: google-github-actions/auth@v1
with:
token_format: "access_token"
workload_identity_provider: "projects/397012414171/locations/global/workloadIdentityPools/github-actions/providers/github-actions"
service_account: "github-actions@github-iam-387915.iam.gserviceaccount.com"
export_environment_variables: false
- name: Change current gcloud account
run: gcloud --quiet config set project ${GCLOUD_PROJECT}
- name: Login to Google Artifact Registry
uses: docker/login-action@v2
with:
registry: ${{ env.REGISTRY }}
username: oauth2accesstoken
password: ${{ steps.auth.outputs.access_token }}
- name: Build Tag and Version ID
id: vsn
env:
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
run: |
TAG=$(echo ${BRANCH_NAME} | sed 's/\//_/g' | sed 's/\:/_/g')
echo "TAG=branch-${TAG}" >> $GITHUB_ENV
- name: Pull cache layers
run: |
docker pull ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:master || true
docker pull ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:${{ env.TAG }} || true
- name: Build and push Docker image
uses: docker/build-push-action@v4
with:
platforms: linux/amd64
build-args: |
APPLICATION_NAME=${{ env.APPLICATION_NAME }}
APPLICATION_VERSION=0.0.0-sha.${{ github.sha }}
context: elixir/
file: elixir/Dockerfile
push: true
tags: ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:${{ env.TAG }} , ${{ env.REGISTRY }}/${{ env.GCLOUD_PROJECT }}/firezone/${{ env.APPLICATION_NAME }}:${{ github.sha }}
# TODO: add a sanity check to make sure the image is actually built
# and can be started

.terraformignore (new file)

@@ -0,0 +1,4 @@
elixir
rust
www
.github

.tool-versions

@@ -3,6 +3,7 @@
nodejs 18.16.0
elixir 1.14.4-otp-25
erlang 25.3.2
terraform 1.4.6
# Used for static analysis
python 3.9.13


@@ -45,8 +45,7 @@ the following general guidelines:
Docker is the preferred method of developing Firezone locally. It (mostly)
works cross-platform, and can be used to develop Firezone on all three
major desktop OS. This also provides a small but somewhat realistic network
environment with working nftables and WireGuard subsystems for live development.
major desktop OS.
### Docker Setup

docker-compose.yml (new file)

@@ -0,0 +1,228 @@
version: '3.8'
services:
# Dependencies
postgres:
image: postgres:15
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: firezone_dev
healthcheck:
test:
[
"CMD-SHELL",
"pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"
]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
ports:
- 5432:5432
networks:
- app
vault:
image: vault
environment:
VAULT_ADDR: 'http://127.0.0.1:8200'
VAULT_DEV_ROOT_TOKEN_ID: 'firezone'
ports:
- 8200:8200/tcp
cap_add:
- IPC_LOCK
networks:
- app
# Firezone Components
web:
build:
context: elixir
args:
APPLICATION_NAME: web
image: firezone_web_dev
hostname: web.cluster.local
ports:
- 8080:8080/tcp
environment:
# Web Server
EXTERNAL_URL: http://localhost:8080/
PHOENIX_HTTP_WEB_PORT: "8080"
PHOENIX_SECURE_COOKIES: false
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "web.cluster.local"
RELEASE_NAME: "web"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token"
# Secrets
AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
RELAYS_AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
RELAYS_AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
GATEWAYS_AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
GATEWAYS_AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Telemetry
TELEMETRY_ENABLED: "false"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: The token is for the blackhole Postmark server created in a separate isolated account
## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: "{\"api_key\":\"7da7d1cd-111c-44a7-b5ac-4027b9d230e5\"}"
depends_on:
postgres:
condition: 'service_healthy'
networks:
- app
api:
build:
context: elixir
args:
APPLICATION_NAME: api
image: firezone_api_dev
hostname: api.cluster.local
ports:
- 8081:8081/tcp
environment:
# Web Server
EXTERNAL_URL: http://localhost:8081/
PHOENIX_HTTP_API_PORT: "8081"
PHOENIX_SECURE_COOKIES: false
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Cluster.Strategy.Epmd"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{"hosts":["api@api.cluster.local","web@web.cluster.local"]}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "api.cluster.local"
RELEASE_NAME: "api"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token"
# Secrets
AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
RELAYS_AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
RELAYS_AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
GATEWAYS_AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
GATEWAYS_AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Telemetry
TELEMETRY_ENABLED: "false"
# Debugging
LOG_LEVEL: "debug"
# Emails
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: The token is for the blackhole Postmark server created in a separate isolated account
## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: "{\"api_key\":\"7da7d1cd-111c-44a7-b5ac-4027b9d230e5\"}"
depends_on:
postgres:
condition: 'service_healthy'
networks:
- app
# This is a service container which allows running mix tasks for local development
# without having to install Elixir and Erlang on the host machine.
elixir:
build:
context: elixir
target: builder
args:
APPLICATION_NAME: api
image: firezone_local_dev
hostname: elixir
volumes:
- elixir-build-cache:/app/_build
- ./elixir/apps:/app/apps
- ./elixir/config:/app/config
- ./elixir/priv:/app/priv
- ./elixir/rel:/app/rel
- ./elixir/mix.exs:/app/mix.exs
- ./elixir/mix.lock:/app/mix.lock
- assets-build-cache:/app/apps/web/assets/node_modules
environment:
# Web Server
EXTERNAL_URL: http://localhost:8081/
# Erlang
ERLANG_DISTRIBUTION_PORT: 9000
ERLANG_CLUSTER_ADAPTER: "Elixir.Domain.Cluster.Local"
ERLANG_CLUSTER_ADAPTER_CONFIG: '{}'
RELEASE_COOKIE: "NksuBhJFBhjHD1uUa9mDOHV"
RELEASE_HOSTNAME: "mix.cluster.local"
RELEASE_NAME: "mix"
# Database
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
# Auth
AUTH_PROVIDER_ADAPTERS: "email,openid_connect,userpass,token"
# Secrets
AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
RELAYS_AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
RELAYS_AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
GATEWAYS_AUTH_TOKEN_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
GATEWAYS_AUTH_TOKEN_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
SECRET_KEY_BASE: "5OVYJ83AcoQcPmdKNksuBhJFBhjHD1uUa9mDOHV/6EIdBQ6pXksIhkVeWIzFk5S2"
LIVE_VIEW_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_SIGNING_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
COOKIE_ENCRYPTION_SALT: "t01wa0K4lUd7mKa0HAtZdE+jFOPDDej2"
# Telemetry
TELEMETRY_ENABLED: "false"
# Higher log level to keep seeds output from being too verbose
LOG_LEVEL: "info"
# Emails
OUTBOUND_EMAIL_ADAPTER: "Elixir.Swoosh.Adapters.Postmark"
## Warning: The token is for the blackhole Postmark server created in a separate isolated account
## that WILL NOT send any actual emails, but you can see and debug them in the Postmark dashboard.
OUTBOUND_EMAIL_ADAPTER_OPTS: "{\"api_key\":\"7da7d1cd-111c-44a7-b5ac-4027b9d230e5\"}"
# Mix env should be set to prod to use secrets declared above,
# otherwise seeds will generate invalid tokens
MIX_ENV: "prod"
depends_on:
postgres:
condition: 'service_healthy'
networks:
- app
networks:
app:
enable_ipv6: true
ipam:
config:
- subnet: 172.28.0.0/16
- subnet: 2001:3990:3990::/64
volumes:
postgres-data:
elixir-build-cache:
assets-build-cache:


@@ -15,3 +15,11 @@ docs
# Git
.git
.gitignore
.gitmodules
.github
# Terraform
.terraform
*.tfstate.backup
terraform.tfstate.d

elixir/Dockerfile

@@ -2,7 +2,6 @@ ARG ELIXIR_VERSION=1.14.3
ARG OTP_VERSION=25.2.1
ARG ALPINE_VERSION=3.16.3
ARG APP_NAME="web"
ARG BUILDER_IMAGE="firezone/elixir:${ELIXIR_VERSION}-otp-${OTP_VERSION}"
ARG RUNNER_IMAGE="alpine:${ALPINE_VERSION}"
@@ -18,32 +17,23 @@ WORKDIR /app
RUN mix local.hex --force && \
mix local.rebar --force
# set build ENV
ENV MIX_ENV="prod"
# install mix dependencies
COPY mix.exs mix.lock ./
COPY apps/domain/mix.exs ./apps/domain/mix.exs
COPY apps/web/mix.exs ./apps/web/mix.exs
COPY apps/api/mix.exs ./apps/api/mix.exs
COPY config/ config/
RUN mix deps.get --only $MIX_ENV
RUN mkdir config
# copy compile-time config files before we compile dependencies
# to ensure any relevant config change will trigger the dependencies
# to be re-compiled.
COPY config/config.exs config/${MIX_ENV}.exs config/
RUN mix deps.compile
ARG MIX_ENV="prod"
RUN mix deps.get --only ${MIX_ENV}
RUN mix deps.compile --skip-umbrella-children
COPY priv priv
COPY apps apps
# mix phx.digest triggers web compilation; the version needs to be set here
ARG VERSION=0.0.0-docker
ENV VERSION=$VERSION
ARG APPLICATION_VERSION=0.0.0-dev.docker
# Install and compile assets
# Install pipeline and compile assets for Web app
RUN cd apps/web \
&& mix assets.setup \
&& mix assets.deploy \
@@ -52,24 +42,31 @@ RUN cd apps/web \
# Compile the release
RUN mix compile
# Changes to config/runtime.exs don't require recompiling the code
COPY config/runtime.exs config/
COPY rel rel
RUN mix release
ARG APPLICATION_NAME
RUN mix release ${APPLICATION_NAME}
# start a new build stage so that the final image will only contain
# the compiled release and other runtime necessities
FROM ${RUNNER_IMAGE}
RUN apk add -u --no-cache nftables libstdc++ ncurses-libs openssl
RUN apk add -u --no-cache libstdc++ ncurses-libs openssl curl
WORKDIR /app
# set runner ENV
ENV MIX_ENV="prod"
ARG MIX_ENV="prod"
ARG APPLICATION_NAME
ARG APPLICATION_VERSION=0.0.0-dev.docker
ENV APPLICATION_NAME=$APPLICATION_NAME
ENV APPLICATION_VERSION=$APPLICATION_VERSION
# Only copy the final release from the build stage
COPY --from=builder /app/_build/${MIX_ENV}/rel/${APP_NAME} ./
COPY --from=builder /app/_build/${MIX_ENV}/rel/${APPLICATION_NAME} ./
CMD ["/app/bin/server"]
# Change user to "default" to limit runtime privileges
# USER default
CMD bin/server

elixir/README.md (new file)

@@ -0,0 +1,194 @@
# Welcome to Elixir-land!
This README provides an overview of running and managing Firezone's Elixir-based control plane.
## Running Control Plane for local development
You can use the [Top-Level Docker Compose](../docker-compose.yml) to start any services locally. The `web` and `api` compose services are built application releases that are pretty much the same as the ones we run in production, while the `elixir` compose service runs raw Elixir code, without a built release.
This means you'll want to use the `elixir` compose service to run Mix tasks and any Elixir code on the fly; you can't do that as easily in `web`/`api` because Elixir strips out Mix and other tooling [when building an application release](https://hexdocs.pm/mix/Mix.Tasks.Release.html).
`elixir` additionally caches `_build` and `node_modules` to speed up compilation time and syncs
`/apps`, `/config` and other folders with the host machine.
```bash
# Make sure to run this every time code in elixir/ changes,
# docker doesn't do that for you!
docker-compose build
# Create the database
#
# Hint: you can run any mix commands like this,
# e.g. mix ecto.reset will reset your database
#
# Also to drop the database you need to stop all active connections,
# so if you get an error, stop all services first:
#
# docker-compose down
#
# Or you can just run both reset and seed in a one-liner:
#
# docker-compose run elixir /bin/sh -c "cd apps/domain && mix do ecto.reset, ecto.seed"
#
docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.create"
# Ensure database is migrated before running seeds
docker-compose run api bin/migrate
# or
docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.migrate"
# Seed the database
# Hint: some access tokens will be generated and written to stdout,
# don't forget to save them for later
docker-compose run api bin/seed
# or
docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.seed"
# Start the API service for control plane sockets while listening to STDIN (where you will see all the logs)
docker-compose up api --build
```
Now you can verify that it's working by connecting to a websocket:
<details>
<summary>Gateway</summary>
```bash
websocat --header="User-Agent: iOS/12.7 (iPhone) connlib/0.7.412" "ws://127.0.0.1:8081/gateway/websocket?token=GATEWAY_TOKEN_FROM_SEEDS&external_id=thisisrandomandpersistent&name_suffix=kkX1&public_key=kceI60D6PrwOIiGoVz6hD7VYCgD1H57IVQlPJTTieUE="
```
</details>
<details>
<summary>Relay</summary>
```bash
websocat --header="User-Agent: Linux/5.2.6 (Debian; x86_64) relay/0.7.412" "ws://127.0.0.1:8081/relay/websocket?token=RELAY_TOKEN_FROM_SEEDS&ipv4=24.12.79.100&ipv6=4d36:aa7f:473c:4c61:6b9e:2416:9917:55cc"
# Here is what you will see in docker logs firezone-api-1
# {"time":"2023-06-05T23:16:01.537Z","severity":"info","message":"CONNECTED TO API.Relay.Socket in 251ms\n Transport: :websocket\n Serializer: Phoenix.Socket.V1.JSONSerializer\n Parameters: %{\"ipv4\" => \"24.12.79.100\", \"ipv6\" => \"4d36:aa7f:473c:4c61:6b9e:2416:9917:55cc\", \"stamp_secret\" => \"[FILTERED]\", \"token\" => \"[FILTERED]\"}","metadata":{"domain":["elixir"],"erl_level":"info"}}
# After this you need to join the `relay` topic and pass a `stamp_secret` in the payload.
# For details on this structure see https://hexdocs.pm/phoenix/Phoenix.Socket.Message.html
> {"event":"phx_join","topic":"relay","payload":{"stamp_secret":"makemerandomplz"},"ref":"unique_string_ref","join_ref":"unique_join_ref"}
{"event":"phx_reply","payload":{"response":{},"status":"ok"},"ref":"unique_string_ref","topic":"relay"}
{"event":"init","payload":{},"ref":null,"topic":"relay"}
```
</details>
<br />
Stopping everything is easy too:
```bash
docker-compose down
```
## Useful commands for local testing and debugging
Connecting to an IEx interactive console:
```bash
docker-compose run elixir /bin/sh -c "cd apps/domain && iex -S mix"
```
Connecting to a running api/web instance shell:
```bash
docker exec -it firezone-api-1 sh
/app
```
Connecting to a running api/web instance to run Elixir code from them:
```bash
# Start all services in daemon mode (in background)
docker-compose up -d --build
# Connect to a running API node
docker exec -it firezone-api-1 bin/api remote
Erlang/OTP 25 [erts-13.1.4] [source] [64-bit] [smp:5:5] [ds:5:5:10] [async-threads:1]
Interactive Elixir (1.14.3) - press Ctrl+C to exit (type h() ENTER for help)
iex(api@127.0.0.1)1>
# Connect to a running Web node
docker exec -it firezone-web-1 bin/web remote
Erlang/OTP 25 [erts-13.1.4] [source] [64-bit] [smp:5:5] [ds:5:5:10] [async-threads:1]
Interactive Elixir (1.14.3) - press Ctrl+C to exit (type h() ENTER for help)
iex(web@127.0.0.1)1>
```
From the `iex` shell you can run any Elixir code; for example, you can emulate a full flow using process messages.
Just keep in mind that you need to run seeds before executing this example:
```elixir
[gateway | _rest_gateways] = Domain.Repo.all(Domain.Gateways.Gateway)
:ok = Domain.Gateways.connect_gateway(gateway)
[relay | _rest_relays] = Domain.Repo.all(Domain.Relays.Relay)
relay_secret = Domain.Crypto.rand_string()
:ok = Domain.Relays.connect_relay(relay, relay_secret)
```
Now if you connect and list resources, one will show as online, because a relay and a gateway are online.
Some of the functions require authorization; here is how you can obtain a subject:
```elixir
user_agent = "User-Agent: iOS/12.7 (iPhone) connlib/0.7.412"
remote_ip = {127, 0, 0, 1}
# For a client
{:ok, subject} = Domain.Auth.sign_in(client_token, user_agent, remote_ip)
# For an admin user
provider = Domain.Repo.get_by(Domain.Auth.Provider, adapter: :userpass)
identity = Domain.Repo.get_by(Domain.Auth.Identity, provider_id: provider.id, provider_identifier: "firezone@localhost")
subject = Domain.Auth.build_subject(identity, nil, user_agent, remote_ip)
```
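
With a subject in hand you can call authorized functions, e.g. listing resources (the exact result depends on the seeds):

```elixir
{:ok, resources} = Domain.Resources.list_resources(subject)
length(resources)
```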
## Connecting to staging or production instances
We use Google Cloud Platform for all our staging and production infrastructure. You'll need access to this env to perform the commands below; to get access, add yourself to `project_owners` in `main.tf` for each of the [environments](../terraform/environments).
This is a danger zone, so first of all ALWAYS make sure which environment you are operating on:
```bash
gcloud config get project
firezone-staging
```
Then figure out which specific instance you want to connect to:
```bash
gcloud compute instances list
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
api-b02t us-east1-d n1-standard-1 10.128.0.22 RUNNING
api-srkp us-east1-d n1-standard-1 10.128.0.23 RUNNING
web-51wd us-east1-d n1-standard-1 10.128.0.21 RUNNING
web-6k3n us-east1-d n1-standard-1 10.128.0.20 RUNNING
```
SSH into the VM and enter the remote Elixir shell:
```bash
gcloud compute ssh api-b02t
No zone specified. Using zone [us-east1-d] for instance: [api-b02t].
...
########################[ Welcome ]########################
# You have logged in to the guest OS. #
# To access your containers use 'docker attach' command #
###########################################################
andrew@api-b02t ~ $ docker ps --format json | jq '"\(.ID) \(.Image)"'
"1ab7d7c6878c - us-east1-docker.pkg.dev/firezone-staging/firezone/api:branch-andrew_deployment"
andrew@api-b02t ~ $ docker exec -it 1ab7d7c6878c bin/api remote
Erlang/OTP 25 [erts-13.1.4] [source] [64-bit] [smp:1:1] [ds:1:1:10] [async-threads:1] [jit]
Interactive Elixir (1.14.3) - press Ctrl+C to exit (type h() ENTER for help)
iex(api@api-b02t.us-east1-d.c.firezone-staging.internal)1>
```
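
Once in the remote shell, one quick sanity check for Erlang cluster formation is to list the connected nodes. The names below are illustrative; they follow the naming scheme used by `Domain.Cluster.GoogleComputeLabelsStrategy`:

```elixir
Node.self()
#=> :"api@api-b02t.us-east1-d.c.firezone-staging.internal"
Node.list()
#=> [:"api@api-srkp.us-east1-d.c.firezone-staging.internal",
#    :"web@web-51wd.us-east1-d.c.firezone-staging.internal",
#    :"web@web-6k3n.us-east1-d.c.firezone-staging.internal"]
```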


@@ -81,7 +81,13 @@ defmodule API.Device.Channel do
with {:ok, resource} <- Resources.fetch_resource_by_id(resource_id, socket.assigns.subject),
# :ok = Resource.authorize(resource, socket.assigns.subject),
{:ok, [_ | _] = relays} <- Relays.list_connected_relays_for_resource(resource) do
reply = {:ok, %{relays: Views.Relay.render_many(relays, socket.assigns.expires_at)}}
reply =
{:ok,
%{
relays: Views.Relay.render_many(relays, socket.assigns.subject.expires_at),
resource_id: resource_id
}}
{:reply, reply, socket}
else
{:ok, []} -> {:reply, {:error, :offline}, socket}
@@ -111,7 +117,7 @@ defmodule API.Device.Channel do
%{
device_id: socket.assigns.device.id,
resource_id: resource_id,
authorization_expires_at: socket.assigns.expires_at,
authorization_expires_at: socket.assigns.subject.expires_at,
device_rtc_session_description: device_rtc_session_description,
device_preshared_key: preshared_key
}}


@@ -1,10 +1,11 @@
defmodule API.Device.Socket do
use Phoenix.Socket
alias Domain.{Auth, Devices}
require Logger
## Channels
channel "device:*", API.Device.Channel
channel "device", API.Device.Channel
## Authentication
@@ -22,12 +23,16 @@ defmodule API.Device.Socket do
{:ok, socket}
else
{:error, :unauthorized} ->
{:error, :invalid}
{:error, :invalid_token}
{:error, reason} ->
Logger.debug("Error connecting device websocket: #{inspect(reason)}")
{:error, reason}
end
end
def connect(_params, _socket, _connect_info) do
{:error, :invalid}
{:error, :missing_token}
end
@impl true


@@ -1,25 +1,65 @@
defmodule API.Endpoint do
use Phoenix.Endpoint, otp_app: :api
plug Plug.RewriteOn, [:x_forwarded_host, :x_forwarded_port, :x_forwarded_proto]
plug Plug.MethodOverride
plug :put_hsts_header
plug Plug.Head
if code_reloading? do
plug Phoenix.CodeReloader
end
plug Plug.RewriteOn, [:x_forwarded_proto]
plug Plug.MethodOverride
plug RemoteIp,
headers: ["x-forwarded-for"],
proxies: {__MODULE__, :external_trusted_proxies, []},
clients: {__MODULE__, :clients, []}
plug Plug.RequestId
# TODO: Rework LoggerJSON to use Telemetry and integrate it
# https://hexdocs.pm/phoenix/Phoenix.Logger.html
plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint]
socket "/gateway", API.Gateway.Socket, API.Sockets.options()
socket "/device", API.Device.Socket, API.Sockets.options()
socket "/relay", API.Relay.Socket, API.Sockets.options()
plug :healthz
plug :not_found
def put_hsts_header(conn, _opts) do
scheme =
config(:url, [])
|> Keyword.get(:scheme)
if scheme == "https" do
put_resp_header(
conn,
"strict-transport-security",
"max-age=63072000; includeSubDomains; preload"
)
else
conn
end
end
def healthz(%Plug.Conn{request_path: "/healthz"} = conn, _opts) do
conn
|> put_resp_content_type("application/json")
|> send_resp(200, Jason.encode!(%{status: "ok"}))
|> halt()
end
def healthz(conn, _opts) do
conn
end
def not_found(conn, _opts) do
conn
|> send_resp(:not_found, "Not found")
|> halt()
end
def external_trusted_proxies do
Domain.Config.fetch_env!(:api, :external_trusted_proxies)
|> Enum.map(&to_string/1)


@@ -1,10 +1,11 @@
defmodule API.Gateway.Socket do
use Phoenix.Socket
alias Domain.Gateways
require Logger
## Channels
channel "gateway:*", API.Gateway.Channel
channel "gateway", API.Gateway.Channel
## Authentication
@@ -25,6 +26,10 @@ defmodule API.Gateway.Socket do
|> assign(:gateway, gateway)
{:ok, socket}
else
{:error, reason} ->
Logger.debug("Error connecting gateway websocket: #{inspect(reason)}")
{:error, reason}
end
end


@@ -1,10 +1,11 @@
defmodule API.Relay.Socket do
use Phoenix.Socket
alias Domain.Relays
require Logger
## Channels
channel "relay:*", API.Relay.Channel
channel "relay", API.Relay.Channel
## Authentication
@@ -25,6 +26,10 @@ defmodule API.Relay.Socket do
|> assign(:relay, relay)
{:ok, socket}
else
{:error, reason} ->
Logger.debug("Error connecting relay websocket: #{inspect(reason)}")
{:error, reason}
end
end


@@ -6,20 +6,27 @@ defmodule API.Sockets do
def options do
[
transport_log: :debug,
check_origin: :conn,
connect_info: [:trace_context_headers, :user_agent, :peer_data, :x_headers],
websocket: [
transport_log: :debug,
check_origin: :conn,
connect_info: [:trace_context_headers, :user_agent, :peer_data, :x_headers],
error_handler: {__MODULE__, :handle_error, []}
],
longpoll: false
]
end
@spec handle_error(Plug.Conn.t(), :invalid | :rate_limit | :unauthenticated) :: Plug.Conn.t()
def handle_error(conn, :unauthenticated), do: Plug.Conn.send_resp(conn, 403, "Forbidden")
def handle_error(conn, :invalid), do: Plug.Conn.send_resp(conn, 422, "Unprocessable Entity")
def handle_error(conn, :rate_limit), do: Plug.Conn.send_resp(conn, 429, "Too many requests")
def handle_error(conn, :unauthenticated),
do: Plug.Conn.send_resp(conn, 403, "Forbidden")
def handle_error(conn, :invalid_token),
do: Plug.Conn.send_resp(conn, 422, "Unprocessable Entity")
def handle_error(conn, :rate_limit),
do: Plug.Conn.send_resp(conn, 429, "Too many requests")
def handle_error(conn, %Ecto.Changeset{}),
do: Plug.Conn.send_resp(conn, 422, "Invalid or missing connection parameters")
# if Mix.env() == :test do
# defp maybe_allow_sandbox_access(%{user_agent: user_agent}) do


@@ -25,7 +25,7 @@ defmodule API.MixProject do
end
def version do
System.get_env("VERSION", "0.0.0+git.0.deadbeef")
System.get_env("APPLICATION_VERSION", "0.0.0+git.0.deadbeef")
end
def application do


@@ -20,12 +20,13 @@ defmodule API.Device.ChannelTest do
expires_at = DateTime.utc_now() |> DateTime.add(30, :second)
subject = %{subject | expires_at: expires_at}
{:ok, _reply, socket} =
API.Device.Socket
|> socket("device:#{device.id}", %{
device: device,
subject: subject,
expires_at: expires_at
subject: subject
})
|> subscribe_and_join(API.Device.Channel, "device")
@@ -106,7 +107,8 @@ defmodule API.Device.ChannelTest do
:ok = Domain.Relays.connect_relay(relay, stamp_secret)
ref = push(socket, "list_relays", %{"resource_id" => resource.id})
assert_reply ref, :ok, %{relays: relays}
resource_id = resource.id
assert_reply ref, :ok, %{relays: relays, resource_id: ^resource_id}
ipv4_stun_uri = "stun:#{relay.ipv4}:#{relay.port}"
ipv4_turn_uri = "turn:#{relay.ipv4}:#{relay.port}"
@@ -143,7 +145,7 @@ defmodule API.Device.ChannelTest do
assert [expires_at, salt] = String.split(username1, ":", parts: 2)
expires_at = expires_at |> String.to_integer() |> DateTime.from_unix!()
socket_expires_at = DateTime.truncate(socket.assigns.expires_at, :second)
socket_expires_at = DateTime.truncate(socket.assigns.subject.expires_at, :second)
assert expires_at == socket_expires_at
assert is_binary(salt)
@@ -225,7 +227,7 @@ defmodule API.Device.ChannelTest do
authorization_expires_at: authorization_expires_at
} = payload
assert authorization_expires_at == socket.assigns.expires_at
assert authorization_expires_at == socket.assigns.subject.expires_at
send(channel_pid, {:connect, socket_ref, resource.id, gateway.public_key, "FULL_RTC_SD"})


@@ -12,12 +12,12 @@ defmodule API.Device.SocketTest do
describe "connect/3" do
test "returns error when token is missing" do
assert connect(Socket, %{}, @connect_info) == {:error, :invalid}
assert connect(Socket, %{}, @connect_info) == {:error, :missing_token}
end
test "returns error when token is invalid" do
attrs = connect_attrs(token: "foo")
assert connect(Socket, attrs, @connect_info) == {:error, :invalid}
assert connect(Socket, attrs, @connect_info) == {:error, :invalid_token}
end
test "creates a new device" do


@@ -2,6 +2,20 @@ defmodule Domain.Application do
use Application
def start(_type, _args) do
# Configure Logger severity at runtime
:ok = LoggerJSON.configure_log_level_from_env!("LOG_LEVEL")
_ =
:telemetry.attach(
"repo-log-handler",
[:domain, :repo, :query],
&LoggerJSON.Ecto.telemetry_logging_handler/4,
:debug
)
_ = OpentelemetryEcto.setup([:domain, :repo])
_ = OpentelemetryFinch.setup()
Supervisor.start_link(children(), strategy: :one_for_one, name: __MODULE__.Supervisor)
end
@@ -15,10 +29,13 @@ defmodule Domain.Application do
Domain.Auth,
Domain.Relays,
Domain.Gateways,
Domain.Devices
Domain.Devices,
# Observability
# Domain.Telemetry
# Erlang Clustering
Domain.Cluster
]
end
end


@@ -236,6 +236,8 @@ defmodule Domain.Auth do
config = fetch_config!()
key_base = Keyword.fetch!(config, :key_base)
salt = Keyword.fetch!(config, :salt)
# TODO: we don't want the client token to be invalid if the client reconnects from a different IP,
# e.g. for clients that move between cellular towers
payload = session_token_payload(subject)
max_age = DateTime.diff(subject.expires_at, DateTime.utc_now(), :second)


@@ -0,0 +1,37 @@
defmodule Domain.Cluster do
use Supervisor
def start_link(opts) do
Supervisor.start_link(__MODULE__, opts, name: __MODULE__.Supervisor)
end
@impl true
def init(_opts) do
config = Domain.Config.fetch_env!(:domain, __MODULE__)
adapter = Keyword.fetch!(config, :adapter)
adapter_config = Keyword.fetch!(config, :adapter_config)
pool_opts = Domain.Config.fetch_env!(:domain, :http_client_ssl_opts)
topology_config = [
default: [
strategy: adapter,
config: adapter_config
]
]
shared_children = [
{Finch, name: __MODULE__.Finch, pools: %{default: pool_opts}}
]
children =
if adapter != Domain.Cluster.Local do
[
{Cluster.Supervisor, [topology_config, [name: __MODULE__]]}
]
else
[]
end
Supervisor.init(shared_children ++ children, strategy: :rest_for_one)
end
end
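
For reference, the adapter and its config come from the `ERLANG_CLUSTER_ADAPTER*` environment variables shown in `docker-compose.yml`. A hypothetical resolved runtime config for the Epmd strategy, with the shape inferred from the `Keyword.fetch!` calls above (not verbatim from `config/runtime.exs`), would be:

```elixir
import Config

config :domain, Domain.Cluster,
  adapter: Cluster.Strategy.Epmd,
  adapter_config: [hosts: [:"api@api.cluster.local", :"web@web.cluster.local"]]
```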


@@ -0,0 +1,257 @@
defmodule Domain.Cluster.GoogleComputeLabelsStrategy do
@moduledoc """
This module implements a libcluster strategy for Google Compute Engine, which uses
the Compute API to fetch the list of instances in a project by a `cluster_name` label
and then joins them into an Erlang cluster using their internal IP addresses.
In order to work properly, a few prerequisites must be met:
1. The Compute API must be enabled for the project;
2. Instances must have access to the Compute API (either the `compute-ro` or `compute-rw` scope),
and the service account must have a role which grants the `compute.instances.list` and `compute.zones.list`
permissions;
3. Instances must have a `cluster_name` label with the same value for all instances in a cluster,
and a valid `application` label which can be used as the Erlang node name.
"""
use GenServer
use Cluster.Strategy
alias Cluster.Strategy.State
defmodule Meta do
@type t :: %{
access_token: String.t(),
access_token_expires_at: DateTime.t(),
nodes: MapSet.t()
}
defstruct access_token: nil,
access_token_expires_at: nil,
nodes: MapSet.new()
end
@default_polling_interval 5_000
def start_link(args), do: GenServer.start_link(__MODULE__, args)
@impl true
def init([%State{} = state]) do
{:ok, %{state | meta: %Meta{}}, {:continue, :start}}
end
@impl true
def handle_continue(:start, state) do
{:noreply, load(state)}
end
@impl true
def handle_info(:timeout, state) do
handle_info(:load, state)
end
def handle_info(:load, %State{} = state) do
{:noreply, load(state)}
end
def handle_info(_, state) do
{:noreply, state}
end
defp load(%State{topology: topology, meta: %Meta{} = meta} = state) do
{:ok, nodes, state} = fetch_nodes(state)
new_nodes = MapSet.new(nodes)
added_nodes = MapSet.difference(new_nodes, meta.nodes)
removed_nodes = MapSet.difference(state.meta.nodes, new_nodes)
new_nodes =
case Cluster.Strategy.disconnect_nodes(
topology,
state.disconnect,
state.list_nodes,
MapSet.to_list(removed_nodes)
) do
:ok ->
new_nodes
{:error, bad_nodes} ->
# Add back the nodes which should have been removed, but couldn't be for some reason
Enum.reduce(bad_nodes, new_nodes, fn {n, _}, acc ->
MapSet.put(acc, n)
end)
end
new_nodes =
case Cluster.Strategy.connect_nodes(
topology,
state.connect,
state.list_nodes,
MapSet.to_list(added_nodes)
) do
:ok ->
new_nodes
{:error, bad_nodes} ->
# Remove the nodes which should have been added, but couldn't be for some reason
Enum.reduce(bad_nodes, new_nodes, fn {n, _}, acc ->
MapSet.delete(acc, n)
end)
end
Process.send_after(self(), :load, polling_interval(state))
%State{state | meta: %{state.meta | nodes: new_nodes}}
end
@doc false
# We use the Google Compute Engine metadata server to fetch the node access token;
# it will have the scopes declared in the instance template, but actual permissions
# are limited by the service account attached to the instance.
def refresh_access_token(state) do
config = fetch_config!()
token_endpoint_url = Keyword.fetch!(config, :token_endpoint_url)
request = Finch.build(:get, token_endpoint_url, [{"Metadata-Flavor", "Google"}])
case Finch.request(request, Domain.Cluster.Finch) do
{:ok, %Finch.Response{status: 200, body: response}} ->
%{"access_token" => access_token, "expires_in" => expires_in} = Jason.decode!(response)
access_token_expires_at = DateTime.utc_now() |> DateTime.add(expires_in - 1, :second)
{:ok,
%{
state
| meta: %{
state.meta
| access_token: access_token,
access_token_expires_at: access_token_expires_at
}
}}
{:ok, response} ->
Cluster.Logger.warn(state.topology, "Can't fetch instance metadata: #{inspect(response)}")
{:error, {response.status, response.body}}
{:error, reason} ->
Cluster.Logger.warn(state.topology, "Can not fetch instance metadata: #{inspect(reason)}")
{:error, reason}
end
end
defp maybe_refresh_access_token(state) do
cond do
is_nil(state.meta.access_token) ->
refresh_access_token(state)
is_nil(state.meta.access_token_expires_at) ->
refresh_access_token(state)
DateTime.diff(state.meta.access_token_expires_at, DateTime.utc_now()) > 0 ->
{:ok, state}
true ->
refresh_access_token(state)
end
end
@doc false
# We use the Google Compute API to fetch the list of instances in all regions of a project;
# instances are filtered by cluster name and status, and then we use the instance labels
# to figure out the actual node name (which is set in `rel/env.sh.eex` by also reading node metadata).
def fetch_nodes(state, remaining_retry_count \\ 3) do
with {:ok, state} <- maybe_refresh_access_token(state),
{:ok, nodes} <- fetch_google_cloud_instances(state) do
{:ok, nodes, state}
else
{:error, %{"error" => %{"code" => 401}} = reason} ->
Cluster.Logger.error(
state.topology,
"Invalid access token was used: #{inspect(reason)}"
)
if remaining_retry_count == 0 do
{:error, reason}
else
{:ok, state} = refresh_access_token(state)
fetch_nodes(state, remaining_retry_count - 1)
end
{:error, reason} ->
Cluster.Logger.error(
state.topology,
"Can not fetch list of nodes or access token: #{inspect(reason)}"
)
if remaining_retry_count == 0 do
{:error, reason}
else
backoff_interval = Keyword.get(state.config, :backoff_interval, 1_000)
:timer.sleep(backoff_interval)
fetch_nodes(state, remaining_retry_count - 1)
end
end
end
defp fetch_google_cloud_instances(state) do
project_id = Keyword.fetch!(state.config, :project_id)
cluster_name = Keyword.fetch!(state.config, :cluster_name)
cluster_name_label = Keyword.get(state.config, :cluster_name_label, "cluster_name")
node_name_label = Keyword.get(state.config, :node_name_label, "application")
aggregated_list_endpoint_url =
fetch_config!()
|> Keyword.fetch!(:aggregated_list_endpoint_url)
|> String.replace("${project_id}", project_id)
filter = "labels.#{cluster_name_label}=#{cluster_name} AND status=RUNNING"
query = URI.encode_query(%{"filter" => filter})
request =
Finch.build(:get, aggregated_list_endpoint_url <> "?" <> query, [
{"Authorization", "Bearer #{state.meta.access_token}"}
])
with {:ok, %Finch.Response{status: 200, body: response}} <-
Finch.request(request, Domain.Cluster.Finch),
{:ok, %{"items" => items}} <- Jason.decode(response) do
nodes =
items
|> Enum.flat_map(fn
{_zone, %{"instances" => instances}} ->
instances
{_zone, %{"warning" => %{"code" => "NO_RESULTS_ON_PAGE"}}} ->
[]
end)
|> Enum.filter(fn
%{"status" => "RUNNING", "labels" => %{^cluster_name_label => ^cluster_name}} -> true
%{"status" => _status, "labels" => _labels} -> false
end)
|> Enum.map(fn %{"zone" => zone, "name" => name, "labels" => labels} ->
release_name = Map.fetch!(labels, node_name_label)
zone = String.split(zone, "/") |> List.last()
node_name = :"#{release_name}@#{name}.#{zone}.c.#{project_id}.internal"
Cluster.Logger.debug(state.topology, "Found node: #{inspect(node_name)}")
node_name
end)
{:ok, nodes}
else
{:ok, %Finch.Response{status: status, body: body}} ->
{:error, {status, body}}
{:ok, map} ->
{:error, map}
{:error, reason} ->
{:error, reason}
end
end
defp fetch_config! do
Domain.Config.fetch_env!(:domain, __MODULE__)
end
defp polling_interval(%State{config: config}) do
Keyword.get(config, :polling_interval, @default_polling_interval)
end
end
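
To make the node-name scheme concrete, here is the mapping for the staging instance used in the mocks and tests further down:

```elixir
# Values taken from the GoogleCloudPlatform mock below.
release_name = "api"              # the "application" label
name = "api-q3j6"                 # instance name
zone = "us-east1-d"               # last segment of the zone URL
project_id = "firezone-staging"

:"#{release_name}@#{name}.#{zone}.c.#{project_id}.internal"
#=> :"api@api-q3j6.us-east1-d.c.firezone-staging.internal"
```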


@@ -57,6 +57,11 @@ defmodule Domain.Config.Definitions do
:database_ssl_opts,
:database_parameters
]},
{"Erlang Cluster",
[
:erlang_cluster_adapter,
:erlang_cluster_adapter_config
]},
{"Secrets and Encryption",
"""
Your secrets should be generated during installation automatically and persisted to `.env` file.
@@ -276,6 +281,46 @@ defmodule Domain.Config.Definitions do
dump: &Dumper.keyword/1
)
##############################################
## Erlang Cluster
##############################################
@doc """
An adapter that will be used to discover and connect nodes to the Erlang cluster.
Set to `Domain.Cluster.Local` to disable clustering.
"""
defconfig(
:erlang_cluster_adapter,
{:parameterized, Ecto.Enum,
Ecto.Enum.init(
values: [
Elixir.Cluster.Strategy.LocalEpmd,
Elixir.Cluster.Strategy.Epmd,
Elixir.Cluster.Strategy.Gossip,
Elixir.Domain.Cluster.GoogleComputeLabelsStrategy,
Domain.Cluster.Local
]
)},
default: Domain.Cluster.Local
)
@doc """
Config for the Erlang cluster adapter.
"""
defconfig(:erlang_cluster_adapter_config, :map,
default: [],
dump: fn map ->
keyword = Dumper.keyword(map)
if compile_config!(:erlang_cluster_adapter) == Elixir.Cluster.Strategy.Epmd do
Keyword.update!(keyword, :hosts, fn hosts -> Enum.map(hosts, &String.to_atom/1) end)
else
keyword
end
end
)
##############################################
## Secrets
##############################################


@@ -1,11 +1,39 @@
defmodule Domain.Release do
require Logger
@repos Application.compile_env!(:domain, :ecto_repos)
@otp_app :domain
@repos Application.compile_env!(@otp_app, :ecto_repos)
def migrate do
for repo <- @repos do
{:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
end
end
def seed(directory \\ seed_script_path(@otp_app)) do
IO.puts("Starting #{@otp_app} app..")
{:ok, _} = Application.ensure_all_started(@otp_app)
IO.puts("Running seed scripts in #{directory}..")
Path.join(directory, "seeds.exs")
|> Path.wildcard()
|> Enum.sort()
|> Enum.each(fn path ->
IO.puts("Requiring #{path}..")
Code.require_file(path)
end)
end
defp seed_script_path(app), do: priv_dir(app, ["repo"])
defp priv_dir(app, path) when is_list(path) do
case :code.priv_dir(app) do
priv_path when is_list(priv_path) or is_binary(priv_path) ->
Path.join([priv_path] ++ path)
{:error, :bad_name} ->
raise ArgumentError, "unknown application: #{inspect(app)}"
end
end
end
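
Presumably (based on standard `mix release` overlay conventions; the actual scripts live under `rel/`), the `bin/migrate` and `bin/seed` commands used in the README reduce to release eval calls:

```elixir
# Assumed equivalents of what bin/migrate and bin/seed run:
#   bin/api eval "Domain.Release.migrate()"
#   bin/api eval "Domain.Release.seed()"
Domain.Release.migrate()
Domain.Release.seed()
```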


@@ -3,7 +3,14 @@ defmodule Domain.Resources do
alias Domain.Resources.{Authorizer, Resource}
def fetch_resource_by_id(id, %Auth.Subject{} = subject) do
with :ok <- Auth.ensure_has_permissions(subject, Authorizer.manage_resources_permission()),
required_permissions =
{:one_of,
[
Authorizer.manage_resources_permission(),
Authorizer.view_available_resources_permission()
]}
with :ok <- Auth.ensure_has_permissions(subject, required_permissions),
true <- Validator.valid_uuid?(id) do
Resource.Query.by_id(id)
|> Authorizer.for_subject(subject)
@@ -24,7 +31,15 @@ defmodule Domain.Resources do
end
def list_resources(%Auth.Subject{} = subject) do
with :ok <- Auth.ensure_has_permissions(subject, Authorizer.manage_resources_permission()) do
required_permissions =
{:one_of,
[
Authorizer.manage_resources_permission(),
Authorizer.view_available_resources_permission()
]}
with :ok <- Auth.ensure_has_permissions(subject, required_permissions) do
# TODO: maybe we need to also enrich the data and show if it's online or not
Resource.Query.all()
|> Authorizer.for_subject(subject)
|> Repo.list()


@@ -3,6 +3,7 @@ defmodule Domain.Resources.Authorizer do
alias Domain.Resources.Resource
def manage_resources_permission, do: build(Resource, :manage)
def view_available_resources_permission, do: build(Resource, :view_available_resources)
@impl Domain.Auth.Authorizer
def list_permissions_for_role(:account_admin_user) do
@@ -11,6 +12,12 @@ defmodule Domain.Resources.Authorizer do
]
end
def list_permissions_for_role(:account_user) do
[
view_available_resources_permission()
]
end
def list_permissions_for_role(_) do
[]
end
@@ -20,6 +27,10 @@ defmodule Domain.Resources.Authorizer do
cond do
has_permission?(subject, manage_resources_permission()) ->
Resource.Query.by_account_id(queryable, subject.account.id)
# TODO: for end users we must return only the resources that the user has access to (evaluate the policy)
has_permission?(subject, view_available_resources_permission()) ->
Resource.Query.by_account_id(queryable, subject.account.id)
end
end
end


@@ -3,6 +3,7 @@ defmodule Domain.Version do
user_agent
|> String.split(" ")
|> Enum.find_value(fn
"relay/" <> version -> version
"connlib/" <> version -> version
_ -> nil
end)
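
A worked example of the parsing above (the enclosing function name is outside this hunk):

```elixir
# The user agent's last component carries the client version.
"Linux/5.2.6 (Debian; x86_64) relay/0.7.412"
|> String.split(" ")
|> Enum.find_value(fn
  "relay/" <> version -> version
  "connlib/" <> version -> version
  _ -> nil
end)
#=> "0.7.412"
```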


@@ -26,7 +26,7 @@ defmodule Domain.MixProject do
def version do
# Use dummy version for dev and test
System.get_env("VERSION", "0.0.0+git.0.deadbeef")
System.get_env("APPLICATION_VERSION", "0.0.0+git.0.deadbeef")
end
def application do
@@ -50,8 +50,6 @@ defmodule Domain.MixProject do
{:postgrex, "~> 0.16"},
{:decimal, "~> 2.0"},
{:ecto_sql, "~> 3.7"},
{:cloak, "~> 1.1"},
{:cloak_ecto, "~> 1.2"},
# PubSub and Presence
{:phoenix, "~> 1.7", runtime: false},
@@ -62,13 +60,21 @@ defmodule Domain.MixProject do
{:openid_connect, github: "firezone/openid_connect", branch: "master"},
{:argon2_elixir, "~> 2.0"},
# Other deps
{:telemetry, "~> 1.0"},
# Erlang Clustering
{:libcluster, "~> 3.3"},
# Product Analytics
{:posthog, "~> 0.1"},
# Runtime debugging
# Observability and Runtime debugging
{:telemetry, "~> 1.0"},
{:logger_json, "~> 5.1"},
{:recon, "~> 2.5"},
{:observer_cli, "~> 1.7"},
{:opentelemetry, "~> 1.3"},
{:opentelemetry_exporter, "~> 1.5"},
{:opentelemetry_ecto, "~> 1.1"},
{:opentelemetry_finch, "~> 0.2.0"},
# Test and dev deps
{:bypass, "~> 2.1", only: :test}


@@ -84,7 +84,20 @@ relay_group =
|> Repo.insert!()
IO.puts("Created relay groups:")
IO.puts(" #{relay_group.name} token: #{hd(relay_group.tokens).value}")
IO.puts(" #{relay_group.name} token: #{Relays.encode_token!(hd(relay_group.tokens))}")
IO.puts("")
{:ok, relay} =
Relays.upsert_relay(hd(relay_group.tokens), %{
ipv4: {189, 172, 73, 111},
ipv6: {0, 0, 0, 0, 0, 0, 0, 1},
last_seen_user_agent: "iOS/12.7 (iPhone) connlib/0.7.412",
last_seen_remote_ip: %Postgrex.INET{address: {189, 172, 73, 111}}
})
IO.puts("Created relays:")
IO.puts(" Group #{relay_group.name}:")
IO.puts(" IPv4: #{relay.ipv4} IPv6: #{relay.ipv6}")
IO.puts("")
gateway_group =
@@ -93,7 +106,11 @@ gateway_group =
|> Repo.insert!()
IO.puts("Created gateway groups:")
IO.puts(" #{gateway_group.name_prefix} token: #{hd(gateway_group.tokens).value}")
IO.puts(
" #{gateway_group.name_prefix} token: #{Gateways.encode_token!(hd(gateway_group.tokens))}"
)
IO.puts("")
{:ok, gateway} =


@@ -0,0 +1,181 @@
defmodule Domain.Cluster.GoogleComputeLabelsStrategyTest do
use ExUnit.Case, async: true
import Domain.Cluster.GoogleComputeLabelsStrategy
alias Domain.Cluster.GoogleComputeLabelsStrategy.Meta
alias Cluster.Strategy.State
alias Domain.Mocks.GoogleCloudPlatform
describe "refresh_access_token/1" do
test "returns access token" do
bypass = Bypass.open()
GoogleCloudPlatform.mock_instance_metadata_token_endpoint(bypass)
state = %State{meta: %Meta{}}
assert {:ok, state} = refresh_access_token(state)
assert state.meta.access_token == "GCP_ACCESS_TOKEN"
expected_access_token_expires_at = DateTime.add(DateTime.utc_now(), 3595, :second)
assert DateTime.diff(state.meta.access_token_expires_at, expected_access_token_expires_at) in -2..2
assert_receive {:bypass_request, conn}
assert {"metadata-flavor", "Google"} in conn.req_headers
end
test "returns error when endpoint is not available" do
bypass = Bypass.open()
Bypass.down(bypass)
GoogleCloudPlatform.override_endpoint_url(
:token_endpoint_url,
"http://localhost:#{bypass.port}/"
)
state = %State{meta: %Meta{}}
assert refresh_access_token(state) ==
{:error, %Mint.TransportError{reason: :econnrefused}}
end
end
describe "fetch_nodes/1" do
test "returns list of nodes in all regions when access token is not set" do
bypass = Bypass.open()
GoogleCloudPlatform.mock_instance_metadata_token_endpoint(bypass)
GoogleCloudPlatform.mock_instances_list_endpoint(bypass)
state = %State{
meta: %Meta{},
config: [
project_id: "firezone-staging",
cluster_name: "firezone"
]
}
assert {:ok, nodes, state} = fetch_nodes(state)
assert nodes == [
:"api@api-q3j6.us-east1-d.c.firezone-staging.internal"
]
assert state.meta.access_token
assert state.meta.access_token_expires_at
end
test "retruns list of nodes when token is not expired" do
bypass = Bypass.open()
GoogleCloudPlatform.mock_instances_list_endpoint(bypass)
state = %State{
meta: %Meta{
access_token: "ACCESS_TOKEN",
access_token_expires_at: DateTime.utc_now() |> DateTime.add(5, :second)
},
config: [
project_id: "firezone-staging",
cluster_name: "firezone",
backoff_interval: 1
]
}
assert {:ok, nodes, ^state} = fetch_nodes(state)
assert nodes == [
:"api@api-q3j6.us-east1-d.c.firezone-staging.internal"
]
assert_receive {:bypass_request, conn}
assert {"authorization", "Bearer ACCESS_TOKEN"} in conn.req_headers
end
test "returns error when compute endpoint is down" do
bypass = Bypass.open()
Bypass.down(bypass)
GoogleCloudPlatform.override_endpoint_url(
:aggregated_list_endpoint_url,
"http://localhost:#{bypass.port}/"
)
state = %State{
meta: %Meta{
access_token: "ACCESS_TOKEN",
access_token_expires_at: DateTime.utc_now() |> DateTime.add(5, :second)
},
config: [
project_id: "firezone-staging",
cluster_name: "firezone",
backoff_interval: 1
]
}
assert fetch_nodes(state) == {:error, %Mint.TransportError{reason: :econnrefused}}
GoogleCloudPlatform.override_endpoint_url(
:token_endpoint_url,
"http://localhost:#{bypass.port}/"
)
state = %State{
meta: %Meta{},
config: [
project_id: "firezone-staging",
cluster_name: "firezone",
backoff_interval: 1
]
}
assert fetch_nodes(state) == {:error, %Mint.TransportError{reason: :econnrefused}}
end
test "refreshes the access token if it expired" do
bypass = Bypass.open()
GoogleCloudPlatform.mock_instance_metadata_token_endpoint(bypass)
GoogleCloudPlatform.mock_instances_list_endpoint(bypass)
state = %State{
meta: %Meta{
access_token: "ACCESS_TOKEN",
access_token_expires_at: DateTime.utc_now() |> DateTime.add(-5, :second)
},
config: [
project_id: "firezone-staging",
cluster_name: "firezone",
backoff_interval: 1
]
}
assert {:ok, _nodes, updated_state} = fetch_nodes(state)
assert updated_state.meta.access_token != state.meta.access_token
assert updated_state.meta.access_token_expires_at != state.meta.access_token_expires_at
end
test "refreshes the access token if it became invalid even through did not expire" do
resp = %{
"error" => %{
"code" => 401,
"status" => "UNAUTHENTICATED"
}
}
bypass = Bypass.open()
GoogleCloudPlatform.mock_instance_metadata_token_endpoint(bypass)
GoogleCloudPlatform.mock_instances_list_endpoint(bypass, resp)
state = %State{
meta: %Meta{
access_token: "ACCESS_TOKEN",
access_token_expires_at: DateTime.utc_now() |> DateTime.add(5, :second)
},
config: [
project_id: "firezone-staging",
cluster_name: "firezone",
backoff_interval: 1
]
}
assert {:error, _reason} = fetch_nodes(state)
end
end
end


@@ -57,7 +57,15 @@ defmodule Domain.ResourcesTest do
assert fetch_resource_by_id(Ecto.UUID.generate(), subject) ==
{:error,
{:unauthorized,
[missing_permissions: [Resources.Authorizer.manage_resources_permission()]]}}
[
missing_permissions: [
{:one_of,
[
Resources.Authorizer.manage_resources_permission(),
Resources.Authorizer.view_available_resources_permission()
]}
]
]}}
end
end
@@ -83,7 +91,22 @@ defmodule Domain.ResourcesTest do
assert list_resources(subject) == {:ok, []}
end
test "returns all resources", %{
test "returns all resources for account admin subject", %{
account: account
} do
actor = ActorsFixtures.create_actor(type: :account_user, account: account)
identity = AuthFixtures.create_identity(account: account, actor: actor)
subject = AuthFixtures.create_subject(identity)
ResourcesFixtures.create_resource(account: account)
ResourcesFixtures.create_resource(account: account)
ResourcesFixtures.create_resource()
assert {:ok, resources} = list_resources(subject)
assert length(resources) == 2
end
test "returns all resources for account user subject", %{
account: account,
subject: subject
} do
@@ -103,7 +126,15 @@ defmodule Domain.ResourcesTest do
assert list_resources(subject) ==
{:error,
{:unauthorized,
[missing_permissions: [Resources.Authorizer.manage_resources_permission()]]}}
[
missing_permissions: [
{:one_of,
[
Resources.Authorizer.manage_resources_permission(),
Resources.Authorizer.view_available_resources_permission()
]}
]
]}}
end
end


@@ -0,0 +1,159 @@
defmodule Domain.Mocks.GoogleCloudPlatform do
def override_endpoint_url(endpoint, url) do
config = Domain.Config.fetch_env!(:domain, Domain.Cluster.GoogleComputeLabelsStrategy)
strategy_config = Keyword.put(config, endpoint, url)
Domain.Config.put_env_override(
:domain,
Domain.Cluster.GoogleComputeLabelsStrategy,
strategy_config
)
end
def mock_instance_metadata_token_endpoint(bypass, resp \\ nil) do
token_endpoint_path = "computeMetadata/v1/instance/service-accounts/default/token"
resp =
resp ||
%{
"access_token" => "GCP_ACCESS_TOKEN",
"expires_in" => 3595,
"token_type" => "Bearer"
}
test_pid = self()
Bypass.expect(bypass, "GET", token_endpoint_path, fn conn ->
conn = Plug.Conn.fetch_query_params(conn)
send(test_pid, {:bypass_request, conn})
Plug.Conn.send_resp(conn, 200, Jason.encode!(resp))
end)
override_endpoint_url(
:token_endpoint_url,
"http://localhost:#{bypass.port}/#{token_endpoint_path}"
)
bypass
end
def mock_instances_list_endpoint(bypass, resp \\ nil) do
aggregated_instances_endpoint_path =
"compute/v1/projects/firezone-staging/aggregated/instances"
project_endpoint = "https://www.googleapis.com/compute/v1/projects/firezone-staging"
resp =
resp ||
%{
"kind" => "compute#instanceAggregatedList",
"id" => "projects/firezone-staging/aggregated/instances",
"items" => %{
"zones/us-east1-c" => %{
"warning" => %{
"code" => "NO_RESULTS_ON_PAGE"
}
},
"zones/us-east1-d" => %{
"instances" => [
%{
"kind" => "compute#instance",
"id" => "101389045528522181",
"creationTimestamp" => "2023-06-02T13:38:02.907-07:00",
"name" => "api-q3j6",
"tags" => %{
"items" => [
"app-api"
],
"fingerprint" => "utkJlpAke8c="
},
"machineType" =>
"#{project_endpoint}/zones/us-east1-d/machineTypes/n1-standard-1",
"status" => "RUNNING",
"zone" => "#{project_endpoint}/zones/us-east1-d",
"networkInterfaces" => [
%{
"kind" => "compute#networkInterface",
"network" => "#{project_endpoint}/global/networks/firezone-staging",
"subnetwork" => "#{project_endpoint}/regions/us-east1/subnetworks/app",
"networkIP" => "10.128.0.43",
"name" => "nic0",
"fingerprint" => "_4XbqLiVdkI=",
"stackType" => "IPV4_ONLY"
}
],
"disks" => [],
"metadata" => %{
"kind" => "compute#metadata",
"fingerprint" => "3mI-QpsQdDk=",
"items" => []
},
"serviceAccounts" => [
%{
"email" => "app-api@firezone-staging.iam.gserviceaccount.com",
"scopes" => [
"https://www.googleapis.com/auth/compute.readonly",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/trace.append"
]
}
],
"selfLink" => "#{project_endpoint}/zones/us-east1-d/instances/api-q3j6",
"scheduling" => %{
"onHostMaintenance" => "MIGRATE",
"automaticRestart" => true,
"preemptible" => false,
"provisioningModel" => "STANDARD"
},
"cpuPlatform" => "Intel Haswell",
"labels" => %{
"application" => "api",
"cluster_name" => "firezone",
"container-vm" => "cos-105-17412-101-13",
"managed_by" => "terraform",
"version" => "0-0-1"
},
"labelFingerprint" => "ISmB9O6lTvg=",
"startRestricted" => false,
"deletionProtection" => false,
"shieldedInstanceConfig" => %{
"enableSecureBoot" => false,
"enableVtpm" => true,
"enableIntegrityMonitoring" => true
},
"shieldedInstanceIntegrityPolicy" => %{
"updateAutoLearnPolicy" => true
},
"fingerprint" => "fK6yUz9ED6s=",
"lastStartTimestamp" => "2023-06-02T13:38:06.900-07:00"
}
]
},
"zones/asia-northeast1-a" => %{
"warning" => %{
"code" => "NO_RESULTS_ON_PAGE"
}
}
}
}
test_pid = self()
Bypass.expect(bypass, "GET", aggregated_instances_endpoint_path, fn conn ->
conn = Plug.Conn.fetch_query_params(conn)
send(test_pid, {:bypass_request, conn})
Plug.Conn.send_resp(conn, 200, Jason.encode!(resp))
end)
override_endpoint_url(
:aggregated_list_endpoint_url,
"http://localhost:#{bypass.port}/#{aggregated_instances_endpoint_path}"
)
bypass
end
end


@@ -1,3 +1,4 @@
@import "@fontsource/source-sans-pro";
@import "tailwindcss/base";
@import "tailwindcss/components";
@import "tailwindcss/utilities";
@import "tailwindcss/utilities";

View File

@@ -1,19 +1,5 @@
// If you want to use Phoenix channels, run `mix help phx.gen.channel`
// to get started and then uncomment the line below.
// import "./user_socket.js"
// You can include dependencies in two ways.
//
// The simplest option is to put them in assets/vendor and
// import them using relative paths:
//
// import "../vendor/some-package.js"
//
// Alternatively, you can `npm install some-package --prefix assets` and import
// them using a path starting with the package name:
//
// import "some-package"
//
// IMPORTANT: DO NOT INCLUDE ANY CSS FILES HERE
// Otherwise, esbuild will also build app.css and override anything that tailwind generated.
// Include phoenix_html to handle method=PUT/DELETE in forms and buttons.
import "phoenix_html"
@@ -21,14 +7,12 @@ import "phoenix_html"
// Flowbite's Phoenix LiveView integration
import "flowbite/dist/flowbite.phoenix.js"
// Custom fonts
import "@fontsource/source-sans-pro"
// Establish Phoenix Socket and LiveView configuration.
import {Socket} from "phoenix"
import {LiveSocket} from "phoenix_live_view"
import topbar from "../vendor/topbar"
// Read CSRF token from the meta tag and use it in the LiveSocket params
let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content")
let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}})

View File

@@ -3,6 +3,10 @@ defmodule Web.Application do
@impl true
def start(_type, _args) do
_ = OpentelemetryLiveView.setup()
_ = :opentelemetry_cowboy.setup()
_ = OpentelemetryPhoenix.setup(adapter: :cowboy2)
children = [
Web.Telemetry,
{Phoenix.PubSub, name: Web.PubSub},

View File

@@ -0,0 +1,9 @@
defmodule Web.HealthController do
use Web, :controller
def healthz(conn, _params) do
conn
|> put_resp_content_type("application/json")
|> send_resp(200, Jason.encode!(%{status: "ok"}))
end
end

View File

@@ -1,6 +1,11 @@
defmodule Web.Endpoint do
use Phoenix.Endpoint, otp_app: :web
plug Plug.RewriteOn, [:x_forwarded_host, :x_forwarded_port, :x_forwarded_proto]
plug Plug.MethodOverride
plug :put_hsts_header
plug Plug.Head
socket "/live", Phoenix.LiveView.Socket,
websocket: [
connect_info: [
@@ -46,14 +51,33 @@ defmodule Web.Endpoint do
pass: ["*/*"],
json_decoder: Phoenix.json_library()
plug Plug.MethodOverride
plug Plug.Head
# TODO: ensure that Phoenix is configured to resolve opts at runtime
plug Plug.Session, Web.Session.options()
# We wrap Plug.Session because its options are resolved at compile-time,
# which doesn't work with Elixir releases and runtime configuration
plug :session
plug Web.Router
def put_hsts_header(conn, _opts) do
scheme =
config(:url, [])
|> Keyword.get(:scheme)
if scheme == "https" do
put_resp_header(
conn,
"strict-transport-security",
"max-age=63072000; includeSubDomains; preload"
)
else
conn
end
end
def session(conn, _opts) do
opts = Web.Session.options()
Plug.Session.call(conn, Plug.Session.init(opts))
end
def external_trusted_proxies do
Domain.Config.fetch_env!(:web, :external_trusted_proxies)
|> Enum.map(&to_string/1)
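For context on the wrapper above, a minimal sketch of why `plug Plug.Session, Web.Session.options()` cannot work here, assuming standard Plug.Builder semantics:
# Compile-time form (broken for releases): the plug macro evaluates its
# options argument while Web.Endpoint compiles, so Web.Session.options/0
# would run on the build machine and its result would be baked in:
#
#     plug Plug.Session, Web.Session.options()
#
# The session/2 function plug above instead calls Plug.Session.init/1 per
# request, so the options reflect the booted release's runtime config.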

View File

@@ -15,7 +15,7 @@ defmodule Web.Router do
# TODO: auth
end
pipeline :browser_static do
pipeline :public do
plug :accepts, ["html", "xml"]
end
@@ -72,8 +72,10 @@ defmodule Web.Router do
end
scope "/browser", Web do
pipe_through :browser_static
pipe_through :public
get "/config.xml", BrowserController, :config
end
get "/healthz", Web.HealthController, :healthz
end

View File

@@ -2,9 +2,7 @@ defmodule Web.Session do
# 4 hours
@max_cookie_age 14_400
# The session will be stored in the cookie and signed,
# this means its contents can be read but not tampered with.
# Set :encryption_salt if you would also like to encrypt it.
# The session will be stored in the cookie signed and encrypted for 4 hours
@session_options [
store: :cookie,
key: "_firezone_key",

View File

@@ -18,7 +18,7 @@ defmodule Web.MixProject do
end
def version do
System.get_env("VERSION", "0.0.0+git.0.deadbeef")
System.get_env("APPLICATION_VERSION", "0.0.0+git.0.deadbeef")
end
def application do
@@ -47,7 +47,7 @@ defmodule Web.MixProject do
{:remote_ip, "~> 1.0"},
# Asset pipeline deps
{:esbuild, "~> 0.5", runtime: Mix.env() == :dev},
{:esbuild, "~> 0.7", runtime: Mix.env() == :dev},
{:tailwind, "~> 0.2.0", runtime: Mix.env() == :dev},
# Observability and debugging deps
@@ -60,6 +60,12 @@ defmodule Web.MixProject do
{:phoenix_swoosh, "~> 1.0"},
{:gen_smtp, "~> 1.0"},
# Observability
{:opentelemetry_cowboy, "~> 0.2.1"},
{:opentelemetry_liveview, "~> 1.0.0-rc.4"},
{:opentelemetry_phoenix, "~> 1.1"},
{:nimble_options, "~> 1.0", override: true},
# Other deps
{:jason, "~> 1.2"},
{:file_size, "~> 3.0.1"},
@@ -76,12 +82,12 @@ defmodule Web.MixProject do
[
setup: ["deps.get", "assets.setup", "assets.build"],
"assets.setup": [
"cmd cd assets && yarn install",
"cmd cd assets && yarn install --frozen-lockfile",
"tailwind.install --if-missing",
"esbuild.install --if-missing"
],
"assets.build": ["tailwind default", "esbuild default"],
"assets.deploy": ["tailwind default --minify", "esbuild default --minify", "phx.digest"],
"assets.build": ["tailwind web", "esbuild web"],
"assets.deploy": ["tailwind web --minify", "esbuild web --minify", "phx.digest"],
"ecto.seed": ["ecto.create", "ecto.migrate", "run ../domain/priv/repo/seeds.exs"],
"ecto.setup": ["ecto.create", "ecto.migrate"],
"ecto.reset": ["ecto.drop", "ecto.setup"],

View File

@@ -0,0 +1,10 @@
defmodule Web.HealthControllerTest do
use Web.ConnCase, async: true
describe "healthz/2" do
test "returns valid JSON health status", %{conn: conn} do
test_conn = get(conn, ~p"/healthz")
assert json_response(test_conn, 200) == %{"status" => "ok"}
end
end
end

View File

@@ -27,7 +27,7 @@ config :domain, Domain.Repo,
queue_target: 500,
queue_interval: 1000,
migration_timestamps: [type: :timestamptz],
start_apps_before_migration: [:ssl]
start_apps_before_migration: [:ssl, :logger_json]
config :domain, Domain.Devices, upstream_dns: ["1.1.1.1"]
@@ -56,11 +56,6 @@ config :domain, Domain.Auth,
config :web, ecto_repos: [Domain.Repo]
config :web, generators: [binary_id: true, context_app: :domain]
config :web,
external_url: "http://localhost:13000/",
# TODO: use endpoint path instead?
path_prefix: "/"
config :web, Web.Endpoint,
url: [
scheme: "http",
@@ -117,14 +112,42 @@ config :api,
cookie_signing_salt: "WjllcThpb2Y=",
cookie_encryption_salt: "M0EzM0R6NEMyaw=="
config :api,
external_trusted_proxies: [],
private_clients: [%{__struct__: Postgrex.INET, address: {172, 28, 0, 0}, netmask: 16}]
###############################
##### Erlang Cluster ##########
###############################
config :domain, Domain.Cluster.GoogleComputeLabelsStrategy,
token_endpoint_url:
"http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token",
aggregated_list_endpoint_url:
"https://compute.googleapis.com/compute/v1/projects/${project_id}/aggregated/instances"
config :domain, Domain.Cluster,
adapter: Domain.Cluster.Local,
adapter_config: []
###############################
##### Third-party configs #####
###############################
config :domain,
http_client_ssl_opts: []
config :openid_connect,
finch_transport_opts: []
config :mime, :types, %{
"application/xml" => ["xml"]
}
config :opentelemetry,
span_processor: :batch,
traces_exporter: :none
config :logger, :console,
level: String.to_atom(System.get_env("LOG_LEVEL", "info")),
format: "$time $metadata[$level] $message\n",
@@ -142,24 +165,35 @@ config :web, Web.Mailer,
adapter: Web.Mailer.NoopAdapter,
from_email: "test@firez.one"
# TODO: actually copy fonts here, otherwise:
# Failed to load resource: the server responded with a status of 404 ()
# source-sans-pro-all-400-normal.woff:1 Failed to load resource: the server responded with a status of 404 ()
config :esbuild,
version: "0.14.41",
default: [
args:
~w(js/app.js --bundle --loader:.woff2=file --loader:.woff=file --target=es2017 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*),
version: "0.17.19",
web: [
args: [
"js/app.js",
"--bundle",
"--loader:.woff2=file",
"--loader:.woff=file",
"--target=es2017",
"--outdir=../priv/static/assets",
"--external:/fonts/*",
"--external:/images/*"
],
cd: Path.expand("../apps/web/assets", __DIR__),
env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)}
]
# Configure tailwind (the version is required)
config :tailwind,
version: "3.2.4",
default: [
args: ~w(
--config=tailwind.config.js
--input=css/app.css
--output=../priv/static/assets/app.css
),
version: "3.3.2",
web: [
args: [
"--config=tailwind.config.js",
"--input=css/app.css",
"--output=../priv/static/assets/app.css"
],
cd: Path.expand("../apps/web/assets", __DIR__)
]

View File

@@ -23,8 +23,8 @@ config :web, Web.Endpoint,
debug_errors: true,
check_origin: ["//127.0.0.1", "//localhost"],
watchers: [
esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]},
tailwind: {Tailwind, :install_and_run, [:default, ~w(--watch)]}
esbuild: {Esbuild, :install_and_run, [:web, ~w(--sourcemap=inline --watch)]},
tailwind: {Tailwind, :install_and_run, [:web, ~w(--watch)]}
],
live_reload: [
patterns: [
@@ -32,7 +32,8 @@ config :web, Web.Endpoint,
~r"apps/web/priv/gettext/.*(po)$",
~r"apps/web/lib/web/.*(ex|eex|heex)$"
]
]
],
server: true
###############################
##### API #####################
@@ -40,12 +41,13 @@ config :web, Web.Endpoint,
config :api, dev_routes: true
config :api, Web.Endpoint,
config :api, API.Endpoint,
http: [port: 13001],
debug_errors: true,
code_reloader: true,
check_origin: ["//127.0.0.1", "//localhost"],
watchers: []
watchers: [],
server: true
###############################
##### Third-party configs #####

View File

@@ -16,10 +16,42 @@ config :web, Web.Endpoint,
cache_static_manifest: "priv/static/cache_manifest.json",
server: true
###############################
##### API #####################
###############################
config :api, API.Endpoint, server: true
###############################
##### Third-party configs #####
###############################
config :phoenix, :filter_parameters, [
"password",
"secret",
"token",
"public_key",
"private_key",
"preshared_key"
]
# Do not print debug messages in production and handle all
# other reports by the Elixir Logger with the JSON backend so that
# we can parse them in log analysis tools.
# Notice: SASL reports are turned off because of their verbosity.
# Notice: the log level can be overridden in production with the LOG_LEVEL environment variable.
config :logger,
backends: [LoggerJSON],
utc_log: true,
level: :info,
handle_sasl_reports: false,
handle_otp_reports: true
config :logger_json, :backend,
json_encoder: Jason,
formatter: LoggerJSON.Formatters.GoogleCloudLogger,
metadata: :all
config :logger, level: :info
config :swoosh, local: false
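A hedged illustration of what the LoggerJSON backend configured above emits; the metadata keys are illustrative, not taken from this commit:
# With the GoogleCloudLogger formatter, plain Logger calls come out as
# single-line JSON that Stackdriver can parse:
require Logger
Logger.metadata(request_id: "req-123")        # illustrative metadata
Logger.info("cluster joined", node_count: 3)
# => {"time":"...","severity":"INFO","message":"cluster joined",...}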

View File

@@ -52,12 +52,7 @@ if config_env() == :prod do
##### Web #####################
###############################
config :web,
external_url: external_url,
path_prefix: external_url_path
config :web, Web.Endpoint,
server: true,
http: [
ip: compile_config!(:phoenix_listen_address).address,
port: compile_config!(:phoenix_http_web_port),
@@ -88,13 +83,11 @@ if config_env() == :prod do
###############################
config :api, API.Endpoint,
server: true,
http: [
ip: compile_config!(:phoenix_listen_address).address,
port: compile_config!(:phoenix_http_api_port),
protocol_options: compile_config!(:phoenix_http_protocol_options)
],
# TODO: force_ssl: [rewrite_on: [:x_forwarded_proto], hsts: true],
url: [
scheme: external_url_scheme,
host: external_url_host,
@@ -108,10 +101,34 @@ if config_env() == :prod do
cookie_signing_salt: compile_config!(:cookie_signing_salt),
cookie_encryption_salt: compile_config!(:cookie_encryption_salt)
config :api,
external_trusted_proxies: compile_config!(:phoenix_external_trusted_proxies),
private_clients: compile_config!(:phoenix_private_clients)
###############################
##### Erlang Cluster ##########
###############################
config :domain, Domain.Cluster,
adapter: compile_config!(:erlang_cluster_adapter),
adapter_config: compile_config!(:erlang_cluster_adapter_config)
###############################
##### Third-party configs #####
###############################
if System.get_env("OTLP_ENDPOINT") do
config :opentelemetry,
traces_exporter: :otlp
config :opentelemetry_exporter,
otlp_protocol: :http_protobuf,
otlp_endpoint: System.get_env("OTLP_ENDPOINT")
end
config :domain,
http_client_ssl_opts: compile_config!(:http_client_ssl_opts)
config :openid_connect,
finch_transport_opts: compile_config!(:http_client_ssl_opts)
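For reference, a hedged sketch of the discovery flow that the two cluster endpoints configured in config.exs support; the label names and hostname scheme come from the mock payloads and env.sh.eex elsewhere in this commit, while the step breakdown itself is an assumption:
# Assumed flow of Domain.Cluster.GoogleComputeLabelsStrategy:
#
#  1. GET token_endpoint_url on the metadata server -> service-account token
#  2. GET aggregated_list_endpoint_url with that token, filtering instances
#     by labels such as cluster_name/application (see the mock payload in
#     the tests)
#  3. Derive node names matching the env.sh.eex hostname scheme, e.g.
#     :"api@api-q3j6.us-east1-d.c.firezone-staging.internal"
#  4. Connect to each discovered node over Erlang distribution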

View File

@@ -1,153 +0,0 @@
version: '3.7'
services:
caddy:
image: caddy:2
volumes:
- ./priv/Caddyfile:/etc/caddy/Caddyfile
- ./priv/pki:/data/caddy/pki
ports:
- 80:80
- 443:443
networks:
app:
ipv4_address: 172.28.0.99
ipv6_address: 2001:3990:3990::99
firezone:
build:
context: .
dockerfile: Dockerfile.dev
args:
DATABASE_HOST: postgres
DATABASE_PORT: 5432
DATABASE_NAME: firezone_dev
DATABASE_USER: postgres
DATABASE_PASSWORD: postgres
image: firezone_dev
volumes:
- ./priv:/var/app/priv
- ./apps:/var/app/apps
- ./config:/var/app/config
- ./mix.exs:/var/app/mix.exs
- ./mix.lock:/var/app/mix.lock
# Mask the following build directories to keep compiled binaries isolated
# from the local project. This is needed when the Docker Host platform
# doesn't match the platform under which Docker Engine is running. e.g.
# WSL, Docker for Mac, etc.
- /var/app/apps/web/assets/node_modules
ports:
- 51820:51820/udp
environment:
EXTERNAL_URL: ${EXTERNAL_URL:-https://localhost}
LOCAL_AUTH_ENABLED: 'true'
FZ_WALL_CLI_MODULE: FzWall.CLI.Live
cap_add:
- NET_ADMIN
- SYS_MODULE
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv4.ip_forward=1
- net.ipv6.conf.all.forwarding=1
depends_on:
postgres:
condition: 'service_healthy'
networks:
- app
- isolation
postgres:
image: postgres:15
volumes:
- postgres-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: firezone_dev
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
ports:
- 5432:5432
networks:
- app
# Vault can act as an OIDC IdP as well
vault:
image: vault
environment:
VAULT_ADDR: 'http://127.0.0.1:8200'
VAULT_DEV_ROOT_TOKEN_ID: 'firezone'
ports:
- 8200:8200/tcp
cap_add:
- IPC_LOCK
networks:
- app
saml-idp:
# This is a container with this PR merged: https://github.com/kristophjunge/docker-test-saml-idp/pull/27
image: vihangk1/docker-test-saml-idp:latest
environment:
SIMPLESAMLPHP_SP_ENTITY_ID: 'urn:firezone.dev:firezone-app'
SIMPLESAMLPHP_SP_ASSERTION_CONSUMER_SERVICE: 'http://localhost:13000/auth/saml/sp/consume/mysamlidp'
SIMPLESAMLPHP_SP_SINGLE_LOGOUT_SERVICE: 'http://localhost:13000/auth/saml/sp/logout/mysamlidp'
SIMPLESAMLPHP_SP_NAME_ID_FORMAT: 'urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress'
SIMPLESAMLPHP_SP_NAME_ID_ATTRIBUTE: 'email'
SIMPLESAMLPHP_IDP_AUTH: 'example-userpass'
ports:
- 8400:8080/tcp
- 8443:8443/tcp
networks:
- app
# Unfortunately the Linux VM kernel for Docker Desktop is not compiled with
# Dynamic Debug enabled, so we're unable to enable WireGuard debug logging.
# Since WireGuard is designed to be silent by default, this basically does
# nothing.
# wireguard-log:
# image: ubuntu:jammy
# # cap SYSLOG was enough for reading but privilege is required for tailing
# privileged: true
# command: >
# bash -c '
# mount -t debugfs none /sys/kernel/debug
# && echo module wireguard +p > /sys/kernel/debug/dynamic_debug/control
# && dmesg -wT | grep wireguard:'
client:
depends_on:
- firezone
image: linuxserver/wireguard:latest
environment:
- PUID=1000
- PGID=1000
- TZ=UTC
- ALLOWEDIPS="0.0.0.0/0,::/0"
volumes:
- ./priv/wg0.client.conf:/config/wg0.conf
cap_add:
- NET_ADMIN
- SYS_MODULE
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
- net.ipv4.conf.all.src_valid_mark=1
networks:
- isolation
volumes:
postgres-data:
# Disabled due to Authentik being disabled
# redis-data:
networks:
app:
enable_ipv6: true
ipam:
config:
- subnet: 172.28.0.0/16
- subnet: 2001:3990:3990::/64
isolation:

View File

@@ -3,7 +3,7 @@ defmodule Firezone.MixProject do
def version do
# Use dummy version for dev and test
System.get_env("VERSION", "0.0.0+git.0.deadbeef")
System.get_env("APPLICATION_VERSION", "0.0.0+git.0.deadbeef")
end
def project do
@@ -28,17 +28,7 @@ defmodule Firezone.MixProject do
plt_file: {:no_warn, "priv/plts/dialyzer.plt"}
],
aliases: aliases(),
default_release: :web,
releases: [
web: [
include_executables_for: [:unix],
validate_compile_env: true,
applications: [
web: :permanent
],
cookie: System.get_env("ERL_COOKIE")
]
]
releases: releases()
]
end
@@ -74,4 +64,23 @@ defmodule Firezone.MixProject do
start: ["compile --no-validate-compile-env", "phx.server", "run --no-halt"]
]
end
defp releases do
[
web: [
include_executables_for: [:unix],
validate_compile_env: true,
applications: [
web: :permanent
]
],
api: [
include_executables_for: [:unix],
validate_compile_env: true,
applications: [
api: :permanent
]
]
]
end
end

View File

@@ -1,13 +1,13 @@
%{
"acceptor_pool": {:hex, :acceptor_pool, "1.0.0", "43c20d2acae35f0c2bcd64f9d2bde267e459f0f3fd23dab26485bf518c281b21", [:rebar3], [], "hexpm", "0cbcd83fdc8b9ad2eee2067ef8b91a14858a5883cb7cd800e6fcd5803e158788"},
"argon2_elixir": {:hex, :argon2_elixir, "2.4.1", "edb27bdd326bc738f3e4614eddc2f73507be6fedc9533c6bcc6f15bbac9c85cc", [:make, :mix], [{:comeonin, "~> 5.3", [hex: :comeonin, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "0e21f52a373739d00bdfd5fe6da2f04eea623cb4f66899f7526dd9db03903d9f"},
"bunt": {:hex, :bunt, "0.2.1", "e2d4792f7bc0ced7583ab54922808919518d0e57ee162901a16a1b6664ef3b14", [:mix], [], "hexpm", "a330bfb4245239787b15005e66ae6845c9cd524a288f0d141c148b02603777a5"},
"bureaucrat": {:hex, :bureaucrat, "0.2.9", "d98e4d2b9bdbf22e4a45c2113ce8b38b5b63278506c6ff918e3b943a4355d85b", [:mix], [{:inflex, ">= 1.10.0", [hex: :inflex, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.2.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:plug, ">= 1.0.0", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 1.5 or ~> 2.0 or ~> 3.0 or ~> 4.0", [hex: :poison, repo: "hexpm", optional: true]}], "hexpm", "111c8dd84382a62e1026ae011d592ceee918553e5203fe8448d9ba6ccbdfff7d"},
"bypass": {:hex, :bypass, "2.1.0", "909782781bf8e20ee86a9cabde36b259d44af8b9f38756173e8f5e2e1fabb9b1", [:mix], [{:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "d9b5df8fa5b7a6efa08384e9bbecfe4ce61c77d28a4282f79e02f1ef78d96b80"},
"castore": {:hex, :castore, "1.0.2", "0c6292ecf3e3f20b7c88408f00096337c4bfd99bd46cc2fe63413ddbe45b3573", [:mix], [], "hexpm", "40b2dd2836199203df8500e4a270f10fc006cc95adc8a319e148dc3077391d96"},
"certifi": {:hex, :certifi, "2.9.0", "6f2a475689dd47f19fb74334859d460a2dc4e3252a3324bd2111b8f0429e7e21", [:rebar3], [], "hexpm", "266da46bdb06d6c6d35fde799bcb28d36d985d424ad7c08b5bb48f5b5cdd4641"},
"chatterbox": {:hex, :ts_chatterbox, "0.13.0", "6f059d97bcaa758b8ea6fffe2b3b81362bd06b639d3ea2bb088335511d691ebf", [:rebar3], [{:hpack, "~> 0.2.3", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "b93d19104d86af0b3f2566c4cba2a57d2e06d103728246ba1ac6c3c0ff010aa7"},
"cidr": {:git, "https://github.com/firezone/cidr-elixir.git", "a32125127a7910f476734f45391ba6d37036ee11", []},
"cloak": {:hex, :cloak, "1.1.2", "7e0006c2b0b98d976d4f559080fabefd81f0e0a50a3c4b621f85ceeb563e80bb", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "940d5ac4fcd51b252930fd112e319ea5ae6ab540b722f3ca60a85666759b9585"},
"cloak_ecto": {:hex, :cloak_ecto, "1.2.0", "e86a3df3bf0dc8980f70406bcb0af2858bac247d55494d40bc58a152590bd402", [:mix], [{:cloak, "~> 1.1.1", [hex: :cloak, repo: "hexpm", optional: false]}, {:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "8bcc677185c813fe64b786618bd6689b1707b35cd95acaae0834557b15a0c62f"},
"combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm", "1b1dbc1790073076580d0d1d64e42eae2366583e7aecd455d1215b0d16f2451b"},
"comeonin": {:hex, :comeonin, "5.3.3", "2c564dac95a35650e9b6acfe6d2952083d8a08e4a89b93a481acb552b325892e", [:mix], [], "hexpm", "3e38c9c2cb080828116597ca8807bb482618a315bfafd98c90bc22a821cc84df"},
"connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"},
@@ -15,6 +15,7 @@
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
"cowlib": {:hex, :cowlib, "2.12.1", "a9fa9a625f1d2025fe6b462cb865881329b5caff8f1854d1cbc9f9533f00e1e1", [:make, :rebar3], [], "hexpm", "163b73f6367a7341b33c794c4e88e7dbfe6498ac42dcd69ef44c5bc5507c8db0"},
"credo": {:hex, :credo, "1.7.0", "6119bee47272e85995598ee04f2ebbed3e947678dee048d10b5feca139435f75", [:mix], [{:bunt, "~> 0.2.1", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2.8", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "6839fcf63d1f0d1c0f450abc8564a57c43d644077ab96f2934563e68b8a769d7"},
"ctx": {:hex, :ctx, "0.6.0", "8ff88b70e6400c4df90142e7f130625b82086077a45364a78d208ed3ed53c7fe", [:rebar3], [], "hexpm", "a14ed2d1b67723dbebbe423b28d7615eb0bdcba6ff28f2d1f1b0a7e1d4aa5fc2"},
"db_connection": {:hex, :db_connection, "2.5.0", "bb6d4f30d35ded97b29fe80d8bd6f928a1912ca1ff110831edcd238a1973652c", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c92d5ba26cd69ead1ff7582dbb860adeedfff39774105a4f1c92cbb654b55aa2"},
"decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"},
"dialyxir": {:hex, :dialyxir, "1.3.0", "fd1672f0922b7648ff9ce7b1b26fcf0ef56dda964a459892ad15f6b4410b5284", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "00b2a4bcd6aa8db9dcb0b38c1225b7277dca9bc370b6438715667071a304696f"},
@@ -34,10 +35,13 @@
"floki": {:hex, :floki, "0.34.2", "5fad07ef153b3b8ec110b6b155ec3780c4b2c4906297d0b4be1a7162d04a7e02", [:mix], [], "hexpm", "26b9d50f0f01796bc6be611ca815c5e0de034d2128e39cc9702eee6b66a4d1c8"},
"gen_smtp": {:hex, :gen_smtp, "1.2.0", "9cfc75c72a8821588b9b9fe947ae5ab2aed95a052b81237e0928633a13276fd3", [:rebar3], [{:ranch, ">= 1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "5ee0375680bca8f20c4d85f58c2894441443a743355430ff33a783fe03296779"},
"gettext": {:hex, :gettext, "0.22.1", "e7942988383c3d9eed4bdc22fc63e712b655ae94a672a27e4900e3d4a2c43581", [:mix], [{:expo, "~> 0.4.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "ad105b8dab668ee3f90c0d3d94ba75e9aead27a62495c101d94f2657a190ac5d"},
"gproc": {:hex, :gproc, "0.8.0", "cea02c578589c61e5341fce149ea36ccef236cc2ecac8691fba408e7ea77ec2f", [:rebar3], [], "hexpm", "580adafa56463b75263ef5a5df4c86af321f68694e7786cb057fd805d1e2a7de"},
"grpcbox": {:hex, :grpcbox, "0.16.0", "b83f37c62d6eeca347b77f9b1ec7e9f62231690cdfeb3a31be07cd4002ba9c82", [:rebar3], [{:acceptor_pool, "~> 1.0.0", [hex: :acceptor_pool, repo: "hexpm", optional: false]}, {:chatterbox, "~> 0.13.0", [hex: :ts_chatterbox, repo: "hexpm", optional: false]}, {:ctx, "~> 0.6.0", [hex: :ctx, repo: "hexpm", optional: false]}, {:gproc, "~> 0.8.0", [hex: :gproc, repo: "hexpm", optional: false]}], "hexpm", "294df743ae20a7e030889f00644001370a4f7ce0121f3bbdaf13cf3169c62913"},
"guardian": {:hex, :guardian, "2.3.1", "2b2d78dc399a7df182d739ddc0e566d88723299bfac20be36255e2d052fd215d", [:mix], [{:jose, "~> 1.8", [hex: :jose, repo: "hexpm", optional: false]}, {:plug, "~> 1.3.3 or ~> 1.4", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "bbe241f9ca1b09fad916ad42d6049d2600bbc688aba5b3c4a6c82592a54274c3"},
"guardian_db": {:hex, :guardian_db, "2.1.0", "ec95a9d99cdd1e550555d09a7bb4a340d8887aad0697f594590c2fd74be02426", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.1", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:guardian, "~> 1.0 or ~> 2.0", [hex: :guardian, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.13", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "f8e7d543ac92c395f3a7fd5acbe6829faeade57d688f7562e2f0fca8f94a0d70"},
"hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~> 2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"},
"heroicons": {:hex, :heroicons, "0.5.2", "a7ae72460ecc4b74a4ba9e72f0b5ac3c6897ad08968258597da11c2b0b210683", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.18.2", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}], "hexpm", "7ef96f455c1c136c335f1da0f1d7b12c34002c80a224ad96fc0ebf841a6ffef5"},
"hpack": {:hex, :hpack_erl, "0.2.3", "17670f83ff984ae6cd74b1c456edde906d27ff013740ee4d9efaa4f1bf999633", [:rebar3], [], "hexpm", "06f580167c4b8b8a6429040df36cc93bba6d571faeaec1b28816523379cbb23a"},
"hpax": {:hex, :hpax, "0.1.2", "09a75600d9d8bbd064cdd741f21fc06fc1f4cf3d0fcc335e5aa19be1a7235c84", [:mix], [], "hexpm", "2c87843d5a23f5f16748ebe77969880e29809580efdaccd615cd3bed628a8c13"},
"httpoison": {:hex, :httpoison, "2.1.0", "655fd9a7b0b95ee3e9a3b535cf7ac8e08ef5229bab187fa86ac4208b122d934b", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "fc455cb4306b43827def4f57299b2d5ac8ac331cb23f517e734a4b78210a160c"},
"idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"},
@@ -45,6 +49,8 @@
"jason": {:hex, :jason, "1.4.0", "e855647bc964a44e2f67df589ccf49105ae039d4179db7f6271dfd3843dc27e6", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "79a3791085b2a0f743ca04cec0f7be26443738779d09302e01318f97bdb82121"},
"jose": {:hex, :jose, "1.11.5", "3bc2d75ffa5e2c941ca93e5696b54978323191988eb8d225c2e663ddfefd515e", [:mix, :rebar3], [], "hexpm", "dcd3b215bafe02ea7c5b23dafd3eb8062a5cd8f2d904fd9caa323d37034ab384"},
"junit_formatter": {:hex, :junit_formatter, "3.3.1", "c729befb848f1b9571f317d2fefa648e9d4869befc4b2980daca7c1edc468e40", [:mix], [], "hexpm", "761fc5be4b4c15d8ba91a6dafde0b2c2ae6db9da7b8832a55b5a1deb524da72b"},
"libcluster": {:hex, :libcluster, "3.3.2", "84c6ebfdc72a03805955abfb5ff573f71921a3e299279cc3445445d5af619ad1", [:mix], [{:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8b691ce8185670fc8f3fc0b7ed59eff66c6889df890d13411f8f1a0e6871d8a5"},
"logger_json": {:hex, :logger_json, "5.1.2", "7dde5f6dff814aba033f045a3af9408f5459bac72357dc533276b47045371ecf", [:mix], [{:ecto, "~> 2.1 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.5.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "ed42047e5c57a60d0fa1450aef36bc016d0f9a5e6c0807ebb0c03d8895fb6ebc"},
"makeup": {:hex, :makeup, "1.1.0", "6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"},
"makeup_elixir": {:hex, :makeup_elixir, "0.16.0", "f8c570a0d33f8039513fbccaf7108c5d750f47d8defd44088371191b76492b0b", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "28b2cbdc13960a46ae9a8858c4bebdec3c9a6d7b4b9e7f4ed1502f8159f338e7"},
"makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"},
@@ -59,6 +65,17 @@
"number": {:hex, :number, "1.0.3", "932c8a2d478a181c624138958ca88a78070332191b8061717270d939778c9857", [:mix], [{:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "dd397bbc096b2ca965a6a430126cc9cf7b9ef7421130def69bcf572232ca0f18"},
"observer_cli": {:hex, :observer_cli, "1.7.4", "3c1bfb6d91bf68f6a3d15f46ae20da0f7740d363ee5bc041191ce8722a6c4fae", [:mix, :rebar3], [{:recon, "~> 2.5.1", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", "50de6d95d814f447458bd5d72666a74624eddb0ef98bdcee61a0153aae0865ff"},
"openid_connect": {:git, "https://github.com/firezone/openid_connect.git", "13320ed8b0d347330d07e1375a9661f3089b9c03", [branch: "master"]},
"opentelemetry": {:hex, :opentelemetry, "1.3.0", "988ac3c26acac9720a1d4fb8d9dc52e95b45ecfec2d5b5583276a09e8936bc5e", [:rebar3], [{:opentelemetry_api, "~> 1.2.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "8e09edc26aad11161509d7ecad854a3285d88580f93b63b0b1cf0bac332bfcc0"},
"opentelemetry_api": {:hex, :opentelemetry_api, "1.2.1", "7b69ed4f40025c005de0b74fce8c0549625d59cb4df12d15c32fe6dc5076ff42", [:mix, :rebar3], [{:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}], "hexpm", "6d7a27b7cad2ad69a09cabf6670514cafcec717c8441beb5c96322bac3d05350"},
"opentelemetry_cowboy": {:hex, :opentelemetry_cowboy, "0.2.1", "feb09d4abe48c6d983fd46ea7b500cdf31b0f77c80702e175fe1fd86f8a52445", [:rebar3], [{:cowboy_telemetry, "~> 0.4", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.0", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "21ba198dd51294211a498dee720a30d2c2cb4d35ddc843d84f2d4e0a9681be49"},
"opentelemetry_ecto": {:hex, :opentelemetry_ecto, "1.1.1", "218b791d2883becaf28d3fe25627b48f862ad63d4982dd0d10d307861eafa847", [:mix], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.2", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e5f4c76aa9385cefa099a88e19eba90a7a19ef82deec43e0c03c987528bdd826"},
"opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.5.0", "7f866236d7018c20de28ebc379c02b4b0d4fd6cfd058cd15351412e7b390a733", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.3", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.2", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "662fac229eba0114b3a9d1538fdf564bb46ca037cdb6d0e5fdc4c5d0da7a21be"},
"opentelemetry_finch": {:hex, :opentelemetry_finch, "0.2.0", "55ddfb96082dda59a64214f2d4640d2fb1323ca45bbb4b40d32599a0e8087a05", [:mix], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7364f70822ec282853cade12953f40d7b94e03967608a52fd406e3b080f18d5e"},
"opentelemetry_liveview": {:hex, :opentelemetry_liveview, "1.0.0-rc.4", "52915a83809100f31f7b6ea42e00b964a66032b75cc56e5b4cbcf7e21d4a45da", [:mix], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.0.0-beta.7", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e06ab69da7ee46158342cac42f1c22886bdeab53e8d8c4e237c3b3c2cf7b815d"},
"opentelemetry_phoenix": {:hex, :opentelemetry_phoenix, "1.1.0", "60c8b3f23d16f17103532f6f16003e1ef76eac67d4e5f8a206091fe59dcac263", [:mix], [{:nimble_options, "~> 0.5", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.2", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 0.2", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.0", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:plug, ">= 1.11.0", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5a38537aedc5d568590e8be9ffe481d668cba4ffd25f06fe2d33c11296d7855f"},
"opentelemetry_process_propagator": {:hex, :opentelemetry_process_propagator, "0.2.2", "85244a49f0c32ae1e2f3d58c477c265bd6125ee3480ade82b0fa9324b85ed3f0", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "04db13302a34bea8350a13ed9d49c22dfd32c4bc590d8aa88b6b4b7e4f346c61"},
"opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "0.2.0", "b67fe459c2938fcab341cb0951c44860c62347c005ace1b50f8402576f241435", [:mix, :rebar3], [], "hexpm", "d61fa1f5639ee8668d74b527e6806e0503efc55a42db7b5f39939d84c07d6895"},
"opentelemetry_telemetry": {:hex, :opentelemetry_telemetry, "1.0.0", "d5982a319e725fcd2305b306b65c18a86afdcf7d96821473cf0649ff88877615", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_registry, "~> 0.3.0", [hex: :telemetry_registry, repo: "hexpm", optional: false]}], "hexpm", "3401d13a1d4b7aa941a77e6b3ec074f0ae77f83b5b2206766ce630123a9291a9"},
"parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"},
"phoenix": {:hex, :phoenix, "1.7.2", "c375ffb482beb4e3d20894f84dd7920442884f5f5b70b9f4528cbe0cedefec63", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.4", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "1ebca94b32b4d0e097ab2444a9742ed8ff3361acad17365e4e6b2e79b4792159"},
"phoenix_ecto": {:hex, :phoenix_ecto, "4.4.1", "fe7a02387a7d26002a46b97e9879591efee7ebffe5f5e114fd196632e6e4a08d", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "ddccf8b4966180afe7630b105edb3402b1ca485e7468109540d262e842048ba4"},
@@ -87,7 +104,9 @@
"telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"},
"telemetry_metrics": {:hex, :telemetry_metrics, "0.6.1", "315d9163a1d4660aedc3fee73f33f1d355dcc76c5c3ab3d59e76e3edf80eef1f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7be9e0871c41732c233be71e4be11b96e56177bf15dde64a8ac9ce72ac9834c6"},
"telemetry_poller": {:hex, :telemetry_poller, "1.0.0", "db91bb424e07f2bb6e73926fcafbfcbcb295f0193e0a00e825e589a0a47e8453", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b3a24eafd66c3f42da30fc3ca7dda1e9d546c12250a2d60d7b81d264fbec4f6e"},
"telemetry_registry": {:hex, :telemetry_registry, "0.3.1", "14a3319a7d9027bdbff7ebcacf1a438f5f5c903057b93aee484cca26f05bdcba", [:mix, :rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "6d0ca77b691cf854ed074b459a93b87f4c7f5512f8f7743c635ca83da81f939e"},
"tesla": {:hex, :tesla, "1.7.0", "a62dda2f80d4f8a925eb7b8c5b78c461e0eb996672719fe1a63b26321a5f8b4e", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, "~> 1.3", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.0", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "2e64f01ebfdb026209b47bc651a0e65203fcff4ae79c11efb73c4852b00dc313"},
"tls_certificate_check": {:hex, :tls_certificate_check, "1.19.0", "c76c4c5d79ee79a2b11c84f910c825d6f024a78427c854f515748e9bd025e987", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "4083b4a298add534c96125337cb01161c358bb32dd870d5a893aae685fd91d70"},
"ueberauth": {:hex, :ueberauth, "0.10.3", "4a3bd7ab7b5d93d301d264f0f6858392654ee92171f4437d067d1ae227c051d9", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "1394f36a6c64e97f2038cf95228e7e52b4cb75417962e30418fbe9902b30e6d3"},
"ueberauth_identity": {:hex, :ueberauth_identity, "0.4.2", "1ef48b37428d225a2eb0cc453b0d446440d8f62c70dbbfef675ed923986136f2", [:mix], [{:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}, {:ueberauth, "~> 0.7", [hex: :ueberauth, repo: "hexpm", optional: false]}], "hexpm", "134354bc3da3ece4333f3611fbe283372134b19b2ed8a3d7f43554c6102c4bff"},
"unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"},

View File

@@ -1,24 +1,43 @@
#!/bin/sh
# Sets and enables heart (recommended only in daemon mode)
# case $RELEASE_COMMAND in
# daemon*)
# HEART_COMMAND="$RELEASE_ROOT/bin/$RELEASE_NAME $RELEASE_COMMAND"
# export HEART_COMMAND
# export ELIXIR_ERL_OPTIONS="-heart"
# ;;
# *)
# ;;
# esac
case $RELEASE_COMMAND in
daemon*)
HEART_COMMAND="$RELEASE_ROOT/bin/$RELEASE_NAME $RELEASE_COMMAND"
export HEART_COMMAND
export ELIXIR_ERL_OPTIONS="-heart -kernel inet_dist_listen_min ${ERLANG_DISTRIBUTION_PORT} inet_dist_listen_max ${ERLANG_DISTRIBUTION_PORT}"
;;
start*)
export ELIXIR_ERL_OPTIONS="-kernel inet_dist_listen_min ${ERLANG_DISTRIBUTION_PORT} inet_dist_listen_max ${ERLANG_DISTRIBUTION_PORT}"
;;
*)
;;
esac
# Set the release to work across nodes. We use the long name format
# (my_app@hostname), so RELEASE_DISTRIBUTION is set to "name" below.
# Valid values are "sname", "name" or "none".
export RELEASE_DISTRIBUTION=name
# Read the current hostname from the metadata server if available;
# this ensures that the hostname is correct on Google Compute Engine.
#
# Having a valid DNS record is important for remotely connecting to a running Erlang node.
if [[ "${RELEASE_HOST_DISCOVERY_METHOD}" == "gce_metadata" ]]; then
GCP_PROJECT_ID=$(curl "http://metadata.google.internal/computeMetadata/v1/project/project-id" -H "Metadata-Flavor: Google" -s)
GCP_INSTANCE_NAME=$(curl "http://metadata.google.internal/computeMetadata/v1/instance/name" -H "Metadata-Flavor: Google" -s)
GCP_INSTANCE_ZONE=$(curl "http://metadata.google.internal/computeMetadata/v1/instance/zone" -H "Metadata-Flavor: Google" -s | sed 's:.*/::')
RELEASE_HOSTNAME="$GCP_INSTANCE_NAME.$GCP_INSTANCE_ZONE.c.${GCP_PROJECT_ID}.internal"
else
RELEASE_HOSTNAME=${RELEASE_HOSTNAME:-127.0.0.1}
fi
# RELEASE_NAME is guaranteed to be set by the start script and defaults to 'firezone'
# set RELEASE_NAME in the environment to a unique value when running multiple instances
# in the same network namespace (i.e. with host networking in Podman)
export RELEASE_NODE=$RELEASE_NAME@127.0.0.1
export RELEASE_NODE=${RELEASE_NAME}@${RELEASE_HOSTNAME}
# Choices here are 'interactive' and 'embedded'. 'interactive' boots faster which
# prevents some runit process management edge cases at the expense of the application

View File

@@ -1,76 +1,12 @@
#!/bin/sh
: "${WIREGUARD_INTERFACE_NAME:=wg-firezone}"
# Note: we keep legacy default values for those variables to avoid breaking existing deployments,
# but they will go away in the 0.8.0 release.
: "${WIREGUARD_IPV4_ADDRESS:=10.3.2.1}"
: "${WIREGUARD_IPV4_ENABLED:=true}"
: "${WIREGUARD_IPV4_NETWORK:=10.3.2.0/24}"
: "${WIREGUARD_IPV6_ADDRESS:=fd00::3:2:1}"
: "${WIREGUARD_IPV6_ENABLED:=true}"
: "${WIREGUARD_IPV6_NETWORK:=fd00::3:2:0/120}"
: "${WIREGUARD_MTU:=1280}"
setup_interface()
{
if ! ip link show ${WIREGUARD_INTERFACE_NAME} > /dev/null 2>&1; then
echo "Creating WireGuard interface ${WIREGUARD_INTERFACE_NAME}"
ip link add ${WIREGUARD_INTERFACE_NAME} type wireguard
fi
if [ "$WIREGUARD_IPV4_ENABLED" = "true" ]; then
ip address replace ${WIREGUARD_IPV4_ADDRESS} dev ${WIREGUARD_INTERFACE_NAME}
fi
if [ "$WIREGUARD_IPV6_ENABLED" = "true" ]; then
ip -6 address replace ${WIREGUARD_IPV6_ADDRESS} dev ${WIREGUARD_INTERFACE_NAME}
fi
ip link set mtu ${WIREGUARD_MTU} up dev ${WIREGUARD_INTERFACE_NAME}
}
add_routes()
{
if [ "$WIREGUARD_IPV4_ENABLED" = "true" ]; then
if ! ip route show dev ${WIREGUARD_INTERFACE_NAME} | grep -q "${WIREGUARD_IPV4_NETWORK}"; then
echo "Adding route ${WIREGUARD_IPV4_NETWORK} for interface ${WIREGUARD_INTERFACE_NAME}"
ip route add ${WIREGUARD_IPV4_NETWORK} dev ${WIREGUARD_INTERFACE_NAME}
fi
fi
if [ "$WIREGUARD_IPV6_ENABLED" = "true" ]; then
if ! ip -6 route show dev ${WIREGUARD_INTERFACE_NAME} | grep -q "${WIREGUARD_IPV6_NETWORK}"; then
echo "Adding route ${WIREGUARD_IPV6_NETWORK} for interface ${WIREGUARD_INTERFACE_NAME}"
ip -6 route add ${WIREGUARD_IPV6_NETWORK} dev ${WIREGUARD_INTERFACE_NAME}
fi
fi
}
mkdir -p /var/firezone
setup_telemetry() {
[ -f /var/firezone/.tid ] || cat /proc/sys/kernel/random/uuid > /var/firezone/.tid
export TELEMETRY_ID=$(cat /var/firezone/.tid)
}
gen_cert() {
openssl req \
-x509 \
-sha256 \
-nodes \
-days 365 \
-newkey rsa:2048 \
-keyout /var/firezone/saml.key \
-out /var/firezone/saml.crt \
-subj "/C=US/ST=Denial/L=Firezone/O=Dis/CN=www.example.com"
}
setup_saml() {
([ -f /var/firezone/saml.key ] && [ -f /var/firezone/saml.crt ]) || gen_cert
}
setup_interface
add_routes
setup_saml
setup_telemetry
cd -P -- "$(dirname -- "$0")"

View File

@@ -1,4 +0,0 @@
#!/bin/sh
set -e
source "$(dirname -- "$0")/bootstrap"
exec ./firezone eval Domain.Release.create_admin_user

View File

@@ -1,26 +0,0 @@
#!/bin/sh
cat <<-EOF
VERSION=latest
EXTERNAL_URL=_CHANGE_ME_
DEFAULT_ADMIN_EMAIL=_CHANGE_ME_
DEFAULT_ADMIN_PASSWORD=$(openssl rand -base64 12)
GUARDIAN_SECRET_KEY=$(openssl rand -base64 48)
SECRET_KEY_BASE=$(openssl rand -base64 48)
LIVE_VIEW_SIGNING_SALT=$(openssl rand -base64 24)
COOKIE_SIGNING_SALT=$(openssl rand -base64 6)
COOKIE_ENCRYPTION_SALT=$(openssl rand -base64 6)
DATABASE_ENCRYPTION_KEY=$(openssl rand -base64 32)
DATABASE_PASSWORD=$(openssl rand -base64 12)
# The ability to change the IPv4 and IPv6 address pool will be removed
# in a future Firezone release in order to reduce the possible combinations
# of network configurations we need to handle.
#
# Due to the above, we recommend not changing these unless absolutely
# necessary.
WIREGUARD_IPV4_NETWORK=100.64.0.0/10
WIREGUARD_IPV4_ADDRESS=100.64.0.1
WIREGUARD_IPV6_NETWORK=fd00::/106
WIREGUARD_IPV6_ADDRESS=fd00::1
EOF

View File

@@ -1,4 +1,4 @@
#!/bin/sh
set -e
source "$(dirname -- "$0")/bootstrap"
exec ./firezone eval Domain.Release.migrate
exec ./${APPLICATION_NAME} eval Domain.Release.migrate

View File

@@ -1,4 +1,4 @@
#!/bin/sh
set -e
source "$(dirname -- "$0")/bootstrap"
exec ./firezone eval Domain.Release.create_api_token
exec ./${APPLICATION_NAME} eval Domain.Release.seed

View File

@@ -1,11 +1,5 @@
#!/bin/sh
set -e
source "$(dirname -- "$0")/bootstrap"
./firezone eval Domain.Release.migrate
if [ "$RESET_ADMIN_ON_BOOT" = "true" ]; then
./firezone eval Domain.Release.create_admin_user
fi
exec ./firezone start
./migrate
exec ./${APPLICATION_NAME} start
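For completeness, a sketch of the Domain.Release task these scripts eval, assuming the standard Phoenix release-task pattern (the actual module is not part of this diff):
defmodule Domain.Release do
  @app :domain
  def migrate do
    Application.load(@app)
    for repo <- Application.fetch_env!(@app, :ecto_repos) do
      {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
    end
  end
  # seed/0 would follow the same shape, starting the repo and then
  # evaluating priv/repo/seeds.exs against it.
end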

View File

@@ -1,11 +1,29 @@
## Customize flags given to the VM: http://erlang.org/doc/man/erl.html
## -mode/-name/-sname/-setcookie are configured via env vars, do not set them here
# Customize flags given to the VM: http://erlang.org/doc/man/erl.html
# -mode/-name/-sname/-setcookie are configured via env vars, do not set them here
## Number of dirty schedulers doing IO work (file, sockets, and others)
##+SDio 5
# Number of dirty schedulers doing IO work (file, sockets, and others)
#
# Interacting with the file system usually goes through the async pool.
# Increasing the pool increases boot time, but it will
# likely improve performance for the Plug.Static layer.
+SDio 20
## Increase number of concurrent ports/sockets
##+Q 65536
# Double the default maximum ports value
# +Q 131072
## Tweak GC to run more often
##-env ERL_FULLSWEEP_AFTER 10
# Bind schedulers to CPUs (good when no other OS processes are bound to specific processors)
# +stbt db
# Disable scheduler compaction of load (don't shut down schedulers that run out of work)
#
# This is good for latency and also keeps our Google sustained use discount higher.
# +scl false
# Enable port parallelism (good for parallelism, bad for latency)
# +spp true
# Doubles the distribution buffer busy limit (good for latency, increases memory consumption)
# +zdbbl 2048
# Tweak GC to run more often
#-env ERL_FULLSWEEP_AFTER 10

11
terraform/.gitignore vendored Normal file
View File

@@ -0,0 +1,11 @@
# Ignore Terraform state and temporary files
.terraform
*.tfstate.backup
terraform.tfstate.d
out.plan
# Don't ever commit these files to git
*.p12
*id_rsa*
*.key
*.csr

View File

@@ -0,0 +1,98 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/google" {
version = "4.66.0"
hashes = [
"h1:rN7iHu/t+Xps0D4RUM2ZkgLdXAY6ftey+o/5osP9jKE=",
"zh:141cddc714dec246957a47cb4103b34302222fc93a87b64de88116b22ebb0ea1",
"zh:276ebd75cb7c265d12b2c611a5f8d38fd6b892ef3edec1b845a934721db794e5",
"zh:574ae7b4808c1560b5a55a75ca2ad5d8ff6b5fb9dad6dffce3fae7ff8ccf78a9",
"zh:65309953f79827c23cc800fc093619a1e0e51a53e2429e9b04e537a11012f989",
"zh:6d67d3edea47767a873c38f1ff519d4450d8e1189a971bda7b0ffde9c9c65a86",
"zh:7fb116be869e30ee155c27f122d415f34d1d5de735d1fa9c4280cac71a42e8f4",
"zh:8a95ed92bb4547f4a40c953a6bd1db659b739f67adcacd798b11fafaec55ee67",
"zh:94f0179e84eb74823d8be4781b0a15f7f34ee39a7b158075504c882459f1ab23",
"zh:a58a7c5ace957cb4395f4b3bb11687e3a5c79362a744107f16623118cffc9370",
"zh:ab38b66f3c5c00df64c86fb4e47caef8cf451d5ed1f76845fd8b2c59628dc18a",
"zh:cc6bb1799e38912affc2a5b6f1c52b08f286d3751206532c04482b5ca0418eb6",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/google-beta" {
version = "4.66.0"
hashes = [
"h1:z8dx8fWyjpymy5nzJGhEq9IJ+K8vVaWPawZTOhL7NuU=",
"zh:253391f3b3cc9c6908b9fcbb8704e423071121fef476d5de824a187df76924a0",
"zh:2fb223b4fba1fcccb02cc3d0d5103fdf687a722b461828b3885043dd643f8efd",
"zh:6ca0094c20f4e9f25f11ab016f0b54fcfd62076ea30bb43d4c69d52633a0cfb8",
"zh:757ffff89a521073c8fa7f663cf3d9d20629d6e72b837b74c0221bcf34531cfd",
"zh:7d1459b9b3bd9e0dc887b9c476cfa58e2cbb7d56d5ffdeaec0fdd535a38373d4",
"zh:92ad7a5489cd3f51b69c0136095d94f3092c8c7e0d5c8befe1ff53c18761aade",
"zh:9f477e3dbaac8302160bfcfb9c064de72eb6776130a5671380066ac2e84ceae8",
"zh:d1580b146b16d56ccd18a1bbc4a4cac2607e37ed5baf6290cc929f5c025bf526",
"zh:d30d5b3ebd6c4123a53cef4c7c6606b06d27f1cb798b387b9a65b55f8c7b6b9f",
"zh:e3cdc92f111499702f7a807fe6cf8873714939efc05b774cfbde76b8a199da46",
"zh:f2cd44444b6d7760a8a6deaf54ca67ae3696d3f5640b107ad7be91dde8a60c25",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.1"
hashes = [
"h1:ydA0/SNRVB1o95btfshvYsmxA+jZFRZcvKzZSB+4S1M=",
"zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840",
"zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb",
"zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5",
"zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238",
"zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc",
"zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970",
"zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2",
"zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5",
"zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f",
"zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.5.1"
hashes = [
"h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
"zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
"zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
"zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
"zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
"zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
"zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
"zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
"zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
"zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.4"
constraints = "~> 4.0"
hashes = [
"h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=",
"zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
"zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
"zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
"zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
"zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
"zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
"zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
"zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
"zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
"zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}

View File

@@ -0,0 +1,346 @@
# Allow Google Cloud and Let's Encrypt to issue certificates for our domain
resource "google_dns_record_set" "dns-caa" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CAA"
name = module.google-cloud-dns.dns_name
rrdatas = [
"0 issue \"letsencrypt.org\"",
"0 issue \"pki.goog\"",
"0 iodef \"mailto:security@firezone.dev\""
]
ttl = 3600
}
# Website
resource "google_dns_record_set" "website-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = module.google-cloud-dns.dns_name
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
resource "google_dns_record_set" "website-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = module.google-cloud-dns.dns_name
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "website-www-redirect" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "www.${module.google-cloud-dns.dns_name}"
rrdatas = ["firez.one."]
ttl = 3600
}
# Our team's Firezone instance(s)
resource "google_dns_record_set" "dogfood" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "dogfood.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.56.50"]
ttl = 3600
}
resource "google_dns_record_set" "awsfz1" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "awsfz1.${module.google-cloud-dns.dns_name}"
rrdatas = ["ec2-52-200-241-107.compute-1.amazonaws.com."]
ttl = 3600
}
# Our MAIN discourse instance, do not change this!
resource "google_dns_record_set" "discourse" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "discourse.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.77.86.150"]
ttl = 300
}
# VPN-protected DNS records
resource "google_dns_record_set" "metabase" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "metabase.${module.google-cloud-dns.dns_name}"
rrdatas = ["10.5.96.5"]
ttl = 3600
}
# Wireguard test servers
resource "google_dns_record_set" "wg0" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "wg0.${module.google-cloud-dns.dns_name}"
rrdatas = ["54.151.104.17"]
ttl = 3600
}
resource "google_dns_record_set" "wg1" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "wg1.${module.google-cloud-dns.dns_name}"
rrdatas = ["54.183.57.227"]
ttl = 3600
}
resource "google_dns_record_set" "wg2" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "wg2.${module.google-cloud-dns.dns_name}"
rrdatas = ["54.177.212.45"]
ttl = 3600
}
# Connectivity check servers
resource "google_dns_record_set" "ping-backend" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "ping-backend.${module.google-cloud-dns.dns_name}"
rrdatas = ["149.28.197.67"]
ttl = 3600
}
resource "google_dns_record_set" "ping-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "ping.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "ping-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = "ping.${module.google-cloud-dns.dns_name}"
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
# Telemetry servers
resource "google_dns_record_set" "old-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "old-telemetry.${module.google-cloud-dns.dns_name}"
rrdatas = ["143.244.211.244"]
ttl = 3600
}
resource "google_dns_record_set" "t-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "t.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "t-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = "t.${module.google-cloud-dns.dns_name}"
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
resource "google_dns_record_set" "telemetry-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "telemetry.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "telemetry-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = "telemetry.${module.google-cloud-dns.dns_name}"
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
# Third-party services
## Sendgrid
resource "google_dns_record_set" "sendgrid-project" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "23539796.${module.google-cloud-dns.dns_name}"
rrdatas = ["sendgrid.net."]
ttl = 3600
}
resource "google_dns_record_set" "sendgrid-return-1" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "em3706.${module.google-cloud-dns.dns_name}"
rrdatas = ["u23539796.wl047.sendgrid.net."]
ttl = 3600
}
resource "google_dns_record_set" "sendgrid-return-2" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "url6320.${module.google-cloud-dns.dns_name}"
rrdatas = ["sendgrid.net."]
ttl = 3600
}
resource "google_dns_record_set" "sendgrid-domainkey1" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "s1._domainkey.${module.google-cloud-dns.dns_name}"
rrdatas = ["s1.domainkey.u23539796.wl047.sendgrid.net."]
ttl = 3600
}
resource "google_dns_record_set" "sendgrid-domainkey2" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "s2._domainkey.${module.google-cloud-dns.dns_name}"
rrdatas = ["s2.domainkey.u23539796.wl047.sendgrid.net."]
ttl = 3600
}
# Postmark
resource "google_dns_record_set" "postmark-dkim" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "20230606183724pm._domainkey.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"k=rsa;p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCGB97X54FpoXNFuuPpI2u18ymEHBvNGfaRVXn9KEKAnSIfayJ6V3m5C5WGmfv579gyvfdDm04NAVBMcxe6mkjZHsZwds7mPjOYmRlsCClcy6ITqHwPdGSqP0f4zes1AT3Sr1GCQkl/2CdjWzc7HLoyViPxcH17yJN8HlfCYg5waQIDAQAB"
]
}
resource "google_dns_record_set" "postmark-return" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "pm-bounces.${module.google-cloud-dns.dns_name}"
rrdatas = ["pm.mtasv.net."]
ttl = 3600
}
# Google Workspace
resource "google_dns_record_set" "google-mail" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = module.google-cloud-dns.dns_name
type = "MX"
ttl = 3600
rrdatas = [
"1 aspmx.l.google.com.",
"5 alt1.aspmx.l.google.com.",
"5 alt2.aspmx.l.google.com.",
"10 alt3.aspmx.l.google.com.",
"10 alt4.aspmx.l.google.com."
]
}
resource "google_dns_record_set" "google-dmark" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "_dmarc.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
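# p=reject tells receivers to reject failing mail, adkim=s/aspf=s require strict alignment, and pct=100 applies the policy to all messages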
rrdatas = [
"\"v=DMARC1;\" \"p=reject;\" \"rua=mailto:dmarc-reports@firezone.dev;\" \"pct=100;\" \"adkim=s;\" \"aspf=s\""
]
}
resource "google_dns_record_set" "google-spf" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "try.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=spf1 include:_spf.google.com ~all\""
]
}
resource "google_dns_record_set" "google-dkim" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "20190728104345pm._domainkey.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=DKIM1;\" \"k=rsa;\" \"p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlrJHV7oQ63ebQcZ7fsvo+kjb1R9UrkpcdAOkOeN74qMjypQA+hKVV9F2aDM8hFeZoQH9zwIgQi+\" \"0TcDKRr1O7BklmbSkoMaqM5gH2OQTqQWwU0v49POHiL6yWKO4L68peJMMEVX+xFcjxHI5j6dkLMmv+Y6IxrzsqgeXx7V6cFt5V1G8lr0DWC+yzhPioda+S21dWl1GwPdLBbQb80GV1mpV2rGImzeiZVv4/4Et7w0M55Rfy\" \"m4JICJ89FmjC1Ua05CvrD4dvugWqfVoGuP3nyQXEqP8wgyoPuOZPrcEQXu+IlBrWMRBKv7slI571YnUznwoKlkourgB+7qC/zU8KQIDAQAB\""
]
}

View File

@@ -0,0 +1,208 @@
resource "google_monitoring_notification_channel" "slack" {
project = module.google-cloud-project.project.project_id
display_name = "Slack: #alerts-infra"
type = "slack"
labels = {
"channel_name" = var.slack_alerts_channel
}
sensitive_labels {
auth_token = var.slack_alerts_auth_token
}
}
resource "google_monitoring_uptime_check_config" "api-https" {
project = module.google-cloud-project.project.project_id
display_name = "api-https"
timeout = "60s"
http_check {
port = "443"
use_ssl = true
validate_ssl = true
request_method = "GET"
path = "/healthz"
accepted_response_status_codes {
status_class = "STATUS_CLASS_2XX"
}
}
monitored_resource {
type = "uptime_url"
labels = {
project_id = module.google-cloud-project.project.project_id
host = module.api.host
}
}
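# Assumes the /healthz endpoint returns a JSON body with a top-level status field, e.g. {"status":"ok"}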
content_matchers {
content = "\"ok\""
matcher = "MATCHES_JSON_PATH"
json_path_matcher {
json_path = "$.status"
json_matcher = "EXACT_MATCH"
}
}
checker_type = "STATIC_IP_CHECKERS"
}
resource "google_monitoring_uptime_check_config" "web-https" {
project = module.google-cloud-project.project.project_id
display_name = "web-https"
timeout = "60s"
http_check {
port = "443"
use_ssl = true
validate_ssl = true
request_method = "GET"
path = "/healthz"
accepted_response_status_codes {
status_class = "STATUS_CLASS_2XX"
}
}
monitored_resource {
type = "uptime_url"
labels = {
project_id = module.google-cloud-project.project.project_id
host = module.web.host
}
}
content_matchers {
content = "\"ok\""
matcher = "MATCHES_JSON_PATH"
json_path_matcher {
json_path = "$.status"
json_matcher = "EXACT_MATCH"
}
}
checker_type = "STATIC_IP_CHECKERS"
}
resource "google_monitoring_alert_policy" "instances_high_cpu_policy" {
project = module.google-cloud-project.project.project_id
display_name = "High Instance CPU utilization"
combiner = "OR"
notification_channels = [
google_monitoring_notification_channel.slack.name
]
conditions {
display_name = "VM Instance - CPU utilization"
condition_threshold {
filter = "resource.type = \"gce_instance\" AND metric.type = \"compute.googleapis.com/instance/cpu/utilization\" AND metadata.user_labels.managed_by = \"terraform\""
comparison = "COMPARISON_GT"
threshold_value = 0.8
duration = "60s"
trigger {
count = 1
}
aggregations {
alignment_period = "60s"
cross_series_reducer = "REDUCE_NONE"
per_series_aligner = "ALIGN_MEAN"
}
}
}
alert_strategy {
auto_close = "28800s"
}
}
resource "google_monitoring_alert_policy" "sql_high_cpu_policy" {
project = module.google-cloud-project.project.project_id
display_name = "High Cloud SQL CPU utilization"
combiner = "OR"
notification_channels = [
google_monitoring_notification_channel.slack.name
]
conditions {
display_name = "Cloud SQL Database - CPU utilization"
condition_threshold {
filter = "resource.type = \"cloudsql_database\" AND metric.type = \"cloudsql.googleapis.com/database/cpu/utilization\""
comparison = "COMPARISON_GT"
threshold_value = 0.8
duration = "60s"
trigger {
count = 1
}
aggregations {
alignment_period = "60s"
cross_series_reducer = "REDUCE_NONE"
per_series_aligner = "ALIGN_MEAN"
}
}
}
alert_strategy {
auto_close = "28800s"
}
}
resource "google_monitoring_alert_policy" "sql_disk_utiliziation_policy" {
project = module.google-cloud-project.project.project_id
display_name = "High Cloud SQL disk utilization"
combiner = "OR"
notification_channels = [
google_monitoring_notification_channel.slack.name
]
conditions {
display_name = "Cloud SQL Database - Disk utilization"
condition_threshold {
filter = "resource.type = \"cloudsql_database\" AND metric.type = \"cloudsql.googleapis.com/database/disk/utilization\""
comparison = "COMPARISON_GT"
threshold_value = 0.8
duration = "300s"
trigger {
count = 1
}
aggregations {
alignment_period = "300s"
cross_series_reducer = "REDUCE_NONE"
per_series_aligner = "ALIGN_MEAN"
}
}
}
alert_strategy {
auto_close = "28800s"
}
}

View File

@@ -0,0 +1,559 @@
locals {
project_owners = [
"a@firezone.dev",
"gabriel@firezone.dev",
"jamil@firezone.dev"
]
region = "us-east1"
availability_zone = "us-east1-d"
tld = "firez.one"
}
terraform {
cloud {
organization = "firezone"
hostname = "app.terraform.io"
workspaces {
name = "staging"
}
}
}
provider "random" {}
provider "null" {}
provider "google" {}
provider "google-beta" {}
# Create the project
module "google-cloud-project" {
source = "../../modules/google-cloud-project"
id = "firezone-staging"
name = "Staging Environment"
organization_id = "335836213177"
billing_account_id = "01DFC9-3D6951-579BE1"
}
# Grant owner access to the project
resource "google_project_iam_binding" "project_owners" {
project = module.google-cloud-project.project.project_id
role = "roles/owner"
members = formatlist("user:%s", local.project_owners)
}
# Grant GitHub Actions ability to write to the container registry
module "google-artifact-registry" {
source = "../../modules/google-artifact-registry"
project_id = module.google-cloud-project.project.project_id
project_name = module.google-cloud-project.name
region = local.region
writers = [
# This is the GitHub Actions service account, configured manually
# in the project github-iam-387915
"serviceAccount:github-actions@github-iam-387915.iam.gserviceaccount.com"
]
}
# Create a VPC
module "google-cloud-vpc" {
source = "../../modules/google-cloud-vpc"
project_id = module.google-cloud-project.project.project_id
name = module.google-cloud-project.project.project_id
}
# Enable Google Cloud Storage for the project
module "google-cloud-storage" {
source = "../../modules/google-cloud-storage"
project_id = module.google-cloud-project.project.project_id
}
# Create DNS managed zone
module "google-cloud-dns" {
source = "../../modules/google-cloud-dns"
project_id = module.google-cloud-project.project.project_id
tld = local.tld
dnssec_enabled = false
}
# Create the Cloud SQL database
module "google-cloud-sql" {
source = "../../modules/google-cloud-sql"
project_id = module.google-cloud-project.project.project_id
network = module.google-cloud-vpc.id
compute_region = local.region
compute_availability_zone = local.availability_zone
compute_instance_cpu_count = "2"
compute_instance_memory_size = "7680"
database_name = module.google-cloud-project.project.project_id
database_highly_available = false
database_backups_enabled = false
database_read_replica_locations = []
database_flags = {
# Increase the connection limit a bit; it must be at least Ecto's ((pool_count * pool_size) + 50)
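# For example, with a hypothetical pool_size of 10 on each of 4 nodes (web + api, 2 replicas each): (4 * 10) + 50 = 90, well within 500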
"max_connections" = "500"
# Sets a minimum threshold on dead tuples to prevent autovacuum from running too often on small tables
# where 5% is less than 50 records
"autovacuum_vacuum_threshold" = "50"
# Trigger autovacuum for every 5% of the table changed
"autovacuum_vacuum_scale_factor" = "0.05"
"autovacuum_analyze_scale_factor" = "0.05"
# Give autovacuum 4x the cost limit to prevent it from never finishing
# on big tables
"autovacuum_vacuum_cost_limit" = "800"
# Give hash joins a bit more memory to work with
# "hash_mem_multiplier" = "3"
# This is the standard value for work_mem
"work_mem" = "4096"
}
}
# Generate secrets
resource "random_password" "erlang_cluster_cookie" {
length = 64
special = false
}
resource "random_password" "auth_token_key_base" {
length = 64
special = false
}
resource "random_password" "auth_token_salt" {
length = 32
special = false
}
resource "random_password" "relays_auth_token_key_base" {
length = 64
special = false
}
resource "random_password" "relays_auth_token_salt" {
length = 32
special = false
}
resource "random_password" "gateways_auth_token_key_base" {
length = 64
special = false
}
resource "random_password" "gateways_auth_token_salt" {
length = 32
special = false
}
resource "random_password" "secret_key_base" {
length = 64
special = false
}
resource "random_password" "live_view_signing_salt" {
length = 32
special = false
}
resource "random_password" "cookie_signing_salt" {
length = 32
special = false
}
resource "random_password" "cookie_encryption_salt" {
length = 32
special = false
}
# Deploy nginx to the compute for HTTPS termination
# module "nginx" {
#   source     = "../../modules/nginx"
#   project_id = module.google-cloud-project.project.project_id
# }
# Create a VPC subnet for the application instances;
# all apps must be in the same VPC for Erlang clustering to work
resource "google_compute_subnetwork" "apps" {
project = module.google-cloud-project.project.project_id
name = "app"
ip_cidr_range = "10.128.0.0/20"
region = local.region
network = module.google-cloud-vpc.id
private_ip_google_access = true
}
# Deploy the web app to GCE
resource "random_password" "web_db_password" {
length = 16
}
resource "google_sql_user" "web" {
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
name = "web"
password = random_password.web_db_password.result
}
resource "google_sql_database" "firezone" {
project = module.google-cloud-project.project.project_id
name = "firezone"
instance = module.google-cloud-sql.master_instance_name
}
locals {
target_tags = ["app-web", "app-api"]
cluster = {
name = "firezone"
cookie = base64encode(random_password.erlang_cluster_cookie.result)
}
shared_application_environment_variables = [
# Database
{
name = "DATABASE_HOST"
value = module.google-cloud-sql.master_instance_ip_address
},
{
name = "DATABASE_NAME"
value = google_sql_database.firezone.name
},
{
name = "DATABASE_USER"
value = google_sql_user.web.name
},
{
name = "DATABASE_PASSWORD"
value = google_sql_user.web.password
},
# Secrets
{
name = "AUTH_TOKEN_KEY_BASE"
value = base64encode(random_password.auth_token_key_base.result)
},
{
name = "AUTH_TOKEN_SALT"
value = base64encode(random_password.auth_token_salt.result)
},
{
name = "RELAYS_AUTH_TOKEN_KEY_BASE"
value = base64encode(random_password.relays_auth_token_key_base.result)
},
{
name = "RELAYS_AUTH_TOKEN_SALT"
value = base64encode(random_password.relays_auth_token_salt.result)
},
{
name = "GATEWAYS_AUTH_TOKEN_KEY_BASE"
value = base64encode(random_password.gateways_auth_token_key_base.result)
},
{
name = "GATEWAYS_AUTH_TOKEN_SALT"
value = base64encode(random_password.gateways_auth_token_salt.result)
},
{
name = "SECRET_KEY_BASE"
value = base64encode(random_password.secret_key_base.result)
},
{
name = "LIVE_VIEW_SIGNING_SALT"
value = base64encode(random_password.live_view_signing_salt.result)
},
{
name = "COOKIE_SIGNING_SALT"
value = base64encode(random_password.cookie_signing_salt.result)
},
{
name = "COOKIE_ENCRYPTION_SALT"
value = base64encode(random_password.cookie_encryption_salt.result)
},
# Erlang
{
name = "ERLANG_DISTRIBUTION_PORT"
value = "9000"
},
{
name = "CLUSTER_NAME"
value = local.cluster.name
},
{
name = "ERLANG_CLUSTER_ADAPTER"
value = "Elixir.Domain.Cluster.GoogleComputeLabelsStrategy"
},
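# These label names must line up with the instance labels applied by the elixir-app module:
# application_labels supplies cluster_name, while the module itself sets the application label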
{
name = "ERLANG_CLUSTER_ADAPTER_CONFIG"
value = jsonencode({
project_id = module.google-cloud-project.project.project_id
cluster_name = local.cluster.name
cluster_name_label = "cluster_name"
node_name_label = "application"
polling_interval_ms = 7000
})
},
{
name = "RELEASE_COOKIE"
value = local.cluster.cookie
},
# Auth
{
name = "AUTH_PROVIDER_ADAPTERS"
value = "email,openid_connect,token"
},
# Telemetry
{
name = "TELEMETRY_ENABLED"
value = "false"
},
# OpenTelemetry requires an exporter to be set on every node
# {
# name = "OTLP_ENDPOINT"
# value = "http://0.0.0.0:55680",
# },
# Emails
{
name = "OUTBOUND_EMAIL_ADAPTER"
value = "Elixir.Swoosh.Adapters.Postmark"
},
{
name = "OUTBOUND_EMAIL_ADAPTER_OPTS"
value = "{\"api_key\":\"${var.postmark_server_api_token}\"}"
}
]
}
module "web" {
source = "../../modules/elixir-app"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n1-standard-1"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "web"
image_tag = var.web_image_tag
scaling_horizontal_replicas = 2
observability_log_level = "debug"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "web"
application_version = "0-0-1"
application_dns_tld = "app.${local.tld}"
application_ports = [
{
name = "http"
protocol = "TCP"
port = 80
health_check = {
initial_delay_sec = 30
check_interval_sec = 5
timeout_sec = 5
healthy_threshold = 1
unhealthy_threshold = 2
http_health_check = {}
}
}
]
application_environment_variables = concat([
# Web Server
{
name = "EXTERNAL_URL"
value = "https://app.${local.tld}"
},
{
name = "PHOENIX_HTTP_WEB_PORT"
value = "80"
}
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
}
}
module "api" {
source = "../../modules/elixir-app"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n1-standard-1"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "api"
image_tag = var.api_image_tag
scaling_horizontal_replicas = 2
observability_log_level = "debug"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "api"
application_version = "0-0-1"
application_dns_tld = "api.${local.tld}"
application_ports = [
{
name = "http"
protocol = "TCP"
port = 80
health_check = {
initial_delay_sec = 30
check_interval_sec = 5
timeout_sec = 5
healthy_threshold = 1
unhealthy_threshold = 2
tcp_health_check = {}
}
}
]
application_environment_variables = concat([
# API Server
{
name = "EXTERNAL_URL"
value = "https://api.${local.tld}"
},
{
name = "PHOENIX_HTTP_API_PORT"
value = "80"
},
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
}
}
# Erlang Cluster
## Allow traffic between Elixir apps for Erlang clustering
resource "google_compute_firewall" "erlang-distribution" {
project = module.google-cloud-project.project.project_id
name = "erlang-distribution"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [4369, 9000]
}
allow {
protocol = "udp"
ports = [4369, 9000]
}
source_ranges = [google_compute_subnetwork.apps.ip_cidr_range]
target_tags = local.target_tags
}
## Allow service account to list running instances
resource "google_project_iam_custom_role" "erlang-discovery" {
project = module.google-cloud-project.project.project_id
title = "Read list of Compute instances"
description = "Used for Erlang Cluster discovery; allows listing running Compute instances."
role_id = "compute.list_instances"
permissions = [
"compute.instances.list",
"compute.zones.list"
]
}
resource "google_project_iam_member" "application" {
for_each = toset([
module.api.service_account.email,
module.web.service_account.email,
])
project = module.google-cloud-project.project.project_id
role = "projects/${module.google-cloud-project.project.project_id}/roles/${google_project_iam_custom_role.erlang-discovery.role_id}"
member = "serviceAccount:${each.value}"
}
# Enable SSH on staging
resource "google_compute_firewall" "ssh" {
project = module.google-cloud-project.project.project_id
name = "staging-ssh"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [22]
}
allow {
protocol = "udp"
ports = [22]
}
allow {
protocol = "sctp"
ports = [22]
}
source_ranges = ["0.0.0.0/0"]
target_tags = local.target_tags
}

View File

@@ -0,0 +1,29 @@
## A router and Cloud NAT are required for instances without an external IP address
resource "google_compute_router" "default" {
project = module.google-cloud-project.project.project_id
name = module.google-cloud-vpc.name
network = module.google-cloud-vpc.self_link
region = local.region
}
resource "google_compute_router_nat" "application" {
project = module.google-cloud-project.project.project_id
name = module.google-cloud-vpc.name
region = local.region
router = google_compute_router.default.name
nat_ip_allocate_option = "AUTO_ONLY"
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
enable_dynamic_port_allocation = false
min_ports_per_vm = 32
udp_idle_timeout_sec = 30
icmp_idle_timeout_sec = 30
tcp_established_idle_timeout_sec = 1200
tcp_transitory_idle_timeout_sec = 30
tcp_time_wait_timeout_sec = 120
}

View File

@@ -0,0 +1,3 @@
output "dns_name_servers" {
value = module.google-cloud-dns.name_servers
}

View File

@@ -0,0 +1,24 @@
variable "api_image_tag" {
type = string
description = "Image tag for the api service"
}
variable "web_image_tag" {
type = string
description = "Image tag for the web service"
}
variable "slack_alerts_channel" {
type = string
description = "Slack channel which will receive monitoring alerts"
default = "#alerts-infra"
}
variable "slack_alerts_auth_token" {
type = string
description = "Slack auth token for the infra alerts channel"
}
variable "postmark_server_api_token" {
type = string
}

View File

@@ -0,0 +1,2 @@
api_image_tag = "bbd9dcdd272e0bba193833421e8280ac88b5feae"
web_image_tag = "bbd9dcdd272e0bba193833421e8280ac88b5feae"

View File

@@ -0,0 +1,30 @@
terraform {
required_version = "1.4.6"
required_providers {
random = {
source = "hashicorp/random"
version = "~> 3.5"
}
null = {
source = "hashicorp/null"
version = "~> 3.2"
}
google = {
source = "hashicorp/google"
version = "~> 4.66"
}
google-beta = {
source = "hashicorp/google-beta"
version = "~> 4.66"
}
tls = {
source = "hashicorp/tls"
version = "~> 4.0"
}
}
}

View File

@@ -0,0 +1,621 @@
locals {
application_name = var.application_name != null ? var.application_name : var.image
application_version = var.application_version != null ? var.application_version : var.image_tag
application_labels = merge({
managed_by = "terraform"
# Note: these labels are used to fetch a release name for the Erlang Cluster
# and to filter nodes by version
application = local.application_name
version = local.application_version
}, var.application_labels)
application_environment_variables = concat([
{
name = "RELEASE_HOST_DISCOVERY_METHOD"
value = "gce_metadata"
}
], var.application_environment_variables)
application_ports_by_name = { for port in var.application_ports : port.name => port }
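# Documented source ranges for Google front ends; external load balancers and health checkers connect from these blocks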
google_load_balancer_ip_ranges = [
"130.211.0.0/22",
"35.191.0.0/16",
]
google_health_check_ip_ranges = [
"130.211.0.0/22",
"35.191.0.0/16"
]
}
# Fetch the most recent COS (Container-Optimized OS) image
data "google_compute_image" "coreos" {
family = "cos-105-lts"
project = "cos-cloud"
}
# Create IAM role for the application instances
resource "google_service_account" "application" {
project = var.project_id
account_id = "app-${local.application_name}"
display_name = "${local.application_name} app"
description = "Service account for ${local.application_name} application instances."
}
## Allow application service account to pull images from the container registry
resource "google_project_iam_member" "artifacts" {
project = var.project_id
role = "roles/artifactregistry.reader"
member = "serviceAccount:${google_service_account.application.email}"
}
## Allow fluentbit to ingest logs
resource "google_project_iam_member" "logs" {
project = var.project_id
role = "roles/logging.logWriter"
member = "serviceAccount:${google_service_account.application.email}"
}
## Allow reporting application errors
resource "google_project_iam_member" "errors" {
project = var.project_id
role = "roles/errorreporting.writer"
member = "serviceAccount:${google_service_account.application.email}"
}
## Allow reporting metrics
resource "google_project_iam_member" "metrics" {
project = var.project_id
role = "roles/monitoring.metricWriter"
member = "serviceAccount:${google_service_account.application.email}"
}
## Allow reporting to Service Management
resource "google_project_iam_member" "service_management" {
project = var.project_id
role = "roles/servicemanagement.reporter"
member = "serviceAccount:${google_service_account.application.email}"
}
## Allow appending traces
resource "google_project_iam_member" "cloudtrace" {
project = var.project_id
role = "roles/cloudtrace.agent"
member = "serviceAccount:${google_service_account.application.email}"
}
# Deploy the app
resource "google_compute_instance_template" "application" {
project = var.project_id
name_prefix = "${local.application_name}-"
description = "This template is used to create ${local.application_name} instances."
machine_type = var.compute_instance_type
region = var.compute_instance_region
can_ip_forward = false
tags = ["app-${local.application_name}"]
labels = merge({
container-vm = data.google_compute_image.coreos.name
}, local.application_labels)
scheduling {
automatic_restart = true
on_host_maintenance = "MIGRATE"
provisioning_model = "STANDARD"
}
disk {
source_image = data.google_compute_image.coreos.self_link
auto_delete = true
boot = true
}
network_interface {
subnetwork = var.vpc_subnetwork
}
service_account {
email = google_service_account.application.email
scopes = [
# These mirror the gke-default scopes
"storage-ro",
"logging-write",
"monitoring",
"service-management",
"service-control",
"trace",
# Required to discover the other instances in the Erlang Cluster
"compute-ro",
]
}
metadata = merge({
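# The gce-container-declaration metadata key is read by the Container-Optimized OS container agent (konlet), which pulls and runs the declared container on boot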
gce-container-declaration = yamlencode({
spec = {
containers = [{
name = local.application_name
image = "${var.container_registry}/${var.image_repo}/${var.image}:${var.image_tag}"
env = local.application_environment_variables
}]
volumes = []
restartPolicy = "Always"
}
})
# Enable the FluentBit agent for logging, which will be the default starting with COS 109
google-logging-enabled = "true"
google-logging-use-fluentbit = "true"
# Report health-related metrics to Cloud Monitoring
google-monitoring-enabled = "true"
})
depends_on = [
google_project_service.compute,
google_project_service.pubsub,
google_project_service.bigquery,
google_project_service.container,
google_project_service.stackdriver,
google_project_service.logging,
google_project_service.monitoring,
google_project_service.clouddebugger,
google_project_service.cloudprofiler,
google_project_service.cloudtrace,
google_project_service.servicenetworking,
google_project_iam_member.artifacts,
google_project_iam_member.logs,
google_project_iam_member.errors,
google_project_iam_member.metrics,
google_project_iam_member.service_management,
google_project_iam_member.cloudtrace,
]
lifecycle {
create_before_destroy = true
}
}
# Create health checks for the application ports
resource "google_compute_health_check" "port" {
for_each = { for port in var.application_ports : port.name => port if try(port.health_check, null) != null }
project = var.project_id
name = "${local.application_name}-${each.key}"
check_interval_sec = each.value.health_check.check_interval_sec != null ? each.value.health_check.check_interval_sec : 5
timeout_sec = each.value.health_check.timeout_sec != null ? each.value.health_check.timeout_sec : 5
healthy_threshold = each.value.health_check.healthy_threshold != null ? each.value.health_check.healthy_threshold : 2
unhealthy_threshold = each.value.health_check.unhealthy_threshold != null ? each.value.health_check.unhealthy_threshold : 2
log_config {
enable = false
}
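# The try(..., null)[*] splat below turns a null into an empty list and a single object into a one-element list, so each dynamic block is rendered at most once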
dynamic "tcp_health_check" {
for_each = try(each.value.health_check.tcp_health_check, null)[*]
content {
port = each.value.port
response = lookup(tcp_health_check.value, "response", null)
}
}
dynamic "http_health_check" {
for_each = try(each.value.health_check.http_health_check, null)[*]
content {
port = each.value.port
host = lookup(http_health_check.value, "host", null)
request_path = lookup(http_health_check.value, "request_path", null)
response = lookup(http_health_check.value, "response", null)
}
}
dynamic "https_health_check" {
for_each = try(each.value.health_check.https_health_check, null)[*]
content {
port = each.value.port
host = lookup(https_health_check.value, "host", null)
request_path = lookup(https_health_check.value, "request_path", null)
response = lookup(https_health_check.value, "response", null)
}
}
}
# Use the template to deploy a regional instance group
resource "google_compute_region_instance_group_manager" "application" {
project = var.project_id
name = "${local.application_name}-group"
base_instance_name = local.application_name
region = var.compute_instance_region
distribution_policy_zones = var.compute_instance_availability_zones
target_size = var.scaling_horizontal_replicas
wait_for_instances = true
wait_for_instances_status = "STABLE"
version {
instance_template = google_compute_instance_template.application.self_link
}
dynamic "named_port" {
for_each = var.application_ports
content {
name = named_port.value.name
port = named_port.value.port
}
}
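# Auto-healing is keyed to the port named "http"; when no matching health check exists, try() falls back to an empty list and the block is skipped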
dynamic "auto_healing_policies" {
for_each = try([google_compute_health_check.port["http"].self_link], [])
content {
initial_delay_sec = local.application_ports_by_name["http"].health_check.initial_delay_sec
health_check = auto_healing_policies.value
}
}
update_policy {
type = "PROACTIVE"
minimal_action = "REPLACE"
max_unavailable_fixed = 1
max_surge_fixed = max(1, var.scaling_horizontal_replicas - 1)
}
depends_on = [
google_compute_instance_template.application
]
}
# Define a security policy which allows filtering traffic by IP address;
# an edge security policy can also detect and block common types of web attacks
resource "google_compute_security_policy" "default" {
project = var.project_id
name = local.application_name
rule {
action = "allow"
priority = "2147483647"
match {
versioned_expr = "SRC_IPS_V1"
config {
src_ip_ranges = ["*"]
}
}
description = "default allow rule"
}
}
# Expose the application ports via an HTTP(S) load balancer with a managed SSL certificate and a static IP address
resource "google_compute_backend_service" "default" {
for_each = local.application_ports_by_name
project = var.project_id
name = "${local.application_name}-backend-${each.value.name}"
load_balancing_scheme = "EXTERNAL"
port_name = each.value.name
protocol = "HTTP"
timeout_sec = 10
connection_draining_timeout_sec = 120
enable_cdn = false
compression_mode = "DISABLED"
custom_request_headers = []
custom_response_headers = []
session_affinity = "CLIENT_IP"
health_checks = try([google_compute_health_check.port[each.key].self_link], null)
security_policy = google_compute_security_policy.default.self_link
backend {
balancing_mode = "UTILIZATION"
capacity_scaler = 1
group = google_compute_region_instance_group_manager.application.instance_group
# Do not send traffic to nodes that have CPU load higher than 80%
# max_utilization = 0.8
}
log_config {
enable = false
sample_rate = "1.0"
}
depends_on = [
google_compute_region_instance_group_manager.application,
google_compute_health_check.port,
]
}
## Create an SSL policy
resource "google_compute_ssl_policy" "application" {
project = var.project_id
name = local.application_name
min_tls_version = "TLS_1_2"
profile = "MODERN"
}
## Create a managed SSL certificate
resource "google_compute_managed_ssl_certificate" "default" {
project = var.project_id
name = "${local.application_name}-mig-lb-cert"
type = "MANAGED"
managed {
domains = [
var.application_dns_tld,
]
}
}
## Create URL map for the application
resource "google_compute_url_map" "default" {
project = var.project_id
name = local.application_name
default_service = google_compute_backend_service.default["http"].self_link
}
# Set up HTTP(s) proxies and redirect HTTP to HTTPS
resource "google_compute_url_map" "https_redirect" {
project = var.project_id
name = "${local.application_name}-https-redirect"
default_url_redirect {
https_redirect = true
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
strip_query = false
}
}
resource "google_compute_target_http_proxy" "default" {
project = var.project_id
name = "${local.application_name}-http"
url_map = google_compute_url_map.https_redirect.self_link
}
resource "google_compute_target_https_proxy" "default" {
project = var.project_id
name = "${local.application_name}-https"
url_map = google_compute_url_map.default.self_link
ssl_certificates = [google_compute_managed_ssl_certificate.default.self_link]
ssl_policy = google_compute_ssl_policy.application.self_link
quic_override = "NONE"
}
# Allocate global addresses for the load balancer and set up forwarding rules
## IPv4
resource "google_compute_global_address" "ipv4" {
project = var.project_id
name = "${local.application_name}-ipv4"
ip_version = "IPV4"
}
resource "google_compute_global_forwarding_rule" "http" {
project = var.project_id
name = local.application_name
labels = local.application_labels
target = google_compute_target_http_proxy.default.self_link
ip_address = google_compute_global_address.ipv4.address
port_range = "80"
load_balancing_scheme = "EXTERNAL"
}
resource "google_compute_global_forwarding_rule" "https" {
project = var.project_id
name = "${local.application_name}-https"
labels = local.application_labels
target = google_compute_target_https_proxy.default.self_link
ip_address = google_compute_global_address.ipv4.address
port_range = "443"
load_balancing_scheme = "EXTERNAL"
}
## IPv6
resource "google_compute_global_address" "ipv6" {
project = var.project_id
name = "${local.application_name}-ipv6"
ip_version = "IPV6"
}
resource "google_compute_global_forwarding_rule" "http_ipv6" {
project = var.project_id
name = "${local.application_name}-ipv6-http"
labels = local.application_labels
target = google_compute_target_http_proxy.default.self_link
ip_address = google_compute_global_address.ipv6.address
port_range = "80"
load_balancing_scheme = "EXTERNAL"
}
resource "google_compute_global_forwarding_rule" "https_ipv6" {
project = var.project_id
name = "${local.application_name}-ipv6-https"
labels = local.application_labels
target = google_compute_target_https_proxy.default.self_link
ip_address = google_compute_global_address.ipv6.address
port_range = "443"
load_balancing_scheme = "EXTERNAL"
}
## Open HTTP(S) ports for the load balancer
resource "google_compute_firewall" "http" {
project = var.project_id
name = "${local.application_name}-firewall-lb-to-instances"
network = var.vpc_network
source_ranges = local.google_load_balancer_ip_ranges
target_tags = ["app-${local.application_name}"]
dynamic "allow" {
for_each = var.application_ports
content {
protocol = allow.value.protocol
ports = [allow.value.port]
}
}
# Also allow UDP so QUIC can be used when it's enabled
dynamic "allow" {
for_each = var.application_ports
content {
protocol = "udp"
ports = [allow.value.port]
}
}
}
## Open HTTP(S) ports for the health checks
resource "google_compute_firewall" "http-health-checks" {
project = var.project_id
name = "${local.application_name}-healthcheck"
network = var.vpc_network
source_ranges = local.google_health_check_ip_ranges
target_tags = ["app-${local.application_name}"]
dynamic "allow" {
for_each = var.application_ports
content {
protocol = allow.value.protocol
ports = [allow.value.port]
}
}
}
# Allow outbound traffic
resource "google_compute_firewall" "egress-ipv4" {
project = var.project_id
name = "${local.application_name}-egress-ipv4"
network = var.vpc_network
direction = "EGRESS"
target_tags = ["app-${local.application_name}"]
destination_ranges = ["0.0.0.0/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "egress-ipv6" {
project = var.project_id
name = "${local.application_name}-egress-ipv6"
network = var.vpc_network
direction = "EGRESS"
target_tags = ["app-${local.application_name}"]
destination_ranges = ["::/0"]
allow {
protocol = "all"
}
}
# Create DNS records for the application
resource "google_dns_record_set" "application-ipv4" {
project = var.project_id
name = "${var.application_dns_tld}."
type = "A"
ttl = 300
managed_zone = var.dns_managed_zone_name
rrdatas = [
google_compute_global_address.ipv4.address
]
}
resource "google_dns_record_set" "application-ipv6" {
project = var.project_id
name = "${var.application_dns_tld}."
type = "AAAA"
ttl = 300
managed_zone = var.dns_managed_zone_name
rrdatas = [
google_compute_global_address.ipv6.address
]
}

View File

@@ -0,0 +1,15 @@
output "service_account" {
value = google_service_account.application
}
output "target_tags" {
value = ["app-${local.application_name}"]
}
output "instance_group" {
value = google_compute_region_instance_group_manager.application
}
output "host" {
value = var.application_dns_tld
}

View File

@@ -0,0 +1,93 @@
resource "google_project_service" "compute" {
project = var.project_id
service = "compute.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "pubsub" {
project = var.project_id
service = "pubsub.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "bigquery" {
project = var.project_id
service = "bigquery.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "container" {
project = var.project_id
service = "container.googleapis.com"
depends_on = [
google_project_service.compute,
google_project_service.pubsub,
google_project_service.bigquery,
]
disable_on_destroy = false
}
resource "google_project_service" "stackdriver" {
project = var.project_id
service = "stackdriver.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "logging" {
project = var.project_id
service = "logging.googleapis.com"
disable_on_destroy = false
depends_on = [google_project_service.stackdriver]
}
resource "google_project_service" "monitoring" {
project = var.project_id
service = "monitoring.googleapis.com"
disable_on_destroy = false
depends_on = [google_project_service.stackdriver]
}
resource "google_project_service" "clouddebugger" {
project = var.project_id
service = "clouddebugger.googleapis.com"
disable_on_destroy = false
depends_on = [google_project_service.stackdriver]
}
resource "google_project_service" "cloudprofiler" {
project = var.project_id
service = "cloudprofiler.googleapis.com"
disable_on_destroy = false
depends_on = [google_project_service.stackdriver]
}
resource "google_project_service" "cloudtrace" {
project = var.project_id
service = "cloudtrace.googleapis.com"
disable_on_destroy = false
depends_on = [google_project_service.stackdriver]
}
resource "google_project_service" "servicenetworking" {
project = var.project_id
service = "servicenetworking.googleapis.com"
disable_on_destroy = false
}

View File

@@ -0,0 +1,269 @@
variable "project_id" {
type = string
description = "ID of a Google Cloud Project"
}
################################################################################
## Compute
################################################################################
variable "compute_instance_type" {
type = string
description = "Type of the instance."
default = "n1-standard-1"
}
variable "compute_instance_region" {
type = string
description = "Region in which compute resources will be created."
}
variable "compute_instance_availability_zones" {
type = list(string)
description = "List of availability zones for the VMs. They must be in the same region as `var.compute_instance_region`."
}
################################################################################
## VPC
################################################################################
variable "vpc_network" {
description = "ID of a VPC which will be used to deploy the application."
type = string
}
variable "vpc_subnetwork" {
description = "ID of a VPC subnet which will be used to deploy the application."
type = string
}
################################################################################
## Container Registry
################################################################################
variable "container_registry" {
type = string
nullable = false
description = "Container registry URL to pull the image from."
}
# variable "container_registry_api_key" {
# type = string
# nullable = false
# }
# variable "container_registry_user_name" {
# type = string
# nullable = false
# }
################################################################################
## Container Image
################################################################################
variable "image_repo" {
type = string
nullable = false
description = "Repo of a container image used to deploy the application."
}
variable "image" {
type = string
nullable = false
description = "Container image used to deploy the application."
}
variable "image_tag" {
type = string
nullable = false
description = "Tag of the container image used to deploy the application."
}
################################################################################
## Scaling
################################################################################
variable "scaling_horizontal_replicas" {
type = number
nullable = false
default = 1
validation {
condition = var.scaling_horizontal_replicas > 0
error_message = "Number of replicas must be greater than 0."
}
description = "Number of replicas in an instance group."
}
################################################################################
## Observability
################################################################################
variable "observability_log_level" {
type = string
nullable = false
default = "info"
validation {
condition = (
contains(
["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
var.observability_log_level
)
)
error_message = "Only Elixir Logger log levels are accepted."
}
description = "Sets LOG_LEVEL environment variable which applications should use to configure Elixir Logger. Default: 'info'."
}
################################################################################
## Erlang
################################################################################
variable "erlang_release_name" {
type = string
nullable = true
default = null
description = <<EOT
Name of an Erlang/Elixir release, which should correspond to the shell executable name used to run the container.
By default, `var.image_tag` with `-` replaced by `_` is used.
EOT
}
variable "erlang_cluster_cookie" {
type = string
nullable = false
description = "Value of the Erlang cluster cookie."
}
variable "erlang_cluster_disterl_port" {
type = number
nullable = false
default = 10000
description = <<EOT
Sets the `LISTEN_DIST_MIN` and `LISTEN_DIST_MAX` environment variables that can be used by setting
`ELIXIR_ERL_OPTIONS="-kernel inet_dist_listen_min $\{LISTEN_DIST_MIN} inet_dist_listen_max $\{LISTEN_DIST_MAX}"`
option in `env.sh.eex` for the Elixir release.
This helps when you want to forward the port from localhost to the cluster and connect to a remote Elixir node
to debug it in production.
Default: 10000.
EOT
}
variable "erlang_cluster_node_name" {
type = string
nullable = true
default = null
description = <<EOT
Name of the node in the Erlang cluster. Defaults to `replace(var.image, "_", "-")`.
EOT
}
################################################################################
## DNS
################################################################################
variable "dns_managed_zone_name" {
type = string
nullable = false
description = "Name of the DNS managed zone."
}
################################################################################
## Application
################################################################################
variable "application_name" {
type = string
nullable = true
default = null
description = "Name of the application. Defaults to the value of `var.image`."
}
variable "application_version" {
type = string
nullable = true
default = null
description = "Version of the application. Defaults to the value of `var.image_tag`."
}
variable "application_labels" {
type = map(string)
nullable = false
default = {}
description = "Labels to add to all created by this module resources."
}
variable "application_dns_tld" {
type = string
nullable = false
description = "DNS host which will be used to create DNS records for the application and provision SSL certificates."
}
variable "application_ports" {
type = list(object({
name = string
protocol = string
port = number
health_check = object({
initial_delay_sec = number
check_interval_sec = optional(number)
timeout_sec = optional(number)
healthy_threshold = optional(number)
unhealthy_threshold = optional(number)
tcp_health_check = optional(object({}))
http_health_check = optional(object({
host = optional(string)
request_path = optional(string)
port = optional(string)
response = optional(string)
}))
https_health_check = optional(object({
host = optional(string)
request_path = optional(string)
port = optional(string)
response = optional(string)
}))
})
}))
nullable = false
default = []
description = "List of ports to expose for the application. One of the ports MUST be named 'http' for the auto-healing policy to work."
}
variable "application_environment_variables" {
type = list(object({
name = string
value = string
}))
nullable = false
default = []
description = "List of environment variables to set for all application containers."
}

View File

@@ -0,0 +1,35 @@
resource "google_project_service" "artifactregistry" {
project = var.project_id
service = "artifactregistry.googleapis.com"
disable_on_destroy = false
}
resource "google_artifact_registry_repository" "firezone" {
project = var.project_id
location = var.region
repository_id = "firezone"
description = "Repository for storing Docker images in the ${var.project_name}."
format = "DOCKER"
depends_on = [
google_project_service.artifactregistry
]
}
data "google_iam_policy" "github_actions" {
binding {
role = "roles/artifactregistry.writer"
members = var.writers
}
}
resource "google_artifact_registry_repository_iam_policy" "policy" {
project = google_artifact_registry_repository.firezone.project
location = google_artifact_registry_repository.firezone.location
repository = google_artifact_registry_repository.firezone.name
policy_data = data.google_iam_policy.github_actions.policy_data
}

View File

@@ -0,0 +1,11 @@
output "name" {
value = google_artifact_registry_repository.firezone.name
}
output "url" {
value = "${var.region}-docker.pkg.dev"
}
output "repo" {
value = "${var.project_id}/${google_artifact_registry_repository.firezone.name}"
}

View File

@@ -0,0 +1,16 @@
variable "project_id" {
description = "The ID of the project in which the resource belongs."
}
variable "project_name" {
description = "The name of the project in which the resource belongs."
}
variable "region" {
description = "The region in which the registry is hosted."
}
variable "writers" {
description = "The list of IAM members that have write access to the container registry."
type = list(string)
}

View File

@@ -0,0 +1,48 @@
resource "google_project_service" "dns" {
project = var.project_id
service = "dns.googleapis.com"
disable_on_destroy = false
}
resource "google_dns_managed_zone" "main" {
project = var.project_id
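# e.g. tld "firez.one" yields the zone name "firez-one"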
name = join("-", compact(split(".", var.tld)))
dns_name = "${var.tld}."
labels = {
managed = true
managed_by = "terraform"
}
dnssec_config {
kind = "dns#managedZoneDnsSecConfig"
non_existence = "nsec3"
state = var.dnssec_enabled ? "on" : "off"
default_key_specs {
algorithm = "rsasha256"
key_length = 2048
key_type = "keySigning"
kind = "dns#dnsKeySpec"
}
default_key_specs {
algorithm = "rsasha256"
key_length = 1024
key_type = "zoneSigning"
kind = "dns#dnsKeySpec"
}
}
lifecycle {
# prevent_destroy = true
ignore_changes = []
}
depends_on = [
google_project_service.dns
]
}

View File

@@ -0,0 +1,11 @@
output "name_servers" {
value = join(" ", google_dns_managed_zone.main.name_servers)
}
output "zone_name" {
value = google_dns_managed_zone.main.name
}
output "dns_name" {
value = google_dns_managed_zone.main.dns_name
}

View File

@@ -0,0 +1,13 @@
variable "project_id" {
description = "The ID of the project in which the resource belongs."
}
variable "tld" {
description = "The top level domain to use for the cluster. Must not end with a dot (one is appended when building the zone's dns_name), eg: 'firez.one'"
type = string
}
variable "dnssec_enabled" {
description = "Whether or not to enable DNSSEC"
type = bool
}

View File

@@ -0,0 +1,36 @@
resource "google_project" "project" {
name = var.name
org_id = var.organization_id
billing_account = var.billing_account_id
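# When id is omitted, it is derived from the name, e.g. "Staging Environment" becomes "staging-environment"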
project_id = var.id != "" ? var.id : replace(lower(var.name), " ", "-")
}
resource "google_project_service" "oslogin" {
project = google_project.project.project_id
service = "oslogin.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "iam" {
project = google_project.project.project_id
service = "iam.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "iamcredentials" {
project = google_project.project.project_id
service = "iamcredentials.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "serviceusage" {
project = google_project.project.project_id
service = "serviceusage.googleapis.com"
disable_on_destroy = false
}

View File

@@ -0,0 +1,9 @@
output "project" {
description = "Project struct which can be used to create resources in this project"
value = google_project.project
}
output "name" {
description = "The project name"
value = google_project.project.name
}

View File

@@ -0,0 +1,16 @@
variable "organization_id" {
description = "ID of a Google Cloud Organization"
}
variable "billing_account_id" {
description = "ID of a Google Cloud Billing Account which will be used to pay for resources"
}
variable "name" {
description = "Name of a Google Cloud Project"
}
variable "id" {
description = "ID of a Google Cloud Project. Can be omitted and will be generated automatically"
default = ""
}

View File

@@ -0,0 +1,198 @@
# Enable Cloud SQL for the Google Cloud project
resource "google_project_service" "sqladmin" {
project = var.project_id
service = "sqladmin.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "sql-component" {
project = var.project_id
service = "sql-component.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "servicenetworking" {
project = var.project_id
service = "servicenetworking.googleapis.com"
disable_on_destroy = false
}
# Create an address range reserved for Google Cloud SQL and connect it to the VPC network
resource "google_compute_global_address" "private_ip_pool" {
project = var.project_id
network = var.network
name = "google-cloud-sql"
purpose = "VPC_PEERING"
address_type = "INTERNAL"
prefix_length = 16
}
resource "google_service_networking_connection" "connection" {
network = var.network
service = "servicenetworking.googleapis.com"
reserved_peering_ranges = [google_compute_global_address.private_ip_pool.name]
depends_on = [
google_project_service.servicenetworking,
]
}
# Create the main Cloud SQL instance
resource "google_sql_database_instance" "master" {
project = var.project_id
name = var.database_name
database_version = var.database_version
region = var.compute_region
settings {
tier = "db-custom-${var.compute_instance_cpu_count}-${var.compute_instance_memory_size}"
disk_type = "PD_SSD"
disk_autoresize = true
activation_policy = "ALWAYS"
availability_type = var.database_highly_available ? "REGIONAL" : "ZONAL"
deletion_protection_enabled = var.database_name == "production"
location_preference {
zone = var.compute_availability_zone
}
backup_configuration {
enabled = length(var.database_read_replica_locations) > 0 ? true : var.database_backups_enabled
start_time = "10:00"
# PITR backups must be enabled if read replicas are enabled
point_in_time_recovery_enabled = length(var.database_read_replica_locations) > 0 ? true : var.database_backups_enabled
backup_retention_settings {
retained_backups = 7
}
}
ip_configuration {
ipv4_enabled = length(var.database_read_replica_locations) > 0 ? false : true
private_network = var.network
}
maintenance_window {
day = 7
hour = 8
update_track = "stable"
}
insights_config {
query_insights_enabled = true
record_application_tags = true
record_client_address = false
query_plans_per_minute = 20
query_string_length = 4500
}
password_validation_policy {
enable_password_policy = true
complexity = "COMPLEXITY_DEFAULT"
min_length = 16
disallow_username_substring = true
}
dynamic "database_flags" {
for_each = var.database_flags
content {
name = database_flags.key
value = database_flags.value
}
}
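# Pin maintenance_work_mem to ~5% of instance memory; the memory size is given in MB and the flag value is in KB,
# e.g. 7680 MB -> floor(7680 * 1024 / 100 * 5) = 393216 KB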
database_flags {
name = "maintenance_work_mem"
value = floor(var.compute_instance_memory_size * 1024 / 100 * 5)
}
}
lifecycle {
prevent_destroy = true
ignore_changes = []
}
depends_on = [
google_project_service.sqladmin,
google_project_service.sql-component,
google_service_networking_connection.connection,
]
}
# Create followers for the main Cloud SQL instance
resource "google_sql_database_instance" "read-replica" {
# Key by region so each.key is a string usable in the replica name below
for_each = { for location in var.database_read_replica_locations : location.region => location }
project = var.project_id
name = "${var.database_name}-read-replica-${each.key}"
database_version = var.database_version
region = each.value.region
master_instance_name = var.database_name
replica_configuration {
connect_retry_interval = "30"
}
settings {
# We must use the same tier as the master instance,
# otherwise it might lag behind during replication and won't be usable
tier = "db-custom-${var.compute_instance_cpu_count}-${var.compute_instance_memory_size}"
disk_type = "PD_SSD"
disk_autoresize = true
activation_policy = "ALWAYS"
availability_type = "ZONAL"
location_preference {
zone = var.compute_availability_zone
}
ip_configuration {
ipv4_enabled = true
private_network = var.network
}
insights_config {
query_insights_enabled = true
record_application_tags = true
record_client_address = false
query_plans_per_minute = 25
query_string_length = 4500
}
dynamic "database_flags" {
for_each = var.database_flags
content {
name = database_flags.key
value = database_flags.value
}
}
}
lifecycle {
prevent_destroy = true
ignore_changes = []
}
depends_on = [google_sql_database_instance.master]
}

View File

@@ -0,0 +1,15 @@
output "master_instance_ip_address" {
value = google_sql_database_instance.master.private_ip_address
}
output "master_instance_name" {
value = google_sql_database_instance.master.name
}
output "master_instance_address" {
value = google_sql_database_instance.master.private_ip_address
}
output "read-replicas" {
value = google_sql_database_instance.read-replica
}

View File

@@ -0,0 +1,56 @@
variable "project_id" {
description = "The ID of the project in which the resource belongs."
}
variable "compute_region" {
description = "The region the instance will sit in."
}
variable "compute_availability_zone" {
description = "The preferred compute engine zone. See https://cloud.google.com/compute/docs/regions-zones?hl=en"
}
variable "compute_instance_memory_size" {
description = "Instance memory size, in MB. See https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create"
}
variable "compute_instance_cpu_count" {
description = "Count of CPUs. See https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create"
}
variable "network" {
description = "Full network identifier which is used to create private VPC connection with Cloud SQL instance"
}
variable "database_name" {
description = "Name of the Cloud SQL database"
}
variable "database_version" {
description = "Version of the Cloud SQL database"
default = "POSTGRES_15"
}
variable "database_highly_available" {
description = "Creates a failover replica of the master instance and makes its availability regional."
default = false
}
variable "database_backups_enabled" {
description = "Should backups be enabled on this database?"
default = false
}
variable "database_read_replica_locations" {
description = "List of read-only replicas to create."
type = list(object({
region = string
}))
default = []
}
variable "database_flags" {
description = "List of PostgreSQL database flags. Can be used to install Postgres extensions."
type = map(string)
default = {}
}

View File

@@ -0,0 +1,15 @@
resource "google_project_service" "storage-api" {
project = var.project_id
service = "storage-api.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "storage-component" {
project = var.project_id
service = "storage-component.googleapis.com"
disable_on_destroy = false
}

View File

@@ -0,0 +1,3 @@
variable "project_id" {
description = "The ID of the project in which the resource belongs."
}

View File

@@ -0,0 +1,19 @@
resource "google_project_service" "compute" {
project = var.project_id
service = "compute.googleapis.com"
disable_on_destroy = false
}
resource "google_compute_network" "vpc_network" {
project = var.project_id
name = var.name
routing_mode = "GLOBAL"
auto_create_subnetworks = false
depends_on = [
google_project_service.compute
]
}

View File

@@ -0,0 +1,11 @@
output "id" {
value = google_compute_network.vpc_network.id
}
output "name" {
value = google_compute_network.vpc_network.name
}
output "self_link" {
value = google_compute_network.vpc_network.self_link
}

View File

@@ -0,0 +1,7 @@
variable "project_id" {
description = "The ID of the project in which the resource belongs."
}
variable "name" {
description = "Name of the resource. Provided by the client when the resource is created."
}