chore(infra): Move terraform/environments to submodule (#8168)

This moves our current GCP infra to a new firezone/environments repo.
The existing Git history is preserved, and the CI config is updated to clone
this submodule before running any Terraform jobs.
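For anyone pulling this change, a minimal local sketch of what the updated CI checkout does (it assumes you have SSH access to the private firezone/environments repo):

```shell
# Clone the main repo together with the new submodule
git clone --recurse-submodules git@github.com:firezone/firezone.git

# Or, in an existing checkout, initialize the submodule after pulling this commit
git submodule update --init terraform/environments
```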
Jamil authored on 2025-02-18 06:01:13 -08:00; committed by GitHub
parent 3e4976e4ab
commit d99508ead5
38 changed files with 23 additions and 5570 deletions


@@ -9,7 +9,7 @@ repos:
pass_filenames: false
- id: prettier-git-files
name: Prettier (only git-tracked files)
entry: bash -c 'git ls-files -z | xargs -0 prettier --check --ignore-unknown'
entry: bash -c 'git grep --cached -z -l '' | xargs -0 prettier --check --ignore-unknown'
language: system
pass_filenames: false
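For context, `git ls-files` also prints the submodule path itself (a gitlink, not a regular file), while `git grep --cached -l ''` only lists blobs present in the index, so the new submodule is skipped; that behavioral difference is likely why the hook changed in the same commit. A quick, illustrative comparison:

```shell
# git ls-files includes the submodule path (recorded as a gitlink)
git ls-files | grep '^terraform/environments$'

# git grep only matches indexed blob content, so the submodule path never appears
git grep --cached -l '' -- terraform/environments || echo "no blobs listed for the submodule"
```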


@@ -114,16 +114,23 @@ jobs:
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
TF_WORKSPACE: "production"
steps:
# First, checkout the main ref for setting up Terraform
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
- name: Tool Versions
id: versions
uses: marocchino/tool-versions-action@18a164fa2b0db1cc1edf7305fcb17ace36d1c306 # v1.2.0
- uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
with:
terraform_version: ${{ steps.versions.outputs.terraform }}
# Then, checkout the ref specified in the workflow run
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.workflow_run.head_branch }}
submodules: true
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
- name: Upload Configuration
uses: hashicorp/tfc-workflows-github/actions/upload-configuration@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
id: apply-upload


@@ -14,13 +14,16 @@ jobs:
TF_WORKSPACE: "staging"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
- run: ls -alR terraform/environments
- name: Tool Versions
id: versions
uses: marocchino/tool-versions-action@18a164fa2b0db1cc1edf7305fcb17ace36d1c306 # v1.2.0
- uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
with:
terraform_version: ${{ steps.versions.outputs.terraform }}
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Validate cloud-init
run: |
sudo apt-get update


@@ -57,16 +57,23 @@ jobs:
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
TF_WORKSPACE: "staging"
steps:
# First, checkout the main ref for setting up Terraform
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: true
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
- name: Tool Versions
id: versions
uses: marocchino/tool-versions-action@18a164fa2b0db1cc1edf7305fcb17ace36d1c306 # v1.2.0
- uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
with:
terraform_version: ${{ steps.versions.outputs.terraform }}
# Then, checkout the ref specified in the workflow run
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.workflow_run.head_branch }}
submodules: true
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
- name: Upload Configuration
uses: hashicorp/tfc-workflows-github/actions/upload-configuration@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
id: apply-upload

.gitmodules (vendored, new file, +3)

@@ -0,0 +1,3 @@
[submodule "terraform/environments"]
path = terraform/environments
url = git@github.com:firezone/environments.git
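As a usage note, a hedged sketch of how the submodule pointer gets bumped later, assuming the environments repo's default branch is main (the commit message below is illustrative):

```shell
# Point the parent repo at a newer environments commit
cd terraform/environments
git fetch origin && git checkout origin/main
cd ../..
git add terraform/environments
git commit -m "chore(infra): bump terraform/environments submodule"
```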


@@ -1,2 +0,0 @@
# This dir is simply an example; don't commit the lockfile
/.terraform.lock.hcl


@@ -1,59 +0,0 @@
# Performance Terraform environment
This directory contains Terraform examples for spinning up VMs on Azure to be
used for performance testing.
This is primarily meant for internal use by the Firezone team at this time,
but anyone can use the scripts here by changing the variables in a local
`terraform.tfvars` as needed.
## Get started
1. [Install](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli)
Terraform if you haven't already.
1. [Install](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) the
Azure CLI if you haven't already.
1. Clone this repository, and `cd` to this directory.
1. Run `terraform init` to initialize the directory.
1. Login to Azure using the Azure CLI with `az login`.
1. Find the subscription ID you want to use with `az account subscription list`.
If unsure, contact your Azure admin to avoid incurring billing charges under
the wrong billing subscription.
1. Generate a keypair to use for your own admin SSH access (**must** be RSA):
```shell
ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa.azure
```
1. Obtain `terraform.tfvars` using one of the following methods:
1. Your team's shared credentials vault (e.g. 1password)
1. Your Azure admin
1. Or, generate it by following the instructions at
https://developer.hashicorp.com/terraform/tutorials/azure-get-started/azure-build
and populating a `terraform.tfvars` file in this directory:
```hcl
# Azure billing subscription ID
subscription_id = "SUBSCRIPTION-ID-FROM-PREVIOUS-STEP"
# Obtain these variables by following the guide above
arm_client_id = "AZURE-SERVICE-PRINCIPAL-CLIENT-ID"
arm_client_secret = "AZURE-SERVICE-PRINCIPAL-CLIENT-SECRET"
arm_tenant_id = "AZURE-SERVICE-PRINCIPAL-TENANT-ID"
# All VMs need a public RSA SSH key specified for the admin user. Insert yours below.
admin_ssh_pubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC7..."
# Set your own naming prefix to avoid clobbering others' resources
naming_prefix = "CHANGEME"
```
1. Run `terraform apply` to create the resources.
1. Done! You can now SSH into your VM like so:
```shell
# Login using the name of resources used in Terraform config above
az ssh vm \
--resource-group CHANGEME-rg-westus2 \
--vm-name CHANGEME-vm-westus2 \
--private-key-file ~/.ssh/id_rsa.azure \
--local-user adminuser
```


@@ -1,116 +0,0 @@
locals {
# Find this with `az account subscription list`
arm_subscription_id = var.subscription_id
# Generate these by following
# https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret?ajs_aid=fdab1b75-b67a-4e43-8a41-7cb014d5c881&product_intent=terraform#creating-a-service-principal-using-the-azure-cli
#
# and then saving to terraform.tfvars in this directory:
#
# arm_client_id = "..."
# arm_client_secret = "..."
# arm_tenant_id = "..."
arm_client_id = var.arm_client_id
arm_client_secret = var.arm_client_secret
arm_tenant_id = var.arm_tenant_id
}
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 3.0.2"
}
}
required_version = ">= 1.1.0"
}
provider "azurerm" {
features {}
}
# US-west resource group
resource "azurerm_resource_group" "rg-westus2" {
name = "${var.naming_prefix}-rg-westus2"
location = "westus2"
}
# US-west virtual network
resource "azurerm_virtual_network" "vnet-westus2" {
name = "${var.naming_prefix}-vnet-westus2"
resource_group_name = azurerm_resource_group.rg-westus2.name
location = azurerm_resource_group.rg-westus2.location
address_space = ["10.0.0.0/16"]
}
# US-west subnet
resource "azurerm_subnet" "subnet-westus2" {
name = "${var.naming_prefix}-subnet-westus2"
resource_group_name = azurerm_resource_group.rg-westus2.name
virtual_network_name = azurerm_virtual_network.vnet-westus2.name
address_prefixes = ["10.0.0.0/24"]
}
# NIC for US-west VM
resource "azurerm_network_interface" "nic-westus2" {
name = "${var.naming_prefix}-nic-westus2"
location = azurerm_resource_group.rg-westus2.location
resource_group_name = azurerm_resource_group.rg-westus2.name
# Enable accelerated networking; it can only be enabled on one NIC per VM
enable_accelerated_networking = true
ip_configuration {
name = "${var.naming_prefix}-ipconfig-westus2"
subnet_id = azurerm_subnet.subnet-westus2.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.publicip-westus2.id
}
}
# Public IP for US-west VM
resource "azurerm_public_ip" "publicip-westus2" {
name = "${var.naming_prefix}-publicip-westus2"
location = azurerm_resource_group.rg-westus2.location
resource_group_name = azurerm_resource_group.rg-westus2.name
allocation_method = "Dynamic"
}
# US-west VM
resource "azurerm_linux_virtual_machine" "vm-westus2" {
name = "${var.naming_prefix}-vm-westus2"
resource_group_name = azurerm_resource_group.rg-westus2.name
location = azurerm_resource_group.rg-westus2.location
# 16 vCPUs, 56 GB RAM, Premium SSD
size = "Standard_DS5_v2"
network_interface_ids = [
azurerm_network_interface.nic-westus2.id
]
# Username of the admin user
admin_username = "adminuser"
admin_ssh_key {
username = "adminuser"
public_key = var.admin_ssh_pubkey
}
# Allow others access to the VM
identity {
type = "SystemAssigned"
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "0001-com-ubuntu-server-jammy"
sku = "22_04-lts"
version = "latest"
}
}


@@ -1,29 +0,0 @@
variable "subscription_id" {
description = "The Azure billing subscription to use"
type = string
}
variable "arm_client_id" {
description = "The Azure service principal client id"
type = string
}
variable "arm_client_secret" {
description = "The Azure service principal client secret"
type = string
}
variable "arm_tenant_id" {
description = "The Azure service principal tenant id"
type = string
}
variable "admin_ssh_pubkey" {
description = "The SSH public key to use for the admin user"
type = string
}
variable "naming_prefix" {
description = "The prefix to use for all resources"
type = string
}


@@ -1,124 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/cyrilgdn/postgresql" {
version = "1.25.0"
constraints = "1.25.0"
hashes = [
"h1:S0Nv3pAGngEqGAniq2y1aINUL9IqftERPNNXJiHCTC8=",
"zh:0f9db6e1274603d642e96b58eaf6cc4223f7118f2d7ce909dc4812d332cc002a",
"zh:1819470f0304c6a60b2b51817cb43f6ff59a49e08cc9e50644b86b3a76c91601",
"zh:27bfb544983cac101a7c7c2e4cb9939a712dffcdd7ddcab83c2f8afc334e33c5",
"zh:46166f6f05771b0495df18459fdf3a63fae8b38e95a1b2754f03d006e17ea33d",
"zh:64d53afc52f26e8214990acc3e07f3b47bef628aa6b317595a8faec05b252209",
"zh:944d7ded418c022dd3ee513246677d601376fa38d76c9c4aecff2c2eefcaa35b",
"zh:9819551b61542a6d322d6a323bbb552ce02e769ce2222fd9bb1935473c7c4b3c",
"zh:c38bd73e208fe216efab48d099c85b8ad1e51ff102b3892443febc9778e7236e",
"zh:c73de133274dcc7a03e95f598550facc59315538f355e57e14b36e222b298826",
"zh:c7af02f5338bfe7f1976e01d3fcf82e05b3551893e732539a84c568d25571a84",
"zh:d1aa3d7432c7de883873f8f70e9a6207c7b536d874486d37aee0ca8c8853a890",
"zh:e17e9809fc7cc2d6f89078b8bfe6308930117b2270be8081820da40029b04828",
"zh:e1b21b7b7022e0d468d72f4534d226d57a7bfd8c96a4c7dc2c2fa0bb0b99298d",
"zh:f24b73645d8bc225f692bdf9c035411099ef57138569f45f3605ec79ac872e3b",
]
}
provider "registry.terraform.io/hashicorp/google" {
version = "6.20.0"
constraints = "~> 6.10"
hashes = [
"h1:Wo8fqu73OOylxVykdz593VVuWntAaHQMtRkh4eKow60=",
"zh:3dbdc8b3c7d9ef13ec3f69607232f23d29ed85df871d017a9a2338427dfae19b",
"zh:4ac9a128c3a957e22100eb3f813f62ffa0ab47d2e1ac0c9a1370779bafcd5fac",
"zh:4be504e8267709295708b12aa1ab9fbc93dbb801600347cfd82a27a204c3e04c",
"zh:5c65452be9ec6ccf728fc122616dff284d3497591736228d7571222511926ede",
"zh:76eca544918f9ed4bcf62178644216ef27163d6c763b11ee161fa654f7794d8a",
"zh:7c142461e3059b709cb84dae9a135c72769c127986eede40516664ea25ff18d5",
"zh:9636dcbf914d47e54ffdcf3a27c6dbbba63ea83e690fa8bf640bdf0421f352be",
"zh:a307597b41e08281120e182aa58cb77490fd3fddeb4131daa967025e039aad9b",
"zh:b6087dbaf762f4848449910476f22571712f22b7108c68ab2c173a8fff185518",
"zh:d0ff076ad9d2c5b85b24d39b104a2ac255588aa1ea6a2eaf5344445de81c8006",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fb727a1cc6f2238059aa61baa84658dc5c348823d796f72921d030ff35a2723c",
]
}
provider "registry.terraform.io/hashicorp/google-beta" {
version = "6.20.0"
constraints = "~> 6.10"
hashes = [
"h1:AgnC4F95gu1clyMeCqTg2H8oqtlI2cARqoc5C0d0DcM=",
"zh:0500efa8db90436f3d0dc86f40e3c351249545a3d5927a9df6b8248d25214061",
"zh:18fdf86beb5fc9fce0a7c053d5629c230a3795d07327f0241a2d4bac90d7366e",
"zh:1a9a378679ac913b2c66197a6194250f2a601ea3c42829127a020814470b6866",
"zh:24f2bccd99cc419ac8f72064f3e3cca93903b89b98b26f6e1bb41cf944f2bf53",
"zh:44835cae5a5eae707330985423284191c34043ec2cf5a7a92e0370e2960f7541",
"zh:48a2196edc1a88a31c1ce5dec1f084b8821f1d41d9fbf83c38738f6c1b5e27d8",
"zh:592317cec2a061bcfd538c4ca7d4b5733c47d70a378086287732f4c6dc8d8f25",
"zh:8f6743bf5ae0754e4ea5c6cb4f40114a24e2eb3823b332fe0cb8fa9e777bcd2c",
"zh:a54164bc670c658efb3183a24785e383b1c516dc82b374103a3b587a9c3d9d80",
"zh:b5ab872bb770de30b1a4e0e5b396f14aed333541c7373bb71ced38d06666bddd",
"zh:d7a2c540a21eeb8ac933736e795fccc7592e7bcf69b48cdf79fd47240f977383",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.3"
constraints = "~> 3.2"
hashes = [
"h1:I0Um8UkrMUb81Fxq/dxbr3HLP2cecTH2WMJiwKSrwQY=",
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
constraints = "~> 3.6"
hashes = [
"h1:zG9uFP8l9u+yGZZvi5Te7PV62j50azpgwPunq2vTm1E=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.6"
constraints = "~> 4.0"
hashes = [
"h1:n3M50qfWfRSpQV9Pwcvuse03pEizqrmYEryxKky4so4=",
"zh:10de0d8af02f2e578101688fd334da3849f56ea91b0d9bd5b1f7a243417fdda8",
"zh:37fc01f8b2bc9d5b055dc3e78bfd1beb7c42cfb776a4c81106e19c8911366297",
"zh:4578ca03d1dd0b7f572d96bd03f744be24c726bfd282173d54b100fd221608bb",
"zh:6c475491d1250050765a91a493ef330adc24689e8837a0f07da5a0e1269e11c1",
"zh:81bde94d53cdababa5b376bbc6947668be4c45ab655de7aa2e8e4736dfd52509",
"zh:abdce260840b7b050c4e401d4f75c7a199fafe58a8b213947a258f75ac18b3e8",
"zh:b754cebfc5184873840f16a642a7c9ef78c34dc246a8ae29e056c79939963c7a",
"zh:c928b66086078f9917aef0eec15982f2e337914c5c4dbc31dd4741403db7eb18",
"zh:cded27bee5f24de6f2ee0cfd1df46a7f88e84aaffc2ecbf3ff7094160f193d50",
"zh:d65eb3867e8f69aaf1b8bb53bd637c99c6b649ba3db16ded50fa9a01076d1a27",
"zh:ecb0c8b528c7a619fa71852bb3fb5c151d47576c5aab2bf3af4db52588722eeb",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}


@@ -1,200 +0,0 @@
# Deploy our Metabase instance
locals {
metabase_region = local.region
metabase_zone = local.availability_zone
}
resource "random_password" "metabase_db_password" {
length = 16
min_lower = 1
min_upper = 1
min_numeric = 1
min_special = 1
}
resource "google_sql_user" "metabase" {
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
name = "metabase"
password = random_password.metabase_db_password.result
}
resource "google_sql_database" "metabase" {
project = module.google-cloud-project.project.project_id
name = "metabase"
instance = module.google-cloud-sql.master_instance_name
}
resource "postgresql_grant" "grant_select_on_all_tables_schema_to_metabase" {
database = google_sql_database.firezone.name
privileges = ["SELECT"]
objects = [] # ALL
object_type = "table"
schema = "public"
role = google_sql_user.metabase.name
depends_on = [
google_sql_user.metabase
]
}
resource "postgresql_grant" "grant_execute_on_all_functions_schema_to_metabase" {
database = google_sql_database.firezone.name
privileges = ["EXECUTE"]
objects = [] # ALL
object_type = "function"
schema = "public"
role = google_sql_user.metabase.name
depends_on = [
google_sql_user.metabase
]
}
module "metabase" {
source = "../../modules/google-cloud/apps/metabase"
project_id = module.google-cloud-project.project.project_id
compute_network = module.google-cloud-vpc.id
compute_subnetwork = google_compute_subnetwork.tools.self_link
compute_instance_type = "n1-standard-1"
compute_region = local.metabase_region
compute_instance_availability_zone = local.metabase_zone
image_repo = "metabase"
image = "metabase"
image_tag = var.metabase_image_tag
application_name = "metabase"
application_version = replace(replace(var.metabase_image_tag, ".", "-"), "v", "")
application_environment_variables = [
{
name = "MB_DB_TYPE"
value = "postgres"
},
{
name = "MB_DB_DBNAME"
value = google_sql_database.metabase.name
},
{
name = "MB_DB_PORT"
value = "5432"
},
{
name = "MB_DB_USER"
value = google_sql_user.metabase.name
},
{
name = "MB_DB_PASS"
value = random_password.metabase_db_password.result
},
{
name = "MB_DB_HOST"
value = module.google-cloud-sql.master_instance_ip_address
},
{
name = "MB_SITE_NAME"
value = module.google-cloud-project.project.project_id
},
{
name = "MB_ANON_TRACKING_ENABLED"
value = "false"
},
# {
# name = "MB_JETTY_PORT"
# value = "80"
# }
]
health_check = {
name = "health"
protocol = "TCP"
port = 3000
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
}
# Allow outbound traffic
resource "google_compute_firewall" "egress-ipv4" {
project = module.google-cloud-project.project.project_id
name = "metabase-egress-ipv4"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.metabase.target_tags
destination_ranges = ["0.0.0.0/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "egress-ipv6" {
project = module.google-cloud-project.project.project_id
name = "metabase-egress-ipv6"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.metabase.target_tags
destination_ranges = ["::/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "metabase-ssh-ipv4" {
project = module.google-cloud-project.project.project_id
name = "metabase-ssh-ipv4"
network = module.google-cloud-vpc.id
allow {
protocol = "tcp"
ports = [22]
}
allow {
protocol = "udp"
ports = [22]
}
allow {
protocol = "sctp"
ports = [22]
}
log_config {
metadata = "INCLUDE_ALL_METADATA"
}
# Only allows connections using IAP
source_ranges = local.iap_ipv4_ranges
target_tags = module.metabase.target_tags
}


@@ -1,215 +0,0 @@
# Allow Google Cloud and Let's Encrypt to issue certificates for our domain
resource "google_dns_record_set" "dns-caa" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CAA"
name = module.google-cloud-dns.dns_name
rrdatas = [
"0 issue \"pki.goog\"",
"0 iodef \"mailto:security@firezone.dev\""
]
ttl = 3600
}
resource "google_dns_record_set" "website-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = module.google-cloud-dns.dns_name
rrdatas = [google_compute_global_address.tld-ipv4.address]
ttl = 3600
}
# Website
resource "google_dns_record_set" "website-www-redirect" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "www.${module.google-cloud-dns.dns_name}"
rrdatas = ["cname.vercel-dns.com."]
ttl = 3600
}
resource "google_dns_record_set" "blog-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "blog.${module.google-cloud-dns.dns_name}"
rrdatas = [google_compute_global_address.tld-ipv4.address]
ttl = 3600
}
resource "google_dns_record_set" "docs-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "docs.${module.google-cloud-dns.dns_name}"
rrdatas = [google_compute_global_address.tld-ipv4.address]
ttl = 3600
}
# Third-party services
# Mailgun
resource "google_dns_record_set" "mailgun-dkim" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "kone._domainkey.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
# Reference: https://groups.google.com/g/cloud-dns-discuss/c/k_l6JP-H29Y
# Individual strings cannot exceed 255 characters in length, or "Invalid record data" results.
# DKIM clients concatenate all of the strings before parsing tags, so to work around the limit,
# add whitespace within the p= tag such that each string fits within the 255-character limit.
rrdatas = [
"\"k=rsa;\" \"p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwhTddUFz+LHTx63SpYvoAc4UzPgXy71Lq950bgVgrwHqLiktRnXFliKGbwL/QPyzOOWBYd1B3brC81B0IoBZkNxFj1mA1EKd8oFi8GMaKA5YuPbrkTT9AGXx0VpMMqDUcYoGWplXnMSY2ICdSRxOdQ5sXLdLqEyIVWm8WiF2+U7Zq15PSNr1VigByCknc7N0Pes0qbVTuWVNd\" \"BBYFO5igHpRaHZtYU/dT5ebXxcvZJgQinW23erS6fFgNuUOOwhGJCay5ahpAnufuQB52eEkM/AHb9cXxVG5g04+6xZSMT7/aI7m1IOzulOds71RAn7FN4LJhdI0DgOmIUVj4G32OwIDAQAB\""
]
}
# GitHub
resource "google_dns_record_set" "github-verification" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "_github-challenge-firezone-organization.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"ca4903847a"
]
}
# Google Workspace
resource "google_dns_record_set" "google-mail" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = module.google-cloud-dns.dns_name
type = "MX"
ttl = 3600
rrdatas = [
"1 aspmx.l.google.com.",
"5 alt1.aspmx.l.google.com.",
"5 alt2.aspmx.l.google.com.",
"10 alt3.aspmx.l.google.com.",
"10 alt4.aspmx.l.google.com."
]
}
resource "google_dns_record_set" "google-dmarc" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "_dmarc.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=DMARC1;\" \"p=reject;\" \"rua=mailto:dmarc-reports@firezone.dev;\" \"pct=100;\" \"adkim=r;\" \"aspf=r\""
]
}
resource "google_dns_record_set" "root-verifications" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = module.google-cloud-dns.dns_name
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=spf1 mx include:23723443.spf07.hubspotemail.net include:sendgrid.net include:_spf.google.com include:mailgun.org ~all\"",
# TODO: only keep the last one needed
"google-site-verification=hbBLPfTlejIaxyFTPZN0RaIk6Y6qhQTG2yma7I06Emo",
"google-site-verification=oAugt2Arr7OyWaqJ0bkytkmIE-VQ8D_IFa-rdNiqa8s",
"google-site-verification=VDl82gbqVHJW6un8Mcki6qDhL_OGK6G8ByOB6qhaVbg",
"oneleet-domain-verification-72120df0-57da-4da7-b7bf-e26eaee9dd85",
# Microsoft 365
"MS=ms19826180"
]
}
resource "google_dns_record_set" "google-dkim" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "google._domainkey.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=DKIM1;\" \"k=rsa;\" \"p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAi1bjDNWHAhpLro2nw6WJ4Ye+JyA0gsMLHx1g+oS\" \"uGC6V0zo0Ftdt/tgvieaWbArClrz7Ce8986mih1P6iEESehTSarDrLlHPstIEI6UnjP7sAuIZtRsIrUI4NJM0Jg96uS4ezxIza3bzNxk3atMp0laCt+\" \"tbCeGLCPt4r9aygWIT/CRuNHZUm3CVwemN0celflXZF+FEg+mEJrkekasNtVJJ//XAdimvwe9CWOF/VoC+ZP0ocac3CFzng7NzSqYnCiaAZqJ3Pss0ueq0K/kqUxy8vh25Kd\" \"gyvdHSWdgnMFD251I/TBueScPZoUmo3ueYqwKxmW1J1uCkVx4NQ1xK2QIDAQAB\""
]
}
# Oneleet Trust page
resource "google_dns_record_set" "oneleet-trust" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "trust.${module.google-cloud-dns.dns_name}"
type = "CNAME"
ttl = 3600
rrdatas = [
"trust.oneleet.com."
]
}
# Stripe checkout pages
resource "google_dns_record_set" "stripe-checkout" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "billing.${module.google-cloud-dns.dns_name}"
rrdatas = ["hosted-checkout.stripecdn.com."]
ttl = 300
}
resource "google_dns_record_set" "stripe-checkout-acme" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "TXT"
name = "_acme-challenge.billing.${module.google-cloud-dns.dns_name}"
rrdatas = ["YXH57351vMR9L5prjMoetmpktg1K65i6HkK0ZlLlF1g"]
ttl = 300
}
# HubSpot
resource "google_dns_record_set" "hubspot-domainkey1" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "hs1-23723443._domainkey.${module.google-cloud-dns.dns_name}"
rrdatas = ["firezone-dev.hs07a.dkim.hubspotemail.net."]
ttl = 3600
}
resource "google_dns_record_set" "hubspot-domainkey2" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CNAME"
name = "hs2-23723443._domainkey.${module.google-cloud-dns.dns_name}"
rrdatas = ["firezone-dev.hs07b.dkim.hubspotemail.net."]
ttl = 3600
}


@@ -1,138 +0,0 @@
# Deploy our dogfood gateways
locals {
gateways_region = local.region
gateways_zone = local.availability_zone
gateways_instance_type = "e2-micro"
gateways_instance_count = 2
}
# Reserve instances for the gateway
# If you don't reserve them, deployment takes much longer and there is no guarantee that instances will be created at all:
# Google Cloud Platform does not guarantee that instances will be available when you need them.
resource "google_compute_reservation" "gateways_reservation" {
project = module.google-cloud-project.project.project_id
name = "gateways-${local.availability_zone}-${local.gateways_instance_type}"
zone = local.gateways_zone
specific_reservation {
count = local.gateways_instance_count
instance_properties {
machine_type = local.gateways_instance_type
}
}
}
module "gateways" {
count = var.gateway_token != null ? 1 : 0
source = "../../modules/google-cloud/apps/gateway-region-instance-group"
project_id = module.google-cloud-project.project.project_id
compute_network = module.google-cloud-vpc.id
compute_subnetwork = google_compute_subnetwork.tools.self_link
compute_instance_type = local.gateways_instance_type
compute_region = local.gateways_region
compute_instance_availability_zones = [local.gateways_zone]
compute_instance_replicas = local.gateways_instance_count
observability_log_level = "info"
name = "gateway"
api_url = "wss://api.${local.tld}"
token = var.gateway_token
vsn = local.gateway_image_tag
depends_on = [
google_compute_reservation.gateways_reservation
]
}
# Allow gateways to access the Metabase
resource "google_compute_firewall" "gateways-metabase-access" {
count = var.gateway_token != null ? 1 : 0
project = module.google-cloud-project.project.project_id
name = "gateways-metabase-access"
network = module.google-cloud-vpc.id
direction = "INGRESS"
source_tags = module.gateways[0].target_tags
target_tags = module.metabase.target_tags
allow {
protocol = "tcp"
}
}
# Allow outbound traffic
resource "google_compute_firewall" "gateways-egress-ipv4" {
count = var.gateway_token != null ? 1 : 0
project = module.google-cloud-project.project.project_id
name = "gateways-egress-ipv4"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.gateways[0].target_tags
destination_ranges = ["0.0.0.0/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "gateways-egress-ipv6" {
count = var.gateway_token != null ? 1 : 0
project = module.google-cloud-project.project.project_id
name = "gateways-egress-ipv6"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.gateways[0].target_tags
destination_ranges = ["::/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "gateways-ssh-ipv4" {
count = length(module.gateways) > 0 ? 1 : 0
project = module.google-cloud-project.project.project_id
name = "gateways-ssh-ipv4"
network = module.google-cloud-vpc.id
allow {
protocol = "tcp"
ports = [22]
}
allow {
protocol = "udp"
ports = [22]
}
allow {
protocol = "sctp"
ports = [22]
}
log_config {
metadata = "INCLUDE_ALL_METADATA"
}
# Only allows connections using IAP
source_ranges = local.iap_ipv4_ranges
target_tags = module.gateways[0].target_tags
}


@@ -1,300 +0,0 @@
locals {
project_owners = [
"bmanifold@firezone.dev",
"jamil@firezone.dev",
]
region = "us-east1"
availability_zone = "us-east1-d"
tld = "firezone.dev"
iap_ipv4_ranges = [
"35.235.240.0/20"
]
iap_ipv6_ranges = [
"2600:2d00:1:7::/64"
]
gateway_image_tag = var.gateway_image_tag != null ? var.gateway_image_tag : var.image_tag
relay_image_tag = var.relay_image_tag != null ? var.relay_image_tag : var.image_tag
portal_image_tag = var.portal_image_tag != null ? var.portal_image_tag : var.image_tag
}
terraform {
cloud {
organization = "firezone"
hostname = "app.terraform.io"
workspaces {
name = "production"
}
}
}
provider "random" {}
provider "null" {}
provider "google" {}
provider "google-beta" {}
# Create the project
module "google-cloud-project" {
source = "../../modules/google-cloud/project"
id = "firezone-prod"
name = "Production Environment"
organization_id = "335836213177"
billing_account_id = "0199BA-489CDD-F385C8"
auto_create_network = false
}
# Enable audit logs for the production project
resource "google_project_iam_audit_config" "audit" {
project = module.google-cloud-project.project.project_id
service = "allServices"
audit_log_config {
log_type = "ADMIN_READ"
}
audit_log_config {
log_type = "DATA_READ"
exempted_members = concat(
[
module.web.service_account.member,
module.api.service_account.member,
module.metabase.service_account.member,
],
module.gateways[*].service_account.member,
module.relays[*].service_account.member
)
}
audit_log_config {
log_type = "DATA_WRITE"
exempted_members = concat(
[
module.web.service_account.member,
module.api.service_account.member,
module.metabase.service_account.member,
],
module.gateways[*].service_account.member,
module.relays[*].service_account.member
)
}
}
# Grant owner access to the project
resource "google_project_iam_binding" "project_owners" {
project = module.google-cloud-project.project.project_id
role = "roles/owner"
members = formatlist("user:%s", local.project_owners)
}
# Grant GitHub Actions ability to write to the container registry
module "google-artifact-registry" {
source = "../../modules/google-cloud/artifact-registry"
project_id = module.google-cloud-project.project.project_id
project_name = module.google-cloud-project.name
region = local.region
writers = [
# This is GitHub Actions service account configured manually
# in the project github-iam-387915
"serviceAccount:github-actions@github-iam-387915.iam.gserviceaccount.com"
]
}
# Bucket where CI stores binary artifacts (e.g. gateway or client)
resource "google_storage_bucket" "firezone-binary-artifacts" {
project = module.google-cloud-project.project.project_id
name = "${module.google-cloud-project.project.project_id}-artifacts"
location = "US"
lifecycle_rule {
condition {
age = 365
}
action {
type = "Delete"
}
}
lifecycle_rule {
condition {
age = 1
}
action {
type = "AbortIncompleteMultipartUpload"
}
}
public_access_prevention = "inherited"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_iam_member" "public-firezone-binary-artifacts" {
bucket = google_storage_bucket.firezone-binary-artifacts.name
role = "roles/storage.objectViewer"
member = "allUsers"
}
# Create a VPC
module "google-cloud-vpc" {
source = "../../modules/google-cloud/vpc"
project_id = module.google-cloud-project.project.project_id
name = module.google-cloud-project.project.project_id
nat_region = local.region
}
# Enable Google Cloud Storage for the project
module "google-cloud-storage" {
source = "../../modules/google-cloud/storage"
project_id = module.google-cloud-project.project.project_id
}
# Create DNS managed zone
module "google-cloud-dns" {
source = "../../modules/google-cloud/dns"
project_id = module.google-cloud-project.project.project_id
tld = local.tld
dnssec_enabled = true
}
# Create the Cloud SQL database
module "google-cloud-sql" {
source = "../../modules/google-cloud/sql"
project_id = module.google-cloud-project.project.project_id
network = module.google-cloud-vpc.id
compute_region = local.region
compute_availability_zone = local.availability_zone
compute_instance_cpu_count = "2"
compute_instance_memory_size = "7680"
database_name = module.google-cloud-project.project.project_id
database_highly_available = true
database_backups_enabled = true
database_read_replica_locations = [
{
ipv4_enabled = true
region = local.region
network = module.google-cloud-vpc.id
}
]
database_flags = {
# Increase the connection count a bit; it needs to be at least Ecto's ((pool_count * pool_size) + 50)
"max_connections" = "500"
# Set a minimum threshold on dead tuples to prevent autovacuum from running too often on small tables
# where 5% is less than 50 records
"autovacuum_vacuum_threshold" = "50"
# Trigger autovacuum for every 5% of the table changed
"autovacuum_vacuum_scale_factor" = "0.05"
"autovacuum_analyze_scale_factor" = "0.05"
# Give autovacuum 4x the cost limit to prevent it from never finishing
# on big tables
"autovacuum_vacuum_cost_limit" = "800"
# Give hash joins a bit more memory to work with
# "hash_mem_multiplier" = "3"
# This is the standard value for work_mem
"work_mem" = "4096"
}
}
resource "google_compute_firewall" "ssh-ipv4" {
project = module.google-cloud-project.project.project_id
name = "iap-ssh-ipv4"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [22]
}
log_config {
metadata = "INCLUDE_ALL_METADATA"
}
# Only allows connections using IAP
source_ranges = local.iap_ipv4_ranges
target_tags = concat(
module.web.target_tags,
module.api.target_tags,
module.domain.target_tags,
module.relays[0].target_tags
)
}
resource "google_compute_firewall" "ssh-ipv6" {
project = module.google-cloud-project.project.project_id
name = "iap-ssh-ipv6"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [22]
}
log_config {
metadata = "INCLUDE_ALL_METADATA"
}
source_ranges = local.iap_ipv6_ranges
target_tags = concat(
module.web.target_tags,
module.api.target_tags,
module.domain.target_tags,
module.relays[0].target_tags
)
}
module "ops" {
source = "../../modules/google-cloud/ops"
project_id = module.google-cloud-project.project.project_id
slack_alerts_auth_token = var.slack_alerts_auth_token
slack_alerts_channel = var.slack_alerts_channel
pagerduty_auth_token = var.pagerduty_auth_token
# Feel free to add your Mobile App (due to SOC requirements, it can only be used from work devices)
# or SMS notification (recommended for personal devices) channels
additional_notification_channels = [
# Brian
## Mobile App
# "projects/firezone-prod/notificationChannels/16177228986287373178",
# Jamil
## Mobile App
# "projects/firezone-prod/notificationChannels/1608881766413151733"
]
api_host = module.api.host
web_host = module.web.host
}


@@ -1,19 +0,0 @@
output "dns_name_servers" {
value = module.google-cloud-dns.name_servers
}
output "image_tag" {
value = var.image_tag
}
output "gateway_image_tag" {
value = local.gateway_image_tag
}
output "relay_image_tag" {
value = local.relay_image_tag
}
output "portal_image_tag" {
value = local.portal_image_tag
}


@@ -1,699 +0,0 @@
locals {
# The version of the Erlang cluster state.
# Change this to prevent new nodes from joining the cluster of the old ones,
# i.e. when some internal message introduces a breaking change.
cluster_version = "1_0"
}
# Generate secrets
resource "random_password" "erlang_cluster_cookie" {
length = 64
special = false
}
resource "random_password" "tokens_key_base" {
length = 64
special = false
}
resource "random_password" "tokens_salt" {
length = 32
special = false
}
resource "random_password" "secret_key_base" {
length = 64
special = false
}
resource "random_password" "live_view_signing_salt" {
length = 32
special = false
}
resource "random_password" "cookie_signing_salt" {
length = 32
special = false
}
resource "random_password" "cookie_encryption_salt" {
length = 32
special = false
}
# Create VPC subnet for the application instances,
# we want all apps to be in the same VPC in order for Erlang clustering to work
resource "google_compute_subnetwork" "apps" {
project = module.google-cloud-project.project.project_id
name = "app"
stack_type = "IPV4_IPV6"
ip_cidr_range = "10.128.0.0/20"
region = local.region
network = module.google-cloud-vpc.id
ipv6_access_type = "EXTERNAL"
log_config {
aggregation_interval = "INTERVAL_5_MIN"
metadata = "INCLUDE_ALL_METADATA"
}
private_ip_google_access = true
}
# Create VPC subnet for tooling instances
resource "google_compute_subnetwork" "tools" {
project = module.google-cloud-project.project.project_id
name = "tooling"
stack_type = "IPV4_IPV6"
ip_cidr_range = "10.129.0.0/20"
region = local.region
network = module.google-cloud-vpc.id
ipv6_access_type = "EXTERNAL"
log_config {
aggregation_interval = "INTERVAL_5_MIN"
metadata = "INCLUDE_ALL_METADATA"
}
private_ip_google_access = true
}
# Create SQL user and database
resource "random_password" "firezone_db_password" {
length = 16
min_lower = 1
min_upper = 1
min_numeric = 1
min_special = 1
lifecycle {
ignore_changes = [min_lower, min_upper, min_numeric, min_special]
}
}
resource "google_sql_user" "firezone" {
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
name = "firezone"
password = random_password.firezone_db_password.result
}
resource "google_sql_database" "firezone" {
project = module.google-cloud-project.project.project_id
name = "firezone"
instance = module.google-cloud-sql.master_instance_name
}
# Create IAM users for the database for all project owners
resource "google_sql_user" "iam_users" {
for_each = toset(local.project_owners)
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
type = "CLOUD_IAM_USER"
name = each.value
}
# We can't remove passwords completely because we still need to execute those GRANT statements for IAM users
provider "postgresql" {
scheme = "gcppostgres"
host = "${module.google-cloud-project.project.project_id}:${local.region}:${module.google-cloud-sql.master_instance_name}"
port = 5432
username = google_sql_user.firezone.name
password = random_password.firezone_db_password.result
superuser = false
sslmode = "disable"
}
resource "postgresql_grant" "grant_select_on_all_tables_schema_to_iam_users" {
for_each = toset(local.project_owners)
database = google_sql_database.firezone.name
privileges = ["SELECT", "INSERT", "UPDATE", "DELETE"]
objects = [] # ALL
object_type = "table"
schema = "public"
role = each.key
depends_on = [
google_sql_user.iam_users
]
}
resource "postgresql_grant" "grant_execute_on_all_functions_schema_to_iam_users" {
for_each = toset(local.project_owners)
database = google_sql_database.firezone.name
privileges = ["EXECUTE"]
objects = [] # ALL
object_type = "function"
schema = "public"
role = each.key
depends_on = [
google_sql_user.iam_users
]
}
# Create bucket for client logs
resource "google_storage_bucket" "client-logs" {
project = module.google-cloud-project.project.project_id
name = "${module.google-cloud-project.project.project_id}-client-logs"
location = "US"
lifecycle_rule {
condition {
age = 3
}
action {
type = "Delete"
}
}
lifecycle_rule {
condition {
age = 1
}
action {
type = "AbortIncompleteMultipartUpload"
}
}
logging {
log_bucket = true
log_object_prefix = "firezone.dev/clients"
}
public_access_prevention = "enforced"
uniform_bucket_level_access = true
lifecycle {
prevent_destroy = true
ignore_changes = []
}
}
locals {
cluster = {
name = "firezone"
cookie = base64encode(random_password.erlang_cluster_cookie.result)
}
shared_application_environment_variables = [
# Apps
{
name = "WEB_EXTERNAL_URL"
value = "https://app.${local.tld}"
},
{
name = "API_EXTERNAL_URL"
value = "https://api.${local.tld}"
},
{
name = "PHOENIX_HTTP_WEB_PORT"
value = "8080"
},
{
name = "PHOENIX_HTTP_API_PORT"
value = "8080"
},
# Database
{
name = "DATABASE_HOST"
value = module.google-cloud-sql.master_instance_ip_address
},
{
name = "DATABASE_NAME"
value = google_sql_database.firezone.name
},
{
name = "DATABASE_USER"
value = google_sql_user.firezone.name
},
{
name = "DATABASE_PASSWORD"
value = google_sql_user.firezone.password
},
# Secrets
{
name = "TOKENS_KEY_BASE"
value = base64encode(random_password.tokens_key_base.result)
},
{
name = "TOKENS_SALT"
value = base64encode(random_password.tokens_salt.result)
},
{
name = "SECRET_KEY_BASE"
value = base64encode(random_password.secret_key_base.result)
},
{
name = "LIVE_VIEW_SIGNING_SALT"
value = base64encode(random_password.live_view_signing_salt.result)
},
{
name = "COOKIE_SIGNING_SALT"
value = base64encode(random_password.cookie_signing_salt.result)
},
{
name = "COOKIE_ENCRYPTION_SALT"
value = base64encode(random_password.cookie_encryption_salt.result)
},
# Erlang
{
name = "ERLANG_DISTRIBUTION_PORT"
value = "9000"
},
{
name = "CLUSTER_NAME"
value = local.cluster.name
},
{
name = "ERLANG_CLUSTER_ADAPTER"
value = "Elixir.Domain.Cluster.GoogleComputeLabelsStrategy"
},
{
name = "ERLANG_CLUSTER_ADAPTER_CONFIG"
value = jsonencode({
project_id = module.google-cloud-project.project.project_id
cluster_name = local.cluster.name
cluster_name_label = "cluster_name"
cluster_version_label = "cluster_version"
cluster_version = local.cluster_version
node_name_label = "application"
polling_interval_ms = 10000
})
},
{
name = "RELEASE_COOKIE"
value = local.cluster.cookie
},
# Auth
{
name = "AUTH_PROVIDER_ADAPTERS"
value = "email,openid_connect,google_workspace,token,microsoft_entra,okta,jumpcloud"
},
# Registry from which Docker install scripts pull from
{
name = "DOCKER_REGISTRY"
value = "ghcr.io/firezone"
},
# Directory Sync
{
name = "WORKOS_API_KEY"
value = var.workos_api_key
},
{
name = "WORKOS_CLIENT_ID"
value = var.workos_client_id
},
{
name = "WORKOS_BASE_URL"
value = var.workos_base_url
},
# Billing system
{
name = "BILLING_ENABLED"
value = "true"
},
{
name = "STRIPE_SECRET_KEY"
value = var.stripe_secret_key
},
{
name = "STRIPE_WEBHOOK_SIGNING_SECRET"
value = var.stripe_webhook_signing_secret
},
{
name = "STRIPE_DEFAULT_PRICE_ID"
value = var.stripe_default_price_id
},
# Telemetry
{
name = "INSTRUMENTATION_CLIENT_LOGS_ENABLED"
value = true
},
{
name = "INSTRUMENTATION_CLIENT_LOGS_BUCKET"
value = google_storage_bucket.client-logs.name
},
# Analytics
{
name = "MIXPANEL_TOKEN"
# Note: this token is public
value = "b0ab1d66424a27555ed45a27a4fd0cd2"
},
{
name = "HUBSPOT_WORKSPACE_ID"
value = "23723443"
},
# Emails
{
name = "OUTBOUND_EMAIL_ADAPTER"
value = "Elixir.Swoosh.Adapters.Mailgun"
},
{
name = "OUTBOUND_EMAIL_FROM"
value = "notifications@firezone.dev"
},
{
name = "OUTBOUND_EMAIL_ADAPTER_OPTS"
value = jsonencode({
api_key = var.mailgun_server_api_token,
domain = local.tld
})
},
# Feature Flags
{
name = "FEATURE_FLOW_ACTIVITIES_ENABLED"
value = true
},
{
name = "FEATURE_SELF_HOSTED_RELAYS_ENABLED"
value = true
},
{
name = "FEATURE_POLICY_CONDITIONS_ENABLED"
value = true
},
{
name = "FEATURE_MULTI_SITE_RESOURCES_ENABLED"
value = true
},
{
name = "FEATURE_SIGN_UP_ENABLED"
value = true
},
{
name = "FEATURE_REST_API_ENABLED"
value = true
},
{
name = "FEATURE_INTERNET_RESOURCE_ENABLED"
value = true
},
{
name = "FEATURE_TEMP_ACCOUNTS"
value = true
}
]
}
module "domain" {
source = "../../modules/google-cloud/apps/elixir"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n4-standard-2"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
compute_boot_disk_type = "hyperdisk-balanced"
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "domain"
image_tag = local.portal_image_tag
scaling_horizontal_replicas = 2
observability_log_level = "info"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "domain"
application_version = replace(local.portal_image_tag, ".", "-")
application_ports = [
{
name = "http"
protocol = "TCP"
port = 4000
health_check = {
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 2
http_health_check = {
request_path = "/healthz"
}
}
}
]
application_environment_variables = concat([
# Background Jobs
{
name = "BACKGROUND_JOBS_ENABLED"
value = "true"
},
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
"cluster_version" = local.cluster_version
}
}
module "web" {
source = "../../modules/google-cloud/apps/elixir"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n4-standard-2"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
compute_boot_disk_type = "hyperdisk-balanced"
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "web"
image_tag = local.portal_image_tag
scaling_horizontal_replicas = 2
scaling_max_horizontal_replicas = 4
observability_log_level = "info"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "web"
application_version = replace(local.portal_image_tag, ".", "-")
application_dns_tld = "app.${local.tld}"
application_cdn_enabled = true
application_ports = [
{
name = "http"
protocol = "TCP"
port = 8080
health_check = {
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 2
http_health_check = {
request_path = "/healthz"
}
}
}
]
application_environment_variables = concat([
# Web Server
{
name = "BACKGROUND_JOBS_ENABLED"
value = "false"
}
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
"cluster_version" = local.cluster_version
}
}
module "api" {
source = "../../modules/google-cloud/apps/elixir"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n4-standard-2"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
compute_boot_disk_type = "hyperdisk-balanced"
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "api"
image_tag = local.portal_image_tag
scaling_horizontal_replicas = 2
scaling_max_horizontal_replicas = 4
observability_log_level = "info"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "api"
application_version = replace(local.portal_image_tag, ".", "-")
application_dns_tld = "api.${local.tld}"
application_ports = [
{
name = "http"
protocol = "TCP"
port = 8080
health_check = {
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
}
]
application_environment_variables = concat([
# Web Server
{
name = "BACKGROUND_JOBS_ENABLED"
value = "false"
},
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
"cluster_version" = local.cluster_version
}
application_token_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
}
## Allow API nodes to sign URLs for Google Cloud Storage
resource "google_storage_bucket_iam_member" "sign-urls" {
bucket = google_storage_bucket.client-logs.name
role = "roles/storage.objectAdmin"
member = "serviceAccount:${module.api.service_account.email}"
}
resource "google_project_iam_custom_role" "sign-urls" {
project = module.google-cloud-project.project.project_id
title = "Sign URLs for Google Cloud Storage"
role_id = "iam.sign_urls"
permissions = [
"iam.serviceAccounts.signBlob"
]
}
resource "google_project_iam_member" "sign-urls" {
project = module.google-cloud-project.project.project_id
role = "projects/${module.google-cloud-project.project.project_id}/roles/${google_project_iam_custom_role.sign-urls.role_id}"
member = "serviceAccount:${module.api.service_account.email}"
}
# Erlang Cluster
## Allow traffic between Elixir apps for Erlang clustering
resource "google_compute_firewall" "erlang-distribution" {
project = module.google-cloud-project.project.project_id
name = "erlang-distribution"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [4369, 9000]
}
allow {
protocol = "udp"
ports = [4369, 9000]
}
source_ranges = [google_compute_subnetwork.apps.ip_cidr_range]
target_tags = concat(module.web.target_tags, module.api.target_tags, module.domain.target_tags)
}
## Allow service account to list running instances
resource "google_project_iam_custom_role" "erlang-discovery" {
project = module.google-cloud-project.project.project_id
title = "Read list of Compute instances"
description = "This role is used for Erlang Cluster discovery and allows to list running instances."
role_id = "compute.list_instances"
permissions = [
"compute.instances.list",
"compute.zones.list"
]
}
resource "google_project_iam_member" "application" {
for_each = {
api = module.api.service_account.email
web = module.web.service_account.email
domain = module.domain.service_account.email
}
project = module.google-cloud-project.project.project_id
role = "projects/${module.google-cloud-project.project.project_id}/roles/${google_project_iam_custom_role.erlang-discovery.role_id}"
member = "serviceAccount:${each.value}"
}


@@ -1,166 +0,0 @@
resource "google_project_service" "compute" {
project = module.google-cloud-project.project.project_id
service = "compute.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "servicenetworking" {
project = module.google-cloud-project.project.project_id
service = "servicenetworking.googleapis.com"
disable_on_destroy = false
}
# Create a global address that will be used for the load balancer
resource "google_compute_global_address" "tld-ipv4" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
}
# Create a SSL policy
resource "google_compute_ssl_policy" "tld" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
min_tls_version = "TLS_1_2"
profile = "RESTRICTED"
depends_on = [
google_project_service.compute,
google_project_service.servicenetworking,
]
}
# Create a managed SSL certificate
resource "google_compute_managed_ssl_certificate" "tld" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
type = "MANAGED"
managed {
domains = [
local.tld,
"docs.${local.tld}",
"blog.${local.tld}",
]
}
depends_on = [
google_project_service.compute,
google_project_service.servicenetworking,
]
}
# URL maps are used to define redirect rules for incoming requests
resource "google_compute_url_map" "redirects" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-www-redirect"
# docs.firezone.dev -> https://www.firezone.dev/docs{uri}
host_rule {
hosts = ["docs.${local.tld}"]
path_matcher = "firezone-docs-redirects"
}
path_matcher {
name = "firezone-docs-redirects"
default_url_redirect {
host_redirect = "www.firezone.dev"
prefix_redirect = "/docs"
https_redirect = true
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
strip_query = false
}
}
# blog.firezone.dev -> https://www.firezone.dev/blog{uri}
host_rule {
hosts = ["blog.${local.tld}"]
path_matcher = "firezone-blog-redirects"
}
path_matcher {
name = "firezone-blog-redirects"
default_url_redirect {
host_redirect = "www.firezone.dev"
prefix_redirect = "/blog"
https_redirect = true
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
strip_query = false
}
}
# rest of the hosts -> https://www.firezone.dev{uri}
default_url_redirect {
host_redirect = "www.${local.tld}"
https_redirect = true
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
strip_query = false
}
depends_on = [
google_project_service.compute,
google_project_service.servicenetworking,
]
}
# HTTP(s) proxies are used to route requests to the appropriate URL maps
resource "google_compute_target_http_proxy" "tld" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-http"
url_map = google_compute_url_map.redirects.self_link
}
resource "google_compute_target_https_proxy" "tld" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-https"
url_map = google_compute_url_map.redirects.self_link
ssl_policy = google_compute_ssl_policy.tld.self_link
ssl_certificates = [
google_compute_managed_ssl_certificate.tld.self_link,
]
quic_override = "NONE"
}
# Forwarding rules are used to route incoming requests to the appropriate proxies
resource "google_compute_global_forwarding_rule" "http" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
labels = {
managed_by = "terraform"
}
target = google_compute_target_http_proxy.tld.self_link
ip_address = google_compute_global_address.tld-ipv4.address
port_range = "80"
load_balancing_scheme = "EXTERNAL"
}
resource "google_compute_global_forwarding_rule" "https" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-https"
labels = {
managed_by = "terraform"
}
target = google_compute_target_https_proxy.tld.self_link
ip_address = google_compute_global_address.tld-ipv4.address
port_range = "443"
load_balancing_scheme = "EXTERNAL"
}


@@ -1,422 +0,0 @@
locals {
subnet_ip_cidr_ranges = {
"africa-south1" = "10.240.0.0/24",
"asia-east1" = "10.240.1.0/24",
"asia-east2" = "10.240.2.0/24",
"asia-northeast1" = "10.240.3.0/24",
"asia-northeast2" = "10.240.4.0/24",
"asia-northeast3" = "10.240.5.0/24",
"asia-south1" = "10.240.6.0/24",
"asia-south2" = "10.240.7.0/24",
"asia-southeast1" = "10.240.8.0/24",
"asia-southeast2" = "10.240.9.0/24",
"australia-southeast1" = "10.240.10.0/24",
"australia-southeast2" = "10.240.11.0/24",
"europe-central2" = "10.240.12.0/24",
"europe-north1" = "10.240.13.0/24",
"europe-southwest1" = "10.240.14.0/24",
"europe-west1" = "10.240.15.0/24",
"europe-west2" = "10.240.16.0/24",
"europe-west3" = "10.240.17.0/24",
"europe-west4" = "10.240.18.0/24",
"europe-west6" = "10.240.19.0/24",
"europe-west8" = "10.240.20.0/24",
"europe-west9" = "10.240.21.0/24",
"europe-west10" = "10.240.22.0/24",
"europe-west12" = "10.240.23.0/24",
"me-central1" = "10.240.24.0/24",
"me-west1" = "10.240.25.0/24",
"northamerica-northeast1" = "10.240.26.0/24",
"northamerica-northeast2" = "10.240.27.0/24",
"northamerica-south1" = "10.240.28.0/24",
"southamerica-east1" = "10.240.29.0/24",
"southamerica-west1" = "10.240.30.0/24",
"us-central1" = "10.240.31.0/24",
"us-east1" = "10.240.32.0/24",
"us-east4" = "10.240.33.0/24",
"us-east5" = "10.240.34.0/24",
"us-south1" = "10.240.35.0/24",
"us-west1" = "10.240.36.0/24",
"us-west2" = "10.240.37.0/24",
"us-west3" = "10.240.38.0/24",
"us-west4" = "10.240.39.0/24"
}
}
# GCP requires networks and subnets to have globally unique names.
# This causes an issue if their configuration changes because we
# use create_before_destroy to avoid downtime on deploys.
#
# To work around this, we use a random suffix in the name and rotate
# it whenever the subnet IP CIDR ranges change. It's not a perfect
# solution, but it should cover most cases.
resource "random_string" "naming_suffix" {
length = 8
special = false
upper = false
keepers = {
# must be a string
subnet_ip_cidr_ranges = jsonencode(local.subnet_ip_cidr_ranges)
}
}
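# A minimal illustration (not part of the original config): because the CIDR map is
# serialized into `keepers`, any change to it recreates this random_string, which changes
# `result` and therefore every name that interpolates it, so a create_before_destroy
# replacement can come up under a fresh, globally unique name, e.g.:
#
#   resource "google_compute_network" "example" {
#     name = "relays-network-${random_string.naming_suffix.result}"
#     lifecycle {
#       create_before_destroy = true
#     }
#   }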
# Create networks
resource "google_compute_network" "network" {
project = module.google-cloud-project.project.project_id
name = "relays-network-${random_string.naming_suffix.result}"
routing_mode = "GLOBAL"
auto_create_subnetworks = false
depends_on = [
google_project_service.compute
]
}
resource "google_compute_subnetwork" "subnetwork" {
for_each = local.subnet_ip_cidr_ranges
project = module.google-cloud-project.project.project_id
name = "relays-subnet-${each.key}-${random_string.naming_suffix.result}"
region = each.key
network = google_compute_network.network.self_link
log_config {
aggregation_interval = "INTERVAL_10_MIN"
metadata = "INCLUDE_ALL_METADATA"
}
stack_type = "IPV4_IPV6"
# Each region gets its sequentially numbered /24 from the map above
ip_cidr_range = each.value
ipv6_access_type = "EXTERNAL"
private_ip_google_access = true
}
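# Note on the subnet settings above: stack_type = "IPV4_IPV6" together with
# ipv6_access_type = "EXTERNAL" makes these dual-stack subnets whose instances can
# receive external IPv6 addresses alongside their /24 IPv4 ranges, presumably so the
# relay instances can be reached over both address families.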
module "relays" {
count = var.relay_token != null ? 1 : 0
source = "../../modules/google-cloud/apps/relay"
project_id = module.google-cloud-project.project.project_id
# Remember to update the following published documentation when this changes:
# - /website/src/app/kb/deploy/gateways/readme.mdx
# - /website/src/app/kb/architecture/tech-stack/readme.mdx
instances = {
"africa-south1" = {
subnet = google_compute_subnetwork.subnetwork["africa-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["africa-south1-a"]
}
"asia-east1" = {
subnet = google_compute_subnetwork.subnetwork["asia-east1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-east1-a"]
}
"asia-east2" = {
subnet = google_compute_subnetwork.subnetwork["asia-east2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-east2-a"]
}
"asia-northeast1" = {
subnet = google_compute_subnetwork.subnetwork["asia-northeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-northeast1-a"]
}
"asia-northeast2" = {
subnet = google_compute_subnetwork.subnetwork["asia-northeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-northeast2-a"]
}
"asia-northeast3" = {
subnet = google_compute_subnetwork.subnetwork["asia-northeast3"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-northeast3-a"]
}
"asia-south1" = {
subnet = google_compute_subnetwork.subnetwork["asia-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-south1-a"]
}
"asia-south2" = {
subnet = google_compute_subnetwork.subnetwork["asia-south2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-south2-a"]
}
"asia-southeast1" = {
subnet = google_compute_subnetwork.subnetwork["asia-southeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-southeast1-a"]
}
"asia-southeast2" = {
subnet = google_compute_subnetwork.subnetwork["asia-southeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-southeast2-a"]
}
"australia-southeast1" = {
subnet = google_compute_subnetwork.subnetwork["australia-southeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["australia-southeast1-a"]
}
"australia-southeast2" = {
subnet = google_compute_subnetwork.subnetwork["australia-southeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["australia-southeast2-a"]
}
"europe-central2" = {
subnet = google_compute_subnetwork.subnetwork["europe-central2"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-central2-a"]
}
"europe-north1" = {
subnet = google_compute_subnetwork.subnetwork["europe-north1"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-north1-a"]
}
"europe-southwest1" = {
subnet = google_compute_subnetwork.subnetwork["europe-southwest1"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-southwest1-a"]
}
"europe-west1" = {
subnet = google_compute_subnetwork.subnetwork["europe-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west1-b"]
}
"europe-west2" = {
subnet = google_compute_subnetwork.subnetwork["europe-west2"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west2-a"]
}
"europe-west3" = {
subnet = google_compute_subnetwork.subnetwork["europe-west3"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west3-a"]
}
"europe-west4" = {
subnet = google_compute_subnetwork.subnetwork["europe-west4"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west4-a"]
}
"europe-west6" = {
subnet = google_compute_subnetwork.subnetwork["europe-west6"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west6-a"]
}
"europe-west8" = {
subnet = google_compute_subnetwork.subnetwork["europe-west8"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west8-a"]
}
"europe-west9" = {
subnet = google_compute_subnetwork.subnetwork["europe-west9"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west9-a"]
}
"europe-west10" = {
subnet = google_compute_subnetwork.subnetwork["europe-west10"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west10-a"]
}
"europe-west12" = {
subnet = google_compute_subnetwork.subnetwork["europe-west12"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west12-a"]
}
"me-central1" = {
subnet = google_compute_subnetwork.subnetwork["me-central1"].self_link
type = "e2-micro"
replicas = 1
zones = ["me-central1-a"]
}
# Fails with:
# "Access to the region is unavailable. Please contact our sales team at https://cloud.google.com/contact for further assistance."
# "me-central2" = {
# type = "e2-micro"
# replicas = 1
# zones = ["me-central2-a"]
# }
"me-west1" = {
subnet = google_compute_subnetwork.subnetwork["me-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["me-west1-a"]
}
"northamerica-northeast1" = {
subnet = google_compute_subnetwork.subnetwork["northamerica-northeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["northamerica-northeast1-a"]
}
"northamerica-northeast2" = {
subnet = google_compute_subnetwork.subnetwork["northamerica-northeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["northamerica-northeast2-a"]
}
"northamerica-south1" = {
subnet = google_compute_subnetwork.subnetwork["northamerica-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["northamerica-south1-a"]
}
"southamerica-east1" = {
subnet = google_compute_subnetwork.subnetwork["southamerica-east1"].self_link
type = "e2-micro"
replicas = 1
zones = ["southamerica-east1-a"]
}
"southamerica-west1" = {
subnet = google_compute_subnetwork.subnetwork["southamerica-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["southamerica-west1-a"]
}
"us-central1" = {
subnet = google_compute_subnetwork.subnetwork["us-central1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-central1-a"]
}
"us-east1" = {
subnet = google_compute_subnetwork.subnetwork["us-east1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-east1-b"]
}
"us-east4" = {
subnet = google_compute_subnetwork.subnetwork["us-east4"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-east4-a"]
}
"us-east5" = {
subnet = google_compute_subnetwork.subnetwork["us-east5"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-east5-a"]
}
"us-south1" = {
subnet = google_compute_subnetwork.subnetwork["us-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-south1-a"]
}
"us-west1" = {
subnet = google_compute_subnetwork.subnetwork["us-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west1-a"]
}
"us-west2" = {
subnet = google_compute_subnetwork.subnetwork["us-west2"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west2-a"]
}
"us-west3" = {
subnet = google_compute_subnetwork.subnetwork["us-west3"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west3-a"]
}
"us-west4" = {
subnet = google_compute_subnetwork.subnetwork["us-west4"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west4-a"]
}
}
network = google_compute_network.network.self_link
naming_suffix = random_string.naming_suffix.result
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "relay"
image_tag = local.relay_image_tag
observability_log_level = "info,hyper=off,h2=warn,tower=warn"
application_name = "relay"
application_version = replace(local.relay_image_tag, ".", "-")
application_environment_variables = [
{
name = "FIREZONE_TELEMETRY"
value = "true"
}
]
health_check = {
name = "health"
protocol = "TCP"
port = 8080
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
api_url = "wss://api.${local.tld}"
token = var.relay_token
}
# Trigger an alert when there is at least one region without a healthy relay
resource "google_monitoring_alert_policy" "connected_relays_count" {
project = module.google-cloud-project.project.project_id
display_name = "Relays are down"
combiner = "OR"
notification_channels = module.ops.notification_channels
conditions {
display_name = "Relay Instances"
condition_threshold {
filter = "resource.type = \"gce_instance\" AND metric.type = \"custom.googleapis.com/elixir/domain/relays/online_relays_count/last_value\""
comparison = "COMPARISON_LT"
# at least one relay per configured region must always be online
threshold_value = length(module.relays[0].instances)
duration = "0s"
trigger {
count = 1
}
aggregations {
alignment_period = "60s"
cross_series_reducer = "REDUCE_MAX"
per_series_aligner = "ALIGN_MEAN"
}
}
}
alert_strategy {
auto_close = "172800s"
}
}
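# For reference: length(module.relays[0].instances) is the number of regions in the
# instances map above (40 entries at the time of this change, with me-central2 commented out),
# so this alert fires whenever the reported online relay count drops below one per deployed region.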


@@ -1,99 +0,0 @@
variable "image_tag" {
type = string
description = "Image tag for all services. Notice: we assume all services are deployed with the same version"
}
variable "metabase_image_tag" {
type = string
default = "v0.47.6"
}
variable "relay_token" {
type = string
default = null
sensitive = true
}
variable "gateway_token" {
type = string
default = null
sensitive = true
}
variable "slack_alerts_channel" {
type = string
description = "Slack channel which will receive monitoring alerts"
default = "#feed-production"
}
variable "slack_alerts_auth_token" {
type = string
description = "Slack auth token for the infra alerts channel"
sensitive = true
}
variable "postmark_server_api_token" {
type = string
sensitive = true
}
variable "mailgun_server_api_token" {
type = string
sensitive = true
}
variable "pagerduty_auth_token" {
type = string
sensitive = true
}
variable "stripe_secret_key" {
type = string
sensitive = true
}
variable "stripe_webhook_signing_secret" {
type = string
sensitive = true
}
variable "stripe_default_price_id" {
type = string
}
variable "workos_api_key" {
type = string
sensitive = true
}
variable "workos_client_id" {
type = string
sensitive = true
}
variable "workos_base_url" {
type = string
}
# Version overrides
#
# This section can be used to pin a specific version of a Firezone component
# (e.g. during a rollback) so it isn't replaced by a newer one until someone acts manually.
#
# To update these, go to Terraform Cloud and change or delete the corresponding variables;
# if they are unset, `var.image_tag` will be used.
variable "relay_image_tag" {
type = string
default = null
}
variable "gateway_image_tag" {
type = string
default = null
}
variable "portal_image_tag" {
type = string
default = null
}
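# For illustration, the environment's locals resolve these overrides with a simple
# null fallback, e.g.:
#
#   relay_image_tag = var.relay_image_tag != null ? var.relay_image_tag : var.image_tag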


@@ -1,35 +0,0 @@
terraform {
required_version = "~> 1.10.0"
required_providers {
random = {
source = "hashicorp/random"
version = "~> 3.6"
}
null = {
source = "hashicorp/null"
version = "~> 3.2"
}
google = {
source = "hashicorp/google"
version = "~> 6.10"
}
google-beta = {
source = "hashicorp/google-beta"
version = "~> 6.10"
}
tls = {
source = "hashicorp/tls"
version = "~> 4.0"
}
postgresql = {
source = "cyrilgdn/postgresql"
version = "1.25.0"
}
}
}


@@ -1,76 +0,0 @@
resource "google_monitoring_uptime_check_config" "website-https" {
project = module.google-cloud-project.project.project_id
display_name = "website-https"
timeout = "60s"
http_check {
port = "443"
use_ssl = true
validate_ssl = true
request_method = "GET"
path = "/"
accepted_response_status_codes {
status_class = "STATUS_CLASS_2XX"
}
}
monitored_resource {
type = "uptime_url"
labels = {
project_id = module.google-cloud-project.project.project_id
host = local.tld
}
}
content_matchers {
matcher = "CONTAINS_STRING"
content = "firezone"
}
checker_type = "STATIC_IP_CHECKERS"
}
resource "google_monitoring_alert_policy" "website-downtime" {
project = module.google-cloud-project.project.project_id
display_name = "Website is DOWN!"
combiner = "OR"
notification_channels = module.ops.notification_channels
conditions {
display_name = "Uptime Health Check on website-https"
condition_threshold {
filter = "resource.type = \"uptime_url\" AND metric.type = \"monitoring.googleapis.com/uptime_check/check_passed\" AND metric.labels.check_id = \"${reverse(split("/", google_monitoring_uptime_check_config.website-https.id))[0]}\""
comparison = "COMPARISON_GT"
threshold_value = 1
duration = "0s"
trigger {
count = 1
}
aggregations {
alignment_period = "60s"
cross_series_reducer = "REDUCE_COUNT_FALSE"
per_series_aligner = "ALIGN_NEXT_OLDER"
group_by_fields = [
"resource.label.project_id",
"resource.label.host"
]
}
}
}
alert_strategy {
auto_close = "28800s"
}
}
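# Note on the alert filter above: the uptime check's `id` is a full resource path
# (typically "projects/<project>/uptimeCheckConfigs/<check-id>"), so
# reverse(split("/", id))[0] extracts the trailing <check-id> segment that the metric
# label `check_id` expects. A minimal illustration with a made-up id:
#
#   locals {
#     example_check_id = reverse(split("/", "projects/p/uptimeCheckConfigs/abc123"))[0] # => "abc123"
#   }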


@@ -1,147 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/cyrilgdn/postgresql" {
version = "1.25.0"
constraints = "1.25.0"
hashes = [
"h1:S0Nv3pAGngEqGAniq2y1aINUL9IqftERPNNXJiHCTC8=",
"zh:0f9db6e1274603d642e96b58eaf6cc4223f7118f2d7ce909dc4812d332cc002a",
"zh:1819470f0304c6a60b2b51817cb43f6ff59a49e08cc9e50644b86b3a76c91601",
"zh:27bfb544983cac101a7c7c2e4cb9939a712dffcdd7ddcab83c2f8afc334e33c5",
"zh:46166f6f05771b0495df18459fdf3a63fae8b38e95a1b2754f03d006e17ea33d",
"zh:64d53afc52f26e8214990acc3e07f3b47bef628aa6b317595a8faec05b252209",
"zh:944d7ded418c022dd3ee513246677d601376fa38d76c9c4aecff2c2eefcaa35b",
"zh:9819551b61542a6d322d6a323bbb552ce02e769ce2222fd9bb1935473c7c4b3c",
"zh:c38bd73e208fe216efab48d099c85b8ad1e51ff102b3892443febc9778e7236e",
"zh:c73de133274dcc7a03e95f598550facc59315538f355e57e14b36e222b298826",
"zh:c7af02f5338bfe7f1976e01d3fcf82e05b3551893e732539a84c568d25571a84",
"zh:d1aa3d7432c7de883873f8f70e9a6207c7b536d874486d37aee0ca8c8853a890",
"zh:e17e9809fc7cc2d6f89078b8bfe6308930117b2270be8081820da40029b04828",
"zh:e1b21b7b7022e0d468d72f4534d226d57a7bfd8c96a4c7dc2c2fa0bb0b99298d",
"zh:f24b73645d8bc225f692bdf9c035411099ef57138569f45f3605ec79ac872e3b",
]
}
provider "registry.terraform.io/hashicorp/aws" {
version = "5.86.1"
constraints = ">= 3.29.0, >= 5.79.0"
hashes = [
"h1:IekGV22ML8NcKlhaAceeWdHdXAWfFLJYaslIEkpMHps=",
"zh:0c5901c55f9bc0d353c48aa29e08d7152055dd296f3b60e1fe1634af8a7d32e4",
"zh:26ddfc89d2a410492e31f1014bbf5388f871cb67d01e80255bde3e22a468e8a6",
"zh:380c57474796e680c4477c4a69810db9389ce2717ff2da8d0f06716247dd1295",
"zh:53bf6f567be4348ddd566792fccddd9db6104111e619aa4042afb594b9a5cc75",
"zh:575c41544fd4ac969d59ecdff66428583c228a20a4893d238414e932bb2f2dc0",
"zh:63d9473a2f55f4941e98cb2fcc7031b4266c1cdc40a8f96d52b7d29504984da3",
"zh:6ec72fbc68f608a4e947a0b1356b14791330a425b7ebd3125e8023693bb37ec8",
"zh:729a0853f9ca42b60993d6233b80e1fea52cc5c9401693cef83ade502f51e3e8",
"zh:750eda82a9bde02a999677cdeb1e6d69b0d7af783e8d629c813da9be3ee6d493",
"zh:90f70d5b31bdae6b7f3aee9b2b618168a32f434eb976b935d907c95271e7e692",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
"zh:9cbf0464984b19a5a9027e8b03ebf1b56761c73f97171013b29f2d525ba91587",
"zh:aec08a2374a5cdaac3df3d6a39d98aaf58a3e0a500259b791a2dc5693280bc4b",
"zh:b638d8bd8ad11f14f7811696edcf744df07ea0f5c6033f59f3b325f921b7f54c",
"zh:bb862a4d11da06fff7c04978769cd100547bbf4735f64bfe2374b289e41a5147",
]
}
provider "registry.terraform.io/hashicorp/google" {
version = "6.20.0"
constraints = "~> 6.9"
hashes = [
"h1:Wo8fqu73OOylxVykdz593VVuWntAaHQMtRkh4eKow60=",
"zh:3dbdc8b3c7d9ef13ec3f69607232f23d29ed85df871d017a9a2338427dfae19b",
"zh:4ac9a128c3a957e22100eb3f813f62ffa0ab47d2e1ac0c9a1370779bafcd5fac",
"zh:4be504e8267709295708b12aa1ab9fbc93dbb801600347cfd82a27a204c3e04c",
"zh:5c65452be9ec6ccf728fc122616dff284d3497591736228d7571222511926ede",
"zh:76eca544918f9ed4bcf62178644216ef27163d6c763b11ee161fa654f7794d8a",
"zh:7c142461e3059b709cb84dae9a135c72769c127986eede40516664ea25ff18d5",
"zh:9636dcbf914d47e54ffdcf3a27c6dbbba63ea83e690fa8bf640bdf0421f352be",
"zh:a307597b41e08281120e182aa58cb77490fd3fddeb4131daa967025e039aad9b",
"zh:b6087dbaf762f4848449910476f22571712f22b7108c68ab2c173a8fff185518",
"zh:d0ff076ad9d2c5b85b24d39b104a2ac255588aa1ea6a2eaf5344445de81c8006",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
"zh:fb727a1cc6f2238059aa61baa84658dc5c348823d796f72921d030ff35a2723c",
]
}
provider "registry.terraform.io/hashicorp/google-beta" {
version = "6.20.0"
constraints = "~> 6.9"
hashes = [
"h1:AgnC4F95gu1clyMeCqTg2H8oqtlI2cARqoc5C0d0DcM=",
"zh:0500efa8db90436f3d0dc86f40e3c351249545a3d5927a9df6b8248d25214061",
"zh:18fdf86beb5fc9fce0a7c053d5629c230a3795d07327f0241a2d4bac90d7366e",
"zh:1a9a378679ac913b2c66197a6194250f2a601ea3c42829127a020814470b6866",
"zh:24f2bccd99cc419ac8f72064f3e3cca93903b89b98b26f6e1bb41cf944f2bf53",
"zh:44835cae5a5eae707330985423284191c34043ec2cf5a7a92e0370e2960f7541",
"zh:48a2196edc1a88a31c1ce5dec1f084b8821f1d41d9fbf83c38738f6c1b5e27d8",
"zh:592317cec2a061bcfd538c4ca7d4b5733c47d70a378086287732f4c6dc8d8f25",
"zh:8f6743bf5ae0754e4ea5c6cb4f40114a24e2eb3823b332fe0cb8fa9e777bcd2c",
"zh:a54164bc670c658efb3183a24785e383b1c516dc82b374103a3b587a9c3d9d80",
"zh:b5ab872bb770de30b1a4e0e5b396f14aed333541c7373bb71ced38d06666bddd",
"zh:d7a2c540a21eeb8ac933736e795fccc7592e7bcf69b48cdf79fd47240f977383",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/null" {
version = "3.2.3"
constraints = "~> 3.2"
hashes = [
"h1:I0Um8UkrMUb81Fxq/dxbr3HLP2cecTH2WMJiwKSrwQY=",
"zh:22d062e5278d872fe7aed834f5577ba0a5afe34a3bdac2b81f828d8d3e6706d2",
"zh:23dead00493ad863729495dc212fd6c29b8293e707b055ce5ba21ee453ce552d",
"zh:28299accf21763ca1ca144d8f660688d7c2ad0b105b7202554ca60b02a3856d3",
"zh:55c9e8a9ac25a7652df8c51a8a9a422bd67d784061b1de2dc9fe6c3cb4e77f2f",
"zh:756586535d11698a216291c06b9ed8a5cc6a4ec43eee1ee09ecd5c6a9e297ac1",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9d5eea62fdb587eeb96a8c4d782459f4e6b73baeece4d04b4a40e44faaee9301",
"zh:a6355f596a3fb8fc85c2fb054ab14e722991533f87f928e7169a486462c74670",
"zh:b5a65a789cff4ada58a5baffc76cb9767dc26ec6b45c00d2ec8b1b027f6db4ed",
"zh:db5ab669cf11d0e9f81dc380a6fdfcac437aea3d69109c7aef1a5426639d2d65",
"zh:de655d251c470197bcbb5ac45d289595295acb8f829f6c781d4a75c8c8b7c7dd",
"zh:f5c68199f2e6076bce92a12230434782bf768103a427e9bb9abee99b116af7b5",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.3"
constraints = "~> 3.6"
hashes = [
"h1:zG9uFP8l9u+yGZZvi5Te7PV62j50azpgwPunq2vTm1E=",
"zh:04ceb65210251339f07cd4611885d242cd4d0c7306e86dda9785396807c00451",
"zh:448f56199f3e99ff75d5c0afacae867ee795e4dfda6cb5f8e3b2a72ec3583dd8",
"zh:4b4c11ccfba7319e901df2dac836b1ae8f12185e37249e8d870ee10bb87a13fe",
"zh:4fa45c44c0de582c2edb8a2e054f55124520c16a39b2dfc0355929063b6395b1",
"zh:588508280501a06259e023b0695f6a18149a3816d259655c424d068982cbdd36",
"zh:737c4d99a87d2a4d1ac0a54a73d2cb62974ccb2edbd234f333abd079a32ebc9e",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:a357ab512e5ebc6d1fda1382503109766e21bbfdfaa9ccda43d313c122069b30",
"zh:c51bfb15e7d52cc1a2eaec2a903ac2aff15d162c172b1b4c17675190e8147615",
"zh:e0951ee6fa9df90433728b96381fb867e3db98f66f735e0c3e24f8f16903f0ad",
"zh:e3cdcb4e73740621dabd82ee6a37d6cfce7fee2a03d8074df65086760f5cf556",
"zh:eff58323099f1bd9a0bec7cb04f717e7f1b2774c7d612bf7581797e1622613a0",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.6"
constraints = "~> 4.0"
hashes = [
"h1:n3M50qfWfRSpQV9Pwcvuse03pEizqrmYEryxKky4so4=",
"zh:10de0d8af02f2e578101688fd334da3849f56ea91b0d9bd5b1f7a243417fdda8",
"zh:37fc01f8b2bc9d5b055dc3e78bfd1beb7c42cfb776a4c81106e19c8911366297",
"zh:4578ca03d1dd0b7f572d96bd03f744be24c726bfd282173d54b100fd221608bb",
"zh:6c475491d1250050765a91a493ef330adc24689e8837a0f07da5a0e1269e11c1",
"zh:81bde94d53cdababa5b376bbc6947668be4c45ab655de7aa2e8e4736dfd52509",
"zh:abdce260840b7b050c4e401d4f75c7a199fafe58a8b213947a258f75ac18b3e8",
"zh:b754cebfc5184873840f16a642a7c9ef78c34dc246a8ae29e056c79939963c7a",
"zh:c928b66086078f9917aef0eec15982f2e337914c5c4dbc31dd4741403db7eb18",
"zh:cded27bee5f24de6f2ee0cfd1df46a7f88e84aaffc2ecbf3ff7094160f193d50",
"zh:d65eb3867e8f69aaf1b8bb53bd637c99c6b649ba3db16ded50fa9a01076d1a27",
"zh:ecb0c8b528c7a619fa71852bb3fb5c151d47576c5aab2bf3af4db52588722eeb",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}


@@ -1,42 +0,0 @@
# Staging environment
This directory houses the Firezone staging environment.
## SSH access to the staging Gateway on AWS
1. [Create a new AWS Access Key and Secret Key in the AWS IAM console.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey)
1. [Install the aws CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
and then run `aws configure` to set up your credentials. Choose `us-east-1`
as the default region.
1. SSH to the Gateway using instance connect:
```
aws ec2-instance-connect ssh --instance-id \
$(aws ec2 describe-instances --filters "Name=tag:Name,Values=gateway - staging" "Name=instance-state-name,Values=running" --query "Reservations[*].Instances[*].InstanceId" --output text) \
--os-user ubuntu --connection-type eice
```
## Set NAT type on AWS NAT gateway VM
Note: The NAT gateway VM will default to using a non-symmetric NAT when deployed or restarted.
### Enable Symmetric NAT
1. SSH into the NAT gateway VM using the instructions above, replacing `gateway` with `nat`
1. Run the following:
```
sudo iptables -t nat -F && sudo iptables -t nat -A POSTROUTING -o ens5 -j MASQUERADE --random
```
### Enable Non-Symmetric NAT
1. SSH into the NAT gateway VM using the instructions above, replacing `gateway` with `nat`
1. Run the following:
```
sudo iptables -t nat -F && sudo iptables -t nat -A POSTROUTING -o ens5 -j MASQUERADE
```
## View gateway logs
1. SSH into the Gateway instance as per the above instructions.
1. Run `sudo journalctl -u gateway.service --no-hostname --output cat`.
This will give you coloured logs without additional clutter like duplicate timestamps or hostnames.


@@ -1,278 +0,0 @@
provider "aws" {
region = local.aws_region
}
locals {
aws_region = "us-east-1"
environment = "staging"
vpc_name = "Staging"
vpc_cidr = "10.0.0.0/16"
num_azs = 2
azs = slice(data.aws_availability_zones.available.names, 0, local.num_azs)
ssh_keypair_name = "fz-staging"
tags = {
Terraform = true
Environment = local.environment
}
}
################################################################################
# Networking
################################################################################
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
name = local.vpc_name
cidr = local.vpc_cidr
azs = local.azs
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k + local.num_azs)]
enable_ipv6 = true
public_subnet_assign_ipv6_address_on_creation = true
private_subnet_assign_ipv6_address_on_creation = true
private_subnet_enable_dns64 = false # DNS64 without a NAT64 gateway breaks IPv4-only resources
public_subnet_ipv6_prefixes = [0, 1]
private_subnet_ipv6_prefixes = [2, 3]
tags = local.tags
}
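# Worked example of the subnet math above (vpc_cidr = 10.0.0.0/16, num_azs = 2):
# cidrsubnet("10.0.0.0/16", 4, k) carves out /20 blocks, so
#   public_subnets  = ["10.0.0.0/20", "10.0.16.0/20"]   (k = 0, 1)
#   private_subnets = ["10.0.32.0/20", "10.0.48.0/20"]  (k = 2, 3)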
resource "aws_route" "private_nat_instance" {
count = local.num_azs
route_table_id = element(module.vpc.private_route_table_ids, count.index)
destination_cidr_block = "0.0.0.0/0"
network_interface_id = module.aws_nat.primary_network_interface_id
timeouts {
create = "5m"
}
}
################################################################################
# EC2 Instance Connect Endpoint
################################################################################
resource "aws_ec2_instance_connect_endpoint" "this" {
subnet_id = module.vpc.public_subnets[0]
preserve_client_ip = false
security_group_ids = [
module.sg_allow_vpc_egress.security_group_id
]
tags = merge(
local.tags,
{
Name = "staging-ec2-instance-connect"
}
)
}
################################################################################
# Compute
################################################################################
module "aws_nat" {
source = "../../modules/aws/nat"
ami = data.aws_ami.ubuntu.id
name = "nat - ${local.environment}"
associate_public_ip_address = true
instance_type = "t3.micro"
key_name = aws_key_pair.staging.id
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [
module.sg_allow_all_egress.security_group_id,
module.sg_allow_subnet_ingress.security_group_id
]
tags = local.tags
}
module "aws_httpbin" {
source = "../../modules/aws/httpbin"
ami = data.aws_ami.ubuntu.id
name = "httpbin - ${local.environment}"
associate_public_ip_address = false
instance_type = "t3.micro"
key_name = aws_key_pair.staging.id
subnet_id = element(module.vpc.private_subnets, 0)
private_ip = cidrhost(element(module.vpc.private_subnets_cidr_blocks, 0), 100)
vpc_security_group_ids = [
module.sg_allow_all_egress.security_group_id,
module.sg_allow_subnet_ingress.security_group_id
]
tags = local.tags
}
module "aws_iperf" {
source = "../../modules/aws/iperf"
ami = data.aws_ami.ubuntu.id
name = "iperf - ${local.environment}"
associate_public_ip_address = false
instance_type = "t3.micro"
key_name = aws_key_pair.staging.id
subnet_id = element(module.vpc.private_subnets, 0)
private_ip = cidrhost(element(module.vpc.private_subnets_cidr_blocks, 0), 101)
vpc_security_group_ids = [
module.sg_allow_all_egress.security_group_id,
module.sg_allow_subnet_ingress.security_group_id
]
tags = local.tags
}
module "aws_gateway" {
source = "../../modules/aws/gateway"
ami = data.aws_ami.ubuntu.id
name = "gateway - ${local.environment}"
associate_public_ip_address = false
instance_type = "t3.micro"
key_name = aws_key_pair.staging.id
subnet_id = element(module.vpc.private_subnets, 0)
private_ip = cidrhost(element(module.vpc.private_subnets_cidr_blocks, 0), 50)
vpc_security_group_ids = [
module.sg_allow_all_egress.security_group_id,
module.sg_allow_subnet_ingress.security_group_id
]
# Gateway specific vars
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "gateway"
image_tag = var.image_tag
observability_log_level = "wire::api=trace,phoenix_channel=debug,firezone_gateway=debug,boringtun=debug,snownet=debug,str0m=info,connlib_gateway_shared=debug,firezone_tunnel=debug,connlib_shared=debug,warn"
application_name = "gateway"
application_version = replace(var.image_tag, ".", "-")
api_url = "wss://api.${local.tld}"
token = var.aws_gateway_token
tags = local.tags
}
module "aws_coredns" {
source = "../../modules/aws/coredns"
ami = data.aws_ami.ubuntu.id
name = "coredns - ${local.environment}"
associate_public_ip_address = false
instance_type = "t3.micro"
key_name = aws_key_pair.staging.id
subnet_id = element(module.vpc.private_subnets, 0)
private_ip = cidrhost(element(module.vpc.private_subnets_cidr_blocks, 0), 10)
application_name = "coredns"
dns_records = [
{
name = "gateway",
value = module.aws_gateway.private_ip
},
{
name = "httpbin",
value = module.aws_httpbin.private_ip
},
{
name = "iperf",
value = module.aws_iperf.private_ip
},
]
vpc_security_group_ids = [
module.sg_allow_all_egress.security_group_id,
module.sg_allow_subnet_ingress.security_group_id
]
tags = local.tags
}
################################################################################
# Security Groups
################################################################################
module "sg_allow_all_egress" {
source = "terraform-aws-modules/security-group/aws"
name = "allow all egress"
description = "Security group to allow all egress"
vpc_id = module.vpc.vpc_id
egress_with_cidr_blocks = [
{
rule = "all-all"
cidr_blocks = "0.0.0.0/0"
},
]
egress_with_ipv6_cidr_blocks = [
{
rule = "all-all"
ipv6_cidr_blocks = "::/0"
},
]
}
module "sg_allow_vpc_egress" {
source = "terraform-aws-modules/security-group/aws"
name = "allow egress to all vpc subnets"
description = "Security group to egress to all vpc subnets. Created for use with EC2 Instance Connect Endpoint."
vpc_id = module.vpc.vpc_id
egress_with_cidr_blocks = [
{
rule = "all-all"
cidr_blocks = local.vpc_cidr
},
]
}
module "sg_allow_subnet_ingress" {
source = "terraform-aws-modules/security-group/aws"
name = "allow ingress from subnet"
description = "Security group to allow all ingress from other machines on the subnet"
vpc_id = module.vpc.vpc_id
ingress_with_cidr_blocks = [
{
rule = "all-all"
cidr_blocks = join(",", module.vpc.public_subnets_cidr_blocks)
},
{
rule = "all-all",
cidr_blocks = join(",", module.vpc.private_subnets_cidr_blocks)
}
]
}
################################################################################
# SSH Keys
################################################################################
resource "aws_key_pair" "staging" {
key_name = "fz-staging"
public_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBI0vUtLcJqkqIK7xRgfu68fLnP+x7r+W4Bs2bCUxq8F fz-staging-aws"
tags = local.tags
}


@@ -1,17 +0,0 @@
data "aws_availability_zones" "available" {}
data "aws_ami" "ubuntu" {
most_recent = true
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["099720109477"] # Canonical as shown here: https://ubuntu.com/server/docs/cloud-images/amazon-ec2
}


@@ -1,195 +0,0 @@
# Deploy our Metabase instance
locals {
metabase_region = local.region
metabase_zone = local.availability_zone
}
resource "random_password" "metabase_db_password" {
length = 16
min_lower = 1
min_upper = 1
min_numeric = 1
min_special = 1
}
resource "google_sql_user" "metabase" {
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
name = "metabase"
password = random_password.metabase_db_password.result
}
resource "google_sql_database" "metabase" {
project = module.google-cloud-project.project.project_id
name = "metabase"
instance = module.google-cloud-sql.master_instance_name
}
resource "postgresql_grant" "grant_select_on_all_tables_schema_to_metabase" {
database = google_sql_database.firezone.name
privileges = ["SELECT"]
objects = [] # ALL
object_type = "table"
schema = "public"
role = google_sql_user.metabase.name
depends_on = [
google_sql_user.metabase
]
}
resource "postgresql_grant" "grant_execute_on_all_functions_schema_to_metabase" {
database = google_sql_database.firezone.name
privileges = ["EXECUTE"]
objects = [] # ALL
object_type = "function"
schema = "public"
role = google_sql_user.metabase.name
depends_on = [
google_sql_user.metabase
]
}
module "metabase" {
source = "../../modules/google-cloud/apps/metabase"
project_id = module.google-cloud-project.project.project_id
compute_network = module.google-cloud-vpc.id
compute_subnetwork = google_compute_subnetwork.apps.self_link
compute_instance_type = "n1-standard-1"
compute_region = local.metabase_region
compute_instance_availability_zone = local.metabase_zone
image_repo = "metabase"
image = "metabase"
image_tag = var.metabase_image_tag
application_name = "metabase"
application_version = replace(replace(var.metabase_image_tag, ".", "-"), "v", "")
application_environment_variables = [
{
name = "MB_DB_TYPE"
value = "postgres"
},
{
name = "MB_DB_DBNAME"
value = google_sql_database.metabase.name
},
{
name = "MB_DB_PORT"
value = "5432"
},
{
name = "MB_DB_USER"
value = google_sql_user.metabase.name
},
{
name = "MB_DB_PASS"
value = random_password.metabase_db_password.result
},
{
name = "MB_DB_HOST"
value = module.google-cloud-sql.bi_instance_ip_address
},
{
name = "MB_SITE_NAME"
value = module.google-cloud-project.project.project_id
},
{
name = "MB_ANON_TRACKING_ENABLED"
value = "false"
},
# {
# name = "MB_JETTY_PORT"
# value = "80"
# }
]
health_check = {
name = "health"
protocol = "TCP"
port = 3000
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
}
# Allow outbound traffic
resource "google_compute_firewall" "egress-ipv4" {
project = module.google-cloud-project.project.project_id
name = "metabase-egress-ipv4"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.metabase.target_tags
destination_ranges = ["0.0.0.0/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "egress-ipv6" {
project = module.google-cloud-project.project.project_id
name = "metabase-egress-ipv6"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.metabase.target_tags
destination_ranges = ["::/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "metabase-ssh-ipv4" {
project = module.google-cloud-project.project.project_id
name = "metabase-ssh-ipv4"
network = module.google-cloud-vpc.id
allow {
protocol = "tcp"
ports = [22]
}
allow {
protocol = "udp"
ports = [22]
}
allow {
protocol = "sctp"
ports = [22]
}
# Only allows connections using IAP
source_ranges = local.iap_ipv4_ranges
target_tags = module.metabase.target_tags
}


@@ -1,164 +0,0 @@
# Bucket where CI stores binary artifacts (e.g. gateway or client)
resource "google_storage_bucket" "firezone-binary-artifacts" {
project = module.google-cloud-project.project.project_id
name = "${module.google-cloud-project.project.project_id}-artifacts"
location = "US"
lifecycle_rule {
condition {
age = 365
}
action {
type = "Delete"
}
}
lifecycle_rule {
condition {
age = 1
}
action {
type = "AbortIncompleteMultipartUpload"
}
}
public_access_prevention = "inherited"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_iam_member" "public-firezone-binary-artifacts" {
bucket = google_storage_bucket.firezone-binary-artifacts.name
role = "roles/storage.objectViewer"
member = "allUsers"
}
# Docker layer caching
resource "google_artifact_registry_repository" "cache" {
provider = google-beta
project = module.google-cloud-project.project.project_id
location = local.region
repository_id = "cache"
description = "Repository for storing Docker images in the ${module.google-cloud-project.name}."
format = "DOCKER"
cleanup_policies {
id = "keep-latest-release"
action = "KEEP"
condition {
tag_state = "TAGGED"
tag_prefixes = ["latest"]
}
}
cleanup_policies {
id = "keep-minimum-versions"
action = "KEEP"
most_recent_versions {
keep_count = 5
}
}
cleanup_policies {
id = "gc-untagged"
action = "DELETE"
condition {
tag_state = "UNTAGGED"
older_than = "${14 * 24 * 60 * 60}s"
}
}
cleanup_policies {
id = "gc-cache"
action = "DELETE"
condition {
tag_state = "ANY"
older_than = "${30 * 24 * 60 * 60}s"
}
}
depends_on = [
module.google-artifact-registry
]
}
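# For reference, the retention arithmetic above: 14 * 24 * 60 * 60 = 1209600s (14 days)
# for untagged images and 30 * 24 * 60 * 60 = 2592000s (30 days) for the catch-all cache GC.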
data "google_iam_policy" "caches_policy" {
binding {
role = "roles/artifactregistry.reader"
members = ["allUsers"]
}
binding {
role = "roles/artifactregistry.writer"
members = local.ci_iam_members
}
}
resource "google_artifact_registry_repository_iam_policy" "policy" {
project = google_artifact_registry_repository.cache.project
location = google_artifact_registry_repository.cache.location
repository = google_artifact_registry_repository.cache.name
policy_data = data.google_iam_policy.caches_policy.policy_data
}
# sccache is used by Rust CI jobs
resource "google_storage_bucket" "sccache" {
project = module.google-cloud-project.project.project_id
name = "${module.google-cloud-project.project.project_id}-sccache"
location = "US"
lifecycle_rule {
condition {
age = 30
}
action {
type = "Delete"
}
}
lifecycle_rule {
condition {
age = 1
}
action {
type = "AbortIncompleteMultipartUpload"
}
}
public_access_prevention = "inherited"
uniform_bucket_level_access = true
}
resource "google_storage_bucket_iam_member" "public-sccache" {
bucket = google_storage_bucket.sccache.name
role = "roles/storage.objectViewer"
member = "allUsers"
}
resource "google_storage_bucket_iam_member" "github-actions-sccache-access" {
for_each = toset(local.ci_iam_members)
bucket = google_storage_bucket.sccache.name
role = "roles/storage.objectAdmin"
member = each.key
}
resource "google_storage_bucket_iam_member" "github-actions-firezone-binary-artifacts-access" {
for_each = toset(local.ci_iam_members)
bucket = google_storage_bucket.firezone-binary-artifacts.name
role = "roles/storage.objectAdmin"
member = each.key
}


@@ -1,111 +0,0 @@
# This module deploys an empty VM that is ready to run Docker commands for deploying our Relay or Gateway.
# It's used weekly for internal demos and testing, and is reset after each use by rebooting the VM.
module "demo" {
source = "../../modules/google-cloud/apps/vm"
project_id = module.google-cloud-project.project.project_id
compute_network = module.google-cloud-vpc.id
compute_subnetwork = google_compute_subnetwork.apps.self_link
compute_region = local.region
compute_instance_availability_zone = "${local.region}-d"
compute_instance_type = "f1-micro"
vm_name = "demo"
vm_network_tag = "app-demo"
cloud_init = <<EOT
#cloud-config
runcmd:
- sudo apt install postgresql-client jq iperf3 -y
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
| sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
- echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
| sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
- sudo apt update -y
- sudo apt install docker-ce docker-ce-cli containerd.io -y
- sudo usermod -aG docker $(whoami)
- sudo systemctl enable docker
- sudo systemctl start docker
- sudo docker run -d --restart always --name=httpbin -p 80:80 kennethreitz/httpbin
- echo ${module.metabase.internal_ip} metabase.fz >> /etc/hosts
- echo 127.0.0.1 host.firezone.local >> /etc/hosts
EOT
}
# Create a demo DB and PostgreSQL user so that we can demo accessing the database
resource "random_password" "demo_db_password" {
length = 16
min_lower = 1
min_upper = 1
min_numeric = 1
min_special = 1
}
resource "google_sql_user" "demo" {
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
name = "demo"
password = random_password.demo_db_password.result
}
resource "google_sql_database" "demo" {
project = module.google-cloud-project.project.project_id
name = "demo"
instance = module.google-cloud-sql.master_instance_name
}
resource "google_compute_firewall" "demo-access-to-bi" {
project = module.google-cloud-project.project.project_id
name = "demo-access-to-bi"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
source_ranges = [google_compute_subnetwork.apps.ip_cidr_range]
target_tags = module.metabase.target_tags
}
resource "google_compute_firewall" "demo-ssh-ipv4" {
project = module.google-cloud-project.project.project_id
name = "staging-demo-ssh-ipv4"
network = module.google-cloud-vpc.id
allow {
protocol = "tcp"
ports = [22]
}
allow {
protocol = "udp"
ports = [22]
}
allow {
protocol = "sctp"
ports = [22]
}
source_ranges = ["0.0.0.0/0"]
target_tags = module.demo.target_tags
}


@@ -1,218 +0,0 @@
# Allow Google Cloud and Let's Encrypt to issue certificates for our domain
resource "google_dns_record_set" "dns-caa" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "CAA"
name = module.google-cloud-dns.dns_name
rrdatas = [
"0 issue \"letsencrypt.org\"",
"0 issue \"pki.goog\"",
"0 iodef \"mailto:security@firezone.dev\""
]
ttl = 3600
}
# Website -- these redirect to firezone.dev
resource "google_dns_record_set" "website-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = module.google-cloud-dns.dns_name
rrdatas = [google_compute_global_address.tld-ipv4.address]
ttl = 3600
}
resource "google_dns_record_set" "website-www-redirect" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "www.${module.google-cloud-dns.dns_name}"
rrdatas = [google_compute_global_address.tld-ipv4.address]
ttl = 3600
}
# Our community forum, Discourse
resource "google_dns_record_set" "discourse" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "discourse.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.77.86.150"]
ttl = 300
}
# Connectivity check servers
resource "google_dns_record_set" "ping-backend" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "ping-backend.${module.google-cloud-dns.dns_name}"
rrdatas = ["149.28.197.67"]
ttl = 3600
}
resource "google_dns_record_set" "ping-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "ping.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "ping-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = "ping.${module.google-cloud-dns.dns_name}"
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
# Telemetry servers
resource "google_dns_record_set" "t-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "t.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "t-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = "t.${module.google-cloud-dns.dns_name}"
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
resource "google_dns_record_set" "telemetry-ipv4" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "A"
name = "telemetry.${module.google-cloud-dns.dns_name}"
rrdatas = ["45.63.84.183"]
ttl = 3600
}
resource "google_dns_record_set" "telemetry-ipv6" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
type = "AAAA"
name = "telemetry.${module.google-cloud-dns.dns_name}"
rrdatas = ["2001:19f0:ac02:bb:5400:4ff:fe47:6bdf"]
ttl = 3600
}
# Third-party services
# Mailgun
resource "google_dns_record_set" "mailgun-dkim" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "kone._domainkey.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
# Reference: https://groups.google.com/g/cloud-dns-discuss/c/k_l6JP-H29Y
# Individual strings cannot exceed 255 characters in length, or "Invalid record data" results.
# DKIM clients concatenate all of the strings before parsing tags, so to work around the limit
# all you need to do is add whitespace within the p= tag so that each string fits within the 255-character limit.
rrdatas = [
"\"k=rsa;\" \"p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwYyTkBcuzLi1l+bHezuxJlmmpSdabjHY67YxWG8chz7pd12IfbE7JDM4Qi+AYq6Wp6ZDqEukFHIMJjz2PceHuf/5sgJazWLwBWp6DN6J2/WXgs2vWBWYJ0Kpj6l+p2t8jNrPNNVZrkO7BT2AmJAV5c9bemXkY801XkATAvAzvHs7pMsvjVmALWhh9eQoflVjYZUBwSDWjItd\" \"flK4IlrU5+yM5xHRIshazUmWiM8b6lBzV7WKLrDir+Td8NdBAwkFnlxIuqePlfXqIA3190Mk03PqOjlqhuqjZVg441e4A2TwlSShOv9EWtwseKwO1uWiky5uKGo4mlNPU4aZAi/UFwIDAQAB\""
]
}
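# Illustrative only (placeholder chunks, not real key material): DNS clients join adjacent
# quoted strings in a TXT record with no separator, so a record like
#   rrdatas = ["\"k=rsa;\" \"p=FIRSTCHUNK\" \"SECONDCHUNK\""]
# is read back as "k=rsa;p=FIRSTCHUNKSECONDCHUNK", while each quoted chunk stays under 255 characters.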
# Google Workspace
resource "google_dns_record_set" "google-mail" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = module.google-cloud-dns.dns_name
type = "MX"
ttl = 3600
rrdatas = [
"1 aspmx.l.google.com.",
"5 alt1.aspmx.l.google.com.",
"5 alt2.aspmx.l.google.com.",
"10 alt3.aspmx.l.google.com.",
"10 alt4.aspmx.l.google.com."
]
}
resource "google_dns_record_set" "google-dmarc" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "_dmarc.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=DMARC1;\" \"p=reject;\" \"rua=mailto:dmarc-reports@firezone.dev;\" \"pct=100;\" \"adkim=s;\" \"aspf=s\""
]
}
resource "google_dns_record_set" "google-spf" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "try.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=spf1 include:_spf.google.com ~all\""
]
}
resource "google_dns_record_set" "google-dkim" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = "20190728104345pm._domainkey.${module.google-cloud-dns.dns_name}"
type = "TXT"
ttl = 3600
rrdatas = [
"\"v=DKIM1;\" \"k=rsa;\" \"p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlrJHV7oQ63ebQcZ7fsvo+kjb1R9UrkpcdAOkOeN74qMjypQA+hKVV9F2aDM8hFeZoQH9zwIgQi+\" \"0TcDKRr1O7BklmbSkoMaqM5gH2OQTqQWwU0v49POHiL6yWKO4L68peJMMEVX+xFcjxHI5j6dkLMmv+Y6IxrzsqgeXx7V6cFt5V1G8lr0DWC+yzhPioda+S21dWl1GwPdLBbQb80GV1mpV2rGImzeiZVv4/4Et7w0M55Rfy\" \"m4JICJ89FmjC1Ua05CvrD4dvugWqfVoGuP3nyQXEqP8wgyoPuOZPrcEQXu+IlBrWMRBKv7slI571YnUznwoKlkourgB+7qC/zU8KQIDAQAB\""
]
}
resource "google_dns_record_set" "root-verifications" {
project = module.google-cloud-project.project.project_id
managed_zone = module.google-cloud-dns.zone_name
name = module.google-cloud-dns.dns_name
type = "TXT"
ttl = 3600
rrdatas = [
"google-site-verification=NbGHbeX7TprsiSQfxz2JVtP7xrPJE5Orej2_Ip8JHyo",
"\"v=spf1 include:mailgun.org ~all\"",
"oneleet-domain-verification-b98be3d1-70c2-4cdb-b444-8dac1ee7b8d4"
]
}


@@ -1,214 +0,0 @@
locals {
project_owners = [
"bmanifold@firezone.dev",
"jamil@firezone.dev",
"thomas@firezone.dev",
]
# List of emails for users who should be able to SSH into a demo instance
demo_access = []
region = "us-east1"
availability_zone = "us-east1-d"
tld = "firez.one"
# This is the GitHub Actions service account, configured manually
# in the github-iam-387915 project
ci_iam_members = [
"serviceAccount:github-actions@github-iam-387915.iam.gserviceaccount.com"
]
iap_ipv4_ranges = [
"35.235.240.0/20"
]
iap_ipv6_ranges = [
"2600:2d00:1:7::/64"
]
gateway_image_tag = var.gateway_image_tag != null ? var.gateway_image_tag : var.image_tag
relay_image_tag = var.relay_image_tag != null ? var.relay_image_tag : var.image_tag
portal_image_tag = var.portal_image_tag != null ? var.portal_image_tag : var.image_tag
}
terraform {
cloud {
organization = "firezone"
hostname = "app.terraform.io"
workspaces {
name = "staging"
}
}
}
provider "random" {}
provider "null" {}
provider "google" {}
provider "google-beta" {}
# Create the project
module "google-cloud-project" {
source = "../../modules/google-cloud/project"
id = "firezone-staging"
name = "Staging Environment"
organization_id = "335836213177"
billing_account_id = "01DFC9-3D6951-579BE1"
auto_create_network = false
}
# Grant owner access to the project
resource "google_project_iam_binding" "project_owners" {
project = module.google-cloud-project.project.project_id
role = "roles/owner"
members = formatlist("user:%s", local.project_owners)
}
# Grant GitHub Actions the ability to write to the container registry
module "google-artifact-registry" {
source = "../../modules/google-cloud/artifact-registry"
project_id = module.google-cloud-project.project.project_id
project_name = module.google-cloud-project.name
region = local.region
store_tagged_artifacts_for = "${90 * 24 * 60 * 60}s"
store_untagged_artifacts_for = "${90 * 24 * 60 * 60}s"
writers = local.ci_iam_members
}
# Create a VPC
module "google-cloud-vpc" {
source = "../../modules/google-cloud/vpc"
project_id = module.google-cloud-project.project.project_id
name = module.google-cloud-project.project.project_id
nat_region = local.region
}
# Enable Google Cloud Storage for the project
module "google-cloud-storage" {
source = "../../modules/google-cloud/storage"
project_id = module.google-cloud-project.project.project_id
}
# Create DNS managed zone
module "google-cloud-dns" {
source = "../../modules/google-cloud/dns"
project_id = module.google-cloud-project.project.project_id
tld = local.tld
dnssec_enabled = false
}
# Create the Cloud SQL database
module "google-cloud-sql" {
source = "../../modules/google-cloud/sql"
project_id = module.google-cloud-project.project.project_id
network = module.google-cloud-vpc.id
compute_region = local.region
compute_availability_zone = local.availability_zone
compute_instance_cpu_count = "2"
compute_instance_memory_size = "7680"
database_name = module.google-cloud-project.project.project_id
database_highly_available = false
database_backups_enabled = false
database_read_replica_locations = []
database_flags = {
# Increase the connection count a bit; it needs to be at least Ecto's ((pool_count * pool_size) + 50)
"max_connections" = "500"
# Sets the minimum threshold on dead tuples to prevent autovacuum from running too often on small tables
# where 5% is fewer than 50 records
"autovacuum_vacuum_threshold" = "50"
# Trigger autovacuum whenever 5% of the table has changed
"autovacuum_vacuum_scale_factor" = "0.05"
"autovacuum_analyze_scale_factor" = "0.05"
# Give autovacuum 4x the default cost limit so it can actually finish
# on big tables
"autovacuum_vacuum_cost_limit" = "800"
# Give hash joins a bit more memory to work with
# "hash_mem_multiplier" = "3"
# This is the standard default value for work_mem
"work_mem" = "4096"
}
}
# Enable SSH on staging
resource "google_compute_firewall" "ssh-ipv4" {
project = module.google-cloud-project.project.project_id
name = "iap-ssh-ipv4"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [22]
}
log_config {
metadata = "INCLUDE_ALL_METADATA"
}
source_ranges = local.iap_ipv4_ranges
target_tags = concat(
module.web.target_tags,
module.api.target_tags,
module.domain.target_tags,
module.relays[0].target_tags
)
}
resource "google_compute_firewall" "ssh-ipv6" {
project = module.google-cloud-project.project.project_id
name = "iap-ssh-ipv6"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [22]
}
log_config {
metadata = "INCLUDE_ALL_METADATA"
}
source_ranges = local.iap_ipv6_ranges
target_tags = concat(
module.web.target_tags,
module.api.target_tags,
module.domain.target_tags,
module.relays[0].target_tags
)
}
module "ops" {
source = "../../modules/google-cloud/ops"
project_id = module.google-cloud-project.project.project_id
slack_alerts_auth_token = var.slack_alerts_auth_token
slack_alerts_channel = var.slack_alerts_channel
api_host = module.api.host
web_host = module.web.host
}


@@ -1,91 +0,0 @@
# Deploy our Firezone monitor instance
locals {
client_monitor_region = local.region
client_monitor_zone = local.availability_zone
}
module "client_monitor" {
source = "../../modules/google-cloud/apps/client-monitor"
project_id = module.google-cloud-project.project.project_id
compute_network = module.google-cloud-vpc.id
compute_subnetwork = google_compute_subnetwork.apps.self_link
compute_instance_type = "f1-micro"
compute_region = local.client_monitor_region
compute_instance_availability_zone = local.client_monitor_zone
container_registry = module.google-artifact-registry.url
firezone_client_id = "gcp-client-monitor-main"
firezone_api_url = "wss://api.firez.one"
firezone_token = var.firezone_client_token
image_repo = module.google-artifact-registry.repo
image = "client"
image_tag = var.image_tag
application_name = "client-monitor"
application_environment_variables = []
health_check = {
name = "health"
protocol = "TCP"
port = 3000
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
}
# Allow outbound traffic
resource "google_compute_firewall" "client-monitor-egress-ipv4" {
project = module.google-cloud-project.project.project_id
name = "client-monitor-egress-ipv4"
network = module.google-cloud-vpc.id
direction = "EGRESS"
target_tags = module.client_monitor.target_tags
destination_ranges = ["0.0.0.0/0"]
allow {
protocol = "all"
}
}
resource "google_compute_firewall" "client-monitor-ssh-ipv4" {
project = module.google-cloud-project.project.project_id
name = "client-monitor-ssh-ipv4"
network = module.google-cloud-vpc.id
allow {
protocol = "tcp"
ports = [22]
}
allow {
protocol = "udp"
ports = [22]
}
allow {
protocol = "sctp"
ports = [22]
}
# Only allows connections using IAP
source_ranges = local.iap_ipv4_ranges
target_tags = module.client_monitor.target_tags
}


@@ -1,29 +0,0 @@
output "dns_name_servers" {
value = module.google-cloud-dns.name_servers
}
output "demo_postgresql_instance_ip" {
sensitive = true
value = module.google-cloud-sql.master_instance_ip_address
}
output "demo_postgresql_connection_url" {
sensitive = true
value = "postgres://${google_sql_user.demo.name}:${random_password.demo_db_password.result}@${module.google-cloud-sql.master_instance_ip_address}/${google_sql_database.demo.name}"
}
output "image_tag" {
value = var.image_tag
}
output "gateway_image_tag" {
value = local.gateway_image_tag
}
output "relay_image_tag" {
value = local.relay_image_tag
}
output "portal_image_tag" {
value = local.portal_image_tag
}


@@ -1,681 +0,0 @@
locals {
# The version of the Erlang cluster state.
# Change this to prevent new nodes from joining the cluster formed by the old ones,
# i.e. when some internal messages introduce a breaking change.
cluster_version = "1_0"
}
# Generate secrets
resource "random_password" "erlang_cluster_cookie" {
length = 64
special = false
}
resource "random_password" "tokens_key_base" {
length = 64
special = false
}
resource "random_password" "tokens_salt" {
length = 32
special = false
}
resource "random_password" "secret_key_base" {
length = 64
special = false
}
resource "random_password" "live_view_signing_salt" {
length = 32
special = false
}
resource "random_password" "cookie_signing_salt" {
length = 32
special = false
}
resource "random_password" "cookie_encryption_salt" {
length = 32
special = false
}
# Create a VPC subnet for the application instances;
# all apps need to be in the same VPC for Erlang clustering to work
resource "google_compute_subnetwork" "apps" {
project = module.google-cloud-project.project.project_id
name = "app"
stack_type = "IPV4_IPV6"
ip_cidr_range = "10.128.0.0/20"
region = local.region
network = module.google-cloud-vpc.id
ipv6_access_type = "EXTERNAL"
private_ip_google_access = true
}
# Deploy the web app to GCE
resource "random_password" "web_db_password" {
length = 16
min_lower = 1
min_upper = 1
min_numeric = 1
min_special = 1
lifecycle {
ignore_changes = [min_lower, min_upper, min_numeric, min_special]
}
}
# TODO: rename it to "firezone"
resource "google_sql_user" "web" {
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
name = "web"
password = random_password.web_db_password.result
}
resource "google_sql_database" "firezone" {
project = module.google-cloud-project.project.project_id
name = "firezone"
instance = module.google-cloud-sql.master_instance_name
}
# Create IAM users for the database for all project owners
resource "google_sql_user" "iam_users" {
for_each = toset(local.project_owners)
project = module.google-cloud-project.project.project_id
instance = module.google-cloud-sql.master_instance_name
type = "CLOUD_IAM_USER"
name = each.value
}
# We can't remove passwords completely because we still need to execute the GRANT statements below for IAM users
provider "postgresql" {
scheme = "gcppostgres"
host = "${module.google-cloud-project.project.project_id}:${local.region}:${module.google-cloud-sql.master_instance_name}"
port = 5432
username = google_sql_user.web.name
password = random_password.web_db_password.result
superuser = false
sslmode = "disable"
}
resource "postgresql_grant" "grant_select_on_all_tables_schema_to_iam_users" {
for_each = toset(local.project_owners)
database = google_sql_database.firezone.name
privileges = ["SELECT", "INSERT", "UPDATE", "DELETE"]
objects = [] # ALL
object_type = "table"
schema = "public"
role = each.key
depends_on = [
google_sql_user.iam_users
]
}
resource "postgresql_grant" "grant_execute_on_all_functions_schema_to_iam_users" {
for_each = toset(local.project_owners)
database = google_sql_database.firezone.name
privileges = ["EXECUTE"]
objects = [] # ALL
object_type = "function"
schema = "public"
role = each.key
depends_on = [
google_sql_user.iam_users
]
}
resource "google_storage_bucket" "client-logs" {
project = module.google-cloud-project.project.project_id
name = "${module.google-cloud-project.project.project_id}-client-logs"
location = "US"
lifecycle_rule {
condition {
age = 3
}
action {
type = "Delete"
}
}
lifecycle_rule {
condition {
age = 1
}
action {
type = "AbortIncompleteMultipartUpload"
}
}
logging {
log_bucket = true
log_object_prefix = "firezone.dev/clients"
}
public_access_prevention = "enforced"
uniform_bucket_level_access = true
lifecycle {
prevent_destroy = true
ignore_changes = []
}
}
locals {
cluster = {
name = "firezone"
cookie = base64encode(random_password.erlang_cluster_cookie.result)
}
shared_application_environment_variables = [
# Apps
{
name = "WEB_EXTERNAL_URL"
value = "https://app.${local.tld}"
},
{
name = "API_EXTERNAL_URL"
value = "https://api.${local.tld}"
},
{
name = "PHOENIX_HTTP_WEB_PORT"
value = "8080"
},
{
name = "PHOENIX_HTTP_API_PORT"
value = "8080"
},
# Database
{
name = "DATABASE_HOST"
value = module.google-cloud-sql.master_instance_ip_address
},
{
name = "DATABASE_NAME"
value = google_sql_database.firezone.name
},
{
name = "DATABASE_USER"
value = google_sql_user.web.name
},
{
name = "DATABASE_PASSWORD"
value = google_sql_user.web.password
},
# Secrets
{
name = "TOKENS_KEY_BASE"
value = base64encode(random_password.tokens_key_base.result)
},
{
name = "TOKENS_SALT"
value = base64encode(random_password.tokens_salt.result)
},
{
name = "SECRET_KEY_BASE"
value = base64encode(random_password.secret_key_base.result)
},
{
name = "LIVE_VIEW_SIGNING_SALT"
value = base64encode(random_password.live_view_signing_salt.result)
},
{
name = "COOKIE_SIGNING_SALT"
value = base64encode(random_password.cookie_signing_salt.result)
},
{
name = "COOKIE_ENCRYPTION_SALT"
value = base64encode(random_password.cookie_encryption_salt.result)
},
# Erlang
{
name = "ERLANG_DISTRIBUTION_PORT"
value = "9000"
},
{
name = "CLUSTER_NAME"
value = local.cluster.name
},
{
name = "ERLANG_CLUSTER_ADAPTER"
value = "Elixir.Domain.Cluster.GoogleComputeLabelsStrategy"
},
{
name = "ERLANG_CLUSTER_ADAPTER_CONFIG"
value = jsonencode({
project_id = module.google-cloud-project.project.project_id
cluster_name = local.cluster.name
cluster_name_label = "cluster_name"
cluster_version_label = "cluster_version"
cluster_version = local.cluster_version
node_name_label = "application"
polling_interval_ms = 7000
})
},
{
name = "RELEASE_COOKIE"
value = local.cluster.cookie
},
# Auth
{
name = "AUTH_PROVIDER_ADAPTERS"
value = "email,openid_connect,google_workspace,token,microsoft_entra,okta,jumpcloud"
},
# Directory Sync
{
name = "WORKOS_API_KEY"
value = var.workos_api_key
},
{
name = "WORKOS_CLIENT_ID"
value = var.workos_client_id
},
{
name = "WORKOS_BASE_URL"
value = var.workos_base_url
},
# Registry from which Docker install scripts pull images
{
name = "DOCKER_REGISTRY"
value = "${module.google-artifact-registry.url}/${module.google-artifact-registry.repo}"
},
# Billing system
{
name = "BILLING_ENABLED"
value = "true"
},
{
name = "STRIPE_SECRET_KEY"
value = var.stripe_secret_key
},
{
name = "STRIPE_WEBHOOK_SIGNING_SECRET"
value = var.stripe_webhook_signing_secret
},
{
name = "STRIPE_DEFAULT_PRICE_ID"
value = var.stripe_default_price_id
},
# Telemetry
{
name = "INSTRUMENTATION_CLIENT_LOGS_ENABLED"
value = true
},
{
name = "INSTRUMENTATION_CLIENT_LOGS_BUCKET"
value = google_storage_bucket.client-logs.name
},
# Analytics
{
name = "MIXPANEL_TOKEN"
# Note: this token is public
value = "313bdddc66b911f4afeb2c3242a78113"
},
# Emails
{
name = "OUTBOUND_EMAIL_ADAPTER"
value = "Elixir.Swoosh.Adapters.Mailgun"
},
{
name = "OUTBOUND_EMAIL_FROM"
value = "notifications@firez.one"
},
{
name = "OUTBOUND_EMAIL_ADAPTER_OPTS"
value = jsonencode({
api_key = var.mailgun_server_api_token,
domain = local.tld
})
},
# Feature Flags
{
name = "FEATURE_FLOW_ACTIVITIES_ENABLED"
value = true
},
{
name = "FEATURE_SELF_HOSTED_RELAYS_ENABLED"
value = true
},
{
name = "FEATURE_POLICY_CONDITIONS_ENABLED"
value = true
},
{
name = "FEATURE_MULTI_SITE_RESOURCES_ENABLED"
value = true
},
{
name = "FEATURE_SIGN_UP_ENABLED"
value = true
},
# Sign Up
{
name = "SIGN_UP_WHITELISTED_DOMAINS"
value = "firezone.dev,firez.one,firezonedemo.com"
},
{
name = "FEATURE_REST_API_ENABLED"
value = true
},
{
name = "FEATURE_INTERNET_RESOURCE_ENABLED"
value = true
},
{
name = "FEATURE_TEMP_ACCOUNTS"
value = true
}
]
}
module "domain" {
source = "../../modules/google-cloud/apps/elixir"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n4-standard-2"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
compute_boot_disk_type = "hyperdisk-balanced"
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "domain"
image_tag = var.image_tag
scaling_horizontal_replicas = 2
observability_log_level = "debug"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "domain"
application_version = replace(var.image_tag, ".", "-")
application_ports = [
{
name = "http"
protocol = "TCP"
port = 4000
health_check = {
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 2
http_health_check = {
request_path = "/healthz"
}
}
}
]
application_environment_variables = concat([
# Background Jobs
{
name = "BACKGROUND_JOBS_ENABLED"
value = "true"
},
# Pool size is increased because background jobs hold
# connections open for a long time
{
name = "DATABASE_POOL_SIZE"
value = "15"
},
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
"cluster_version" = local.cluster_version
}
}
module "web" {
source = "../../modules/google-cloud/apps/elixir"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n4-standard-2"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
compute_boot_disk_type = "hyperdisk-balanced"
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "web"
image_tag = var.image_tag
scaling_horizontal_replicas = 2
observability_log_level = "debug"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "web"
application_version = replace(var.image_tag, ".", "-")
application_dns_tld = "app.${local.tld}"
application_cdn_enabled = true
application_ports = [
{
name = "http"
protocol = "TCP"
port = 8080
health_check = {
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 2
http_health_check = {
request_path = "/healthz"
}
}
}
]
application_environment_variables = concat([
# Web Server
{
name = "API_URL_OVERRIDE"
value = "wss://api.${local.tld}"
},
{
name = "BACKGROUND_JOBS_ENABLED"
value = "false"
},
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
"cluster_version" = local.cluster_version
}
}
module "api" {
source = "../../modules/google-cloud/apps/elixir"
project_id = module.google-cloud-project.project.project_id
compute_instance_type = "n4-standard-2"
compute_instance_region = local.region
compute_instance_availability_zones = ["${local.region}-d"]
compute_boot_disk_type = "hyperdisk-balanced"
dns_managed_zone_name = module.google-cloud-dns.zone_name
vpc_network = module.google-cloud-vpc.self_link
vpc_subnetwork = google_compute_subnetwork.apps.self_link
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "api"
image_tag = var.image_tag
scaling_horizontal_replicas = 2
observability_log_level = "debug"
erlang_release_name = "firezone"
erlang_cluster_cookie = random_password.erlang_cluster_cookie.result
application_name = "api"
application_version = replace(var.image_tag, ".", "-")
application_dns_tld = "api.${local.tld}"
application_ports = [
{
name = "http"
protocol = "TCP"
port = 8080
health_check = {
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
}
]
application_environment_variables = concat([
# Web Server
{
name = "BACKGROUND_JOBS_ENABLED"
value = "false"
},
], local.shared_application_environment_variables)
application_labels = {
"cluster_name" = local.cluster.name
"cluster_version" = local.cluster_version
}
application_token_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
}
## Allow API nodes to sign URLs for Google Cloud Storage
resource "google_storage_bucket_iam_member" "sign-urls" {
bucket = google_storage_bucket.client-logs.name
role = "roles/storage.objectAdmin"
member = "serviceAccount:${module.api.service_account.email}"
}
resource "google_project_iam_custom_role" "sign-urls" {
project = module.google-cloud-project.project.project_id
title = "Sign URLs for Google Cloud Storage"
role_id = "iam.sign_urls"
permissions = [
"iam.serviceAccounts.signBlob"
]
}
resource "google_project_iam_member" "sign-urls" {
project = module.google-cloud-project.project.project_id
role = "projects/${module.google-cloud-project.project.project_id}/roles/${google_project_iam_custom_role.sign-urls.role_id}"
member = "serviceAccount:${module.api.service_account.email}"
}
# Erlang Cluster
## Allow traffic between Elixir apps for Erlang clustering
resource "google_compute_firewall" "erlang-distribution" {
project = module.google-cloud-project.project.project_id
name = "erlang-distribution"
network = module.google-cloud-vpc.self_link
allow {
protocol = "tcp"
ports = [4369, 9000]
}
allow {
protocol = "udp"
ports = [4369, 9000]
}
source_ranges = [google_compute_subnetwork.apps.ip_cidr_range]
target_tags = concat(module.web.target_tags, module.api.target_tags, module.domain.target_tags)
}
## Allow application service accounts to list running instances
resource "google_project_iam_custom_role" "erlang-discovery" {
project = module.google-cloud-project.project.project_id
title = "Read list of Compute instances"
description = "This role is used for Erlang Cluster discovery and allows to list running instances."
role_id = "compute.list_instances"
permissions = [
"compute.instances.list",
"compute.zones.list"
]
}
resource "google_project_iam_member" "application" {
for_each = {
api = module.api.service_account.email
web = module.web.service_account.email
domain = module.domain.service_account.email
}
project = module.google-cloud-project.project.project_id
role = "projects/${module.google-cloud-project.project.project_id}/roles/${google_project_iam_custom_role.erlang-discovery.role_id}"
member = "serviceAccount:${each.value}"
}

View File

@@ -1,124 +0,0 @@
resource "google_project_service" "compute" {
project = module.google-cloud-project.project.project_id
service = "servicenetworking.googleapis.com"
disable_on_destroy = false
}
resource "google_project_service" "servicenetworking" {
project = module.google-cloud-project.project.project_id
service = "servicenetworking.googleapis.com"
disable_on_destroy = false
}
# Create a global address that will be used for the load balancer
resource "google_compute_global_address" "tld-ipv4" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
}
# Create an SSL policy
resource "google_compute_ssl_policy" "tld" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
min_tls_version = "TLS_1_2"
profile = "RESTRICTED"
depends_on = [
google_project_service.compute,
google_project_service.servicenetworking,
]
}
# Create a managed SSL certificate
resource "google_compute_managed_ssl_certificate" "tld" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
type = "MANAGED"
managed {
domains = [
local.tld,
]
}
depends_on = [
google_project_service.compute,
google_project_service.servicenetworking,
]
}
# URL maps are used to define redirect rules for incoming requests
resource "google_compute_url_map" "redirects" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-production-redirect"
default_url_redirect {
host_redirect = "www.firezone.dev"
https_redirect = true
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
strip_query = false
}
depends_on = [
google_project_service.compute,
google_project_service.servicenetworking,
]
}
# HTTP(s) proxies are used to route requests to the appropriate URL maps
resource "google_compute_target_http_proxy" "tld" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-http"
url_map = google_compute_url_map.redirects.self_link
}
resource "google_compute_target_https_proxy" "tld" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-https"
url_map = google_compute_url_map.redirects.self_link
ssl_certificates = [google_compute_managed_ssl_certificate.tld.self_link]
ssl_policy = google_compute_ssl_policy.tld.self_link
quic_override = "NONE"
}
# Forwarding rules are used to route incoming requests to the appropriate proxies
resource "google_compute_global_forwarding_rule" "http" {
project = module.google-cloud-project.project.project_id
name = replace(local.tld, ".", "-")
labels = {
managed_by = "terraform"
}
target = google_compute_target_http_proxy.tld.self_link
ip_address = google_compute_global_address.tld-ipv4.address
port_range = "80"
load_balancing_scheme = "EXTERNAL"
}
resource "google_compute_global_forwarding_rule" "https" {
project = module.google-cloud-project.project.project_id
name = "${replace(local.tld, ".", "-")}-https"
labels = {
managed_by = "terraform"
}
target = google_compute_target_https_proxy.tld.self_link
ip_address = google_compute_global_address.tld-ipv4.address
port_range = "443"
load_balancing_scheme = "EXTERNAL"
}

View File

@@ -1,422 +0,0 @@
locals {
subnet_ip_cidr_ranges = {
"africa-south1" = "10.240.0.0/24",
"asia-east1" = "10.240.1.0/24",
"asia-east2" = "10.240.2.0/24",
"asia-northeast1" = "10.240.3.0/24",
"asia-northeast2" = "10.240.4.0/24",
"asia-northeast3" = "10.240.5.0/24",
"asia-south1" = "10.240.6.0/24",
"asia-south2" = "10.240.7.0/24",
"asia-southeast1" = "10.240.8.0/24",
"asia-southeast2" = "10.240.9.0/24",
"australia-southeast1" = "10.240.10.0/24",
"australia-southeast2" = "10.240.11.0/24",
"europe-central2" = "10.240.12.0/24",
"europe-north1" = "10.240.13.0/24",
"europe-southwest1" = "10.240.14.0/24",
"europe-west1" = "10.240.15.0/24",
"europe-west2" = "10.240.16.0/24",
"europe-west3" = "10.240.17.0/24",
"europe-west4" = "10.240.18.0/24",
"europe-west6" = "10.240.19.0/24",
"europe-west8" = "10.240.20.0/24",
"europe-west9" = "10.240.21.0/24",
"europe-west10" = "10.240.22.0/24",
"europe-west12" = "10.240.23.0/24",
"me-central1" = "10.240.24.0/24",
"me-west1" = "10.240.25.0/24",
"northamerica-northeast1" = "10.240.26.0/24",
"northamerica-northeast2" = "10.240.27.0/24",
"northamerica-south1" = "10.240.28.0/24",
"southamerica-east1" = "10.240.29.0/24",
"southamerica-west1" = "10.240.30.0/24",
"us-central1" = "10.240.31.0/24",
"us-east1" = "10.240.32.0/24",
"us-east4" = "10.240.33.0/24",
"us-east5" = "10.240.34.0/24",
"us-south1" = "10.240.35.0/24",
"us-west1" = "10.240.36.0/24",
"us-west2" = "10.240.37.0/24",
"us-west3" = "10.240.38.0/24",
"us-west4" = "10.240.39.0/24"
}
}
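The map above amounts to sequentially numbered /24s carved out of 10.240.0.0/16. A minimal sketch of generating the same ranges with cidrsubnet(), assuming a hypothetical relay_regions list kept in the original order:
locals {
  # Hypothetical list; each region's position determines its /24 offset.
  relay_regions = ["africa-south1", "asia-east1", /* ... */ "us-west4"]
  generated_subnet_ip_cidr_ranges = {
    for idx, region in local.relay_regions :
    region => cidrsubnet("10.240.0.0/16", 8, idx) # 10.240.<idx>.0/24
  }
}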
# GCP requires networks and subnets to have globally unique names.
# This causes an issue if their configuration changes because we
# use create_before_destroy to avoid downtime on deploys.
#
# To work around this, we use a random suffix in the name and rotate
# it whenever the subnet IP CIDR ranges change. It's not a perfect
# solution, but it should cover most cases.
resource "random_string" "naming_suffix" {
length = 8
special = false
upper = false
keepers = {
# must be a string
subnet_ip_cidr_ranges = jsonencode(local.subnet_ip_cidr_ranges)
}
}
# Create networks
resource "google_compute_network" "network" {
project = module.google-cloud-project.project.project_id
name = "relays-network-${random_string.naming_suffix.result}"
routing_mode = "GLOBAL"
auto_create_subnetworks = false
depends_on = [
google_project_service.compute
]
}
resource "google_compute_subnetwork" "subnetwork" {
for_each = local.subnet_ip_cidr_ranges
project = module.google-cloud-project.project.project_id
name = "relays-subnet-${each.key}-${random_string.naming_suffix.result}"
region = each.key
network = google_compute_network.network.self_link
log_config {
aggregation_interval = "INTERVAL_10_MIN"
metadata = "INCLUDE_ALL_METADATA"
}
stack_type = "IPV4_IPV6"
# Sequentially numbered /24s given an offset
ip_cidr_range = each.value
ipv6_access_type = "EXTERNAL"
private_ip_google_access = true
}
module "relays" {
count = var.relay_token != null ? 1 : 0
source = "../../modules/google-cloud/apps/relay"
project_id = module.google-cloud-project.project.project_id
# Remember to update the following published documentation when this changes:
# - /website/src/app/kb/deploy/gateways/readme.mdx
# - /website/src/app/kb/architecture/tech-stack/readme.mdx
instances = {
"africa-south1" = {
subnet = google_compute_subnetwork.subnetwork["africa-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["africa-south1-a"]
}
"asia-east1" = {
subnet = google_compute_subnetwork.subnetwork["asia-east1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-east1-a"]
}
"asia-east2" = {
subnet = google_compute_subnetwork.subnetwork["asia-east2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-east2-a"]
}
"asia-northeast1" = {
subnet = google_compute_subnetwork.subnetwork["asia-northeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-northeast1-a"]
}
"asia-northeast2" = {
subnet = google_compute_subnetwork.subnetwork["asia-northeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-northeast2-a"]
}
"asia-northeast3" = {
subnet = google_compute_subnetwork.subnetwork["asia-northeast3"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-northeast3-a"]
}
"asia-south1" = {
subnet = google_compute_subnetwork.subnetwork["asia-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-south1-a"]
}
"asia-south2" = {
subnet = google_compute_subnetwork.subnetwork["asia-south2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-south2-a"]
}
"asia-southeast1" = {
subnet = google_compute_subnetwork.subnetwork["asia-southeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-southeast1-a"]
}
"asia-southeast2" = {
subnet = google_compute_subnetwork.subnetwork["asia-southeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["asia-southeast2-a"]
}
"australia-southeast1" = {
subnet = google_compute_subnetwork.subnetwork["australia-southeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["australia-southeast1-a"]
}
"australia-southeast2" = {
subnet = google_compute_subnetwork.subnetwork["australia-southeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["australia-southeast2-a"]
}
"europe-central2" = {
subnet = google_compute_subnetwork.subnetwork["europe-central2"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-central2-a"]
}
"europe-north1" = {
subnet = google_compute_subnetwork.subnetwork["europe-north1"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-north1-a"]
}
"europe-southwest1" = {
subnet = google_compute_subnetwork.subnetwork["europe-southwest1"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-southwest1-a"]
}
"europe-west1" = {
subnet = google_compute_subnetwork.subnetwork["europe-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west1-b"]
}
"europe-west2" = {
subnet = google_compute_subnetwork.subnetwork["europe-west2"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west2-a"]
}
"europe-west3" = {
subnet = google_compute_subnetwork.subnetwork["europe-west3"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west3-a"]
}
"europe-west4" = {
subnet = google_compute_subnetwork.subnetwork["europe-west4"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west4-a"]
}
"europe-west6" = {
subnet = google_compute_subnetwork.subnetwork["europe-west6"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west6-a"]
}
"europe-west8" = {
subnet = google_compute_subnetwork.subnetwork["europe-west8"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west8-a"]
}
"europe-west9" = {
subnet = google_compute_subnetwork.subnetwork["europe-west9"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west9-a"]
}
"europe-west10" = {
subnet = google_compute_subnetwork.subnetwork["europe-west10"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west10-a"]
}
"europe-west12" = {
subnet = google_compute_subnetwork.subnetwork["europe-west12"].self_link
type = "e2-micro"
replicas = 1
zones = ["europe-west12-a"]
}
"me-central1" = {
subnet = google_compute_subnetwork.subnetwork["me-central1"].self_link
type = "e2-micro"
replicas = 1
zones = ["me-central1-a"]
}
# Fails with:
# "Access to the region is unavailable. Please contact our sales team at https://cloud.google.com/contact for further assistance."
# "me-central2" = {
# type = "e2-micro"
# replicas = 1
# zones = ["me-central2-a"]
# }
"me-west1" = {
subnet = google_compute_subnetwork.subnetwork["me-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["me-west1-a"]
}
"northamerica-northeast1" = {
subnet = google_compute_subnetwork.subnetwork["northamerica-northeast1"].self_link
type = "e2-micro"
replicas = 1
zones = ["northamerica-northeast1-a"]
}
"northamerica-northeast2" = {
subnet = google_compute_subnetwork.subnetwork["northamerica-northeast2"].self_link
type = "e2-micro"
replicas = 1
zones = ["northamerica-northeast2-a"]
}
"northamerica-south1" = {
subnet = google_compute_subnetwork.subnetwork["northamerica-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["northamerica-south1-a"]
}
"southamerica-east1" = {
subnet = google_compute_subnetwork.subnetwork["southamerica-east1"].self_link
type = "e2-micro"
replicas = 1
zones = ["southamerica-east1-a"]
}
"southamerica-west1" = {
subnet = google_compute_subnetwork.subnetwork["southamerica-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["southamerica-west1-a"]
}
"us-central1" = {
subnet = google_compute_subnetwork.subnetwork["us-central1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-central1-a"]
}
"us-east1" = {
subnet = google_compute_subnetwork.subnetwork["us-east1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-east1-b"]
}
"us-east4" = {
subnet = google_compute_subnetwork.subnetwork["us-east4"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-east4-a"]
}
"us-east5" = {
subnet = google_compute_subnetwork.subnetwork["us-east5"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-east5-a"]
}
"us-south1" = {
subnet = google_compute_subnetwork.subnetwork["us-south1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-south1-a"]
}
"us-west1" = {
subnet = google_compute_subnetwork.subnetwork["us-west1"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west1-a"]
}
"us-west2" = {
subnet = google_compute_subnetwork.subnetwork["us-west2"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west2-a"]
}
"us-west3" = {
subnet = google_compute_subnetwork.subnetwork["us-west3"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west3-a"]
}
"us-west4" = {
subnet = google_compute_subnetwork.subnetwork["us-west4"].self_link
type = "e2-micro"
replicas = 1
zones = ["us-west4-a"]
}
}
network = google_compute_network.network.self_link
naming_suffix = random_string.naming_suffix.result
container_registry = module.google-artifact-registry.url
image_repo = module.google-artifact-registry.repo
image = "relay"
image_tag = local.relay_image_tag
observability_log_level = "info,hyper=off,h2=warn,tower=warn"
application_name = "relay"
application_version = replace(local.relay_image_tag, ".", "-")
application_environment_variables = [
{
name = "FIREZONE_TELEMETRY"
value = "true"
}
]
health_check = {
name = "health"
protocol = "TCP"
port = 8080
initial_delay_sec = 60
check_interval_sec = 15
timeout_sec = 10
healthy_threshold = 1
unhealthy_threshold = 3
http_health_check = {
request_path = "/healthz"
}
}
api_url = "wss://api.${local.tld}"
token = var.relay_token
}
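For reference, the per-region instances map above repeats the same shape for every region; a sketch of an equivalent for expression, under the assumption that every region uses zone "a" except the two overrides below:
locals {
  # Hypothetical zone overrides; all other regions default to zone "a".
  relay_zone_overrides = {
    "europe-west1" = "b"
    "us-east1"     = "b"
  }
  relay_instances = {
    for region, subnet in google_compute_subnetwork.subnetwork :
    region => {
      subnet   = subnet.self_link
      type     = "e2-micro"
      replicas = 1
      zones    = ["${region}-${lookup(local.relay_zone_overrides, region, "a")}"]
    }
  }
}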
# Trigger an alert when there is at least one region without a healthy relay
resource "google_monitoring_alert_policy" "connected_relays_count" {
project = module.google-cloud-project.project.project_id
display_name = "Relays are down"
combiner = "OR"
notification_channels = module.ops.notification_channels
conditions {
display_name = "Relay Instances"
condition_threshold {
filter = "resource.type = \"gce_instance\" AND metric.type = \"custom.googleapis.com/elixir/domain/relays/online_relays_count/last_value\""
comparison = "COMPARISON_LT"
# at least one relay per region must always be online
threshold_value = length(module.relays[0].instances)
duration = "0s"
trigger {
count = 1
}
aggregations {
alignment_period = "60s"
cross_series_reducer = "REDUCE_MAX"
per_series_aligner = "ALIGN_MEAN"
}
}
}
alert_strategy {
auto_close = "172800s"
}
}

View File

@@ -1,101 +0,0 @@
variable "aws_gateway_token" {
type = string
description = "Firezone Gateway token for AWS gateway"
default = null
sensitive = true
}
variable "image_tag" {
type = string
description = "Image tag for all services. Notice: we assume all services are deployed with the same version"
}
variable "metabase_image_tag" {
type = string
default = "v0.47.6"
}
variable "relay_token" {
type = string
default = null
sensitive = true
}
variable "slack_alerts_channel" {
type = string
description = "Slack channel which will receive monitoring alerts"
default = "#feed-staging"
}
variable "slack_alerts_auth_token" {
type = string
description = "Slack auth token for the infra alerts channel"
sensitive = true
}
variable "postmark_server_api_token" {
type = string
sensitive = true
}
variable "mailgun_server_api_token" {
type = string
sensitive = true
}
variable "stripe_secret_key" {
type = string
sensitive = true
}
variable "stripe_webhook_signing_secret" {
type = string
sensitive = true
}
variable "stripe_default_price_id" {
type = string
}
variable "firezone_client_token" {
type = string
sensitive = true
}
variable "workos_api_key" {
type = string
sensitive = true
}
variable "workos_client_id" {
type = string
sensitive = true
}
variable "workos_base_url" {
type = string
}
# Version overrides
#
#
# This section should be used to pin a specific version of a Firezone component
# (e.g. during a rollback) to ensure it's not replaced by a newer one until a manual action is taken.
#
# To update them, go to Terraform Cloud and change/delete the following variables;
# if they are unset, `var.image_tag` will be used.
variable "relay_image_tag" {
type = string
default = null
}
variable "gateway_image_tag" {
type = string
default = null
}
variable "portal_image_tag" {
type = string
default = null
}
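The locals that resolve these overrides (local.relay_image_tag, local.gateway_image_tag, local.portal_image_tag) are defined elsewhere; a minimal sketch of the presumed fallback, assuming a plain coalesce() against var.image_tag:
locals {
  # Presumed fallback: use the pinned override when set, otherwise the shared image tag.
  relay_image_tag   = coalesce(var.relay_image_tag, var.image_tag)
  gateway_image_tag = coalesce(var.gateway_image_tag, var.image_tag)
  portal_image_tag  = coalesce(var.portal_image_tag, var.image_tag)
}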

View File

@@ -1,35 +0,0 @@
terraform {
required_version = "~> 1.10.0"
required_providers {
random = {
source = "hashicorp/random"
version = "~> 3.6"
}
null = {
source = "hashicorp/null"
version = "~> 3.2"
}
google = {
source = "hashicorp/google"
version = "~> 6.9"
}
google-beta = {
source = "hashicorp/google-beta"
version = "~> 6.9"
}
tls = {
source = "hashicorp/tls"
version = "~> 4.0"
}
postgresql = {
source = "cyrilgdn/postgresql"
version = "1.25.0"
}
}
}