mirror of
https://github.com/outbackdingo/firezone.git
synced 2026-01-27 02:18:47 +00:00
chore: move terraform/ to private repo (#9421)
Since we'll be adding ops playbooks and other things here, it makes sense to separate infra from product source. --------- Signed-off-by: Jamil <jamilbk@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
@@ -13,10 +13,5 @@ www
|
||||
.gitmodules
|
||||
.github
|
||||
|
||||
# Terraform
|
||||
.terraform
|
||||
*.tfstate.backup
|
||||
terraform.tfstate.d
|
||||
|
||||
rust/connlib/clients/android/connlib/build/
|
||||
rust/connlib/clients/android/connlib/jniLibs/
|
||||
|
||||
13
.github/CODEOWNERS
vendored
13
.github/CODEOWNERS
vendored
@@ -1,7 +1,6 @@
|
||||
# .github/ @AndrewDryga @jamilbk
|
||||
# elixir/ @AndrewDryga @bmanifold
|
||||
# terraform/ @AndrewDryga @bmanifold
|
||||
# website/ @jamilbk @AndrewDryga
|
||||
# rust/ @thomaseizinger @conectado @ReactorScram
|
||||
# swift/ @jamilbk @ReactorScram
|
||||
# kotlin/ @jamilbk @conectado
|
||||
# .github/ @jamilbk @bmanifold @thomaseizinger
|
||||
# elixir/ @jamilbk @bmanifold
|
||||
# website/ @jamilbk @bmanifold @thomaseizinger
|
||||
# rust/ @jamilbk @thomaseizinger
|
||||
# swift/ @jamilbk @thomaseizinger
|
||||
# kotlin/ @jamilbk @thomaseizinger
|
||||
|
||||
16
.github/dependabot.yml
vendored
16
.github/dependabot.yml
vendored
@@ -144,19 +144,3 @@ updates:
|
||||
directory: elixir/apps/web/assets/
|
||||
schedule:
|
||||
interval: monthly
|
||||
- package-ecosystem: terraform
|
||||
directory: terraform/environments/staging/
|
||||
schedule:
|
||||
interval: monthly
|
||||
groups:
|
||||
google:
|
||||
patterns:
|
||||
- hashicorp/google*
|
||||
- package-ecosystem: terraform
|
||||
directory: terraform/environments/production/
|
||||
schedule:
|
||||
interval: monthly
|
||||
groups:
|
||||
google:
|
||||
patterns:
|
||||
- hashicorp/google*
|
||||
|
||||
203
.github/workflows/_deploy_production.yml
vendored
203
.github/workflows/_deploy_production.yml
vendored
@@ -1,203 +0,0 @@
|
||||
name: Deploy Production
|
||||
run-name: Triggered by ${{ github.actor }}
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Image tag to deploy. Defaults to the last commit SHA in the branch."
|
||||
type: string
|
||||
default: ${{ github.sha }}
|
||||
required: false
|
||||
|
||||
concurrency:
|
||||
group: "production-deploy"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
sanity-check:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Ensure CI passed for the given sha
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh api \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
"repos/firezone/firezone/actions/runs?head_sha=${{ inputs.tag }}&status=success" \
|
||||
| jq -e '.workflow_runs | length > 0' || exit 1
|
||||
|
||||
push:
|
||||
needs: sanity-check
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
packages: write
|
||||
id-token: write
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
image: [domain, api, web, gateway, relay, client]
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Login to staging registry
|
||||
uses: ./.github/actions/gcp-docker-login
|
||||
id: login-staging
|
||||
with:
|
||||
project: firezone-staging
|
||||
- name: Login to production registry
|
||||
uses: ./.github/actions/gcp-docker-login
|
||||
id: login-production
|
||||
with:
|
||||
project: firezone-prod
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
- name: Pull and push images
|
||||
run: |
|
||||
set -xe
|
||||
|
||||
SOURCE_TAG=${{ steps.login-staging.outputs.registry }}/firezone/${{ matrix.image }}:${{ inputs.tag }}
|
||||
|
||||
docker buildx imagetools create \
|
||||
-t ${{ steps.login-production.outputs.registry }}/firezone/${{ matrix.image }}:${{ inputs.tag }} \
|
||||
$SOURCE_TAG
|
||||
- name: Authenticate to Google Cloud
|
||||
id: auth
|
||||
uses: google-github-actions/auth@ba79af03959ebeac9769e648f473a284504d9193 # v2.1.10
|
||||
with:
|
||||
workload_identity_provider: "projects/397012414171/locations/global/workloadIdentityPools/github-actions-pool/providers/github-actions"
|
||||
service_account: "github-actions@github-iam-387915.iam.gserviceaccount.com"
|
||||
export_environment_variables: true
|
||||
create_credentials_file: true
|
||||
- name: Copy Google Cloud Storage binaries to "edge" version
|
||||
# TODO: Add relay here when we deploy Relay from prod artifacts instead of Docker
|
||||
# To do that properly we need to:
|
||||
# - Update publish.yml to publish versioned Relays too (and start versioning Relay changes)
|
||||
# - Add arm64 and armv7l architectures to the Relay builds (we only build for amd64 currently because that's all we need to)
|
||||
if: ${{ matrix.image == 'gateway' || matrix.image == 'client' }}
|
||||
run: |
|
||||
set -xe
|
||||
|
||||
ARCHITECTURES=(x86_64 aarch64 armv7)
|
||||
|
||||
for arch in "${ARCHITECTURES[@]}"; do
|
||||
# Copy sha256sum.txt
|
||||
gcloud storage cp \
|
||||
gs://firezone-staging-artifacts/firezone-${{ matrix.image }}/${{ github.sha }}/${arch}.sha256sum.txt \
|
||||
gs://firezone-prod-artifacts/firezone-${{ matrix.image }}/edge/${arch}.sha256sum.txt
|
||||
gcloud storage cp \
|
||||
gs://firezone-staging-artifacts/firezone-${{ matrix.image }}/${{ github.sha }}/${arch}.sha256sum.txt \
|
||||
gs://firezone-prod-artifacts/firezone-${{ matrix.image }}/${{ github.sha }}/${arch}.sha256sum.txt
|
||||
|
||||
# Copy binaries
|
||||
gcloud storage cp \
|
||||
gs://firezone-staging-artifacts/firezone-${{ matrix.image }}/${{ github.sha }}/${arch} \
|
||||
gs://firezone-prod-artifacts/firezone-${{ matrix.image }}/edge/${arch}
|
||||
gcloud storage cp \
|
||||
gs://firezone-staging-artifacts/firezone-${{ matrix.image }}/${{ github.sha }}/${arch} \
|
||||
gs://firezone-prod-artifacts/firezone-${{ matrix.image }}/${{ github.sha }}/${arch}
|
||||
done
|
||||
|
||||
deploy-production:
|
||||
needs: push
|
||||
runs-on: ubuntu-22.04
|
||||
environment: gcp_production
|
||||
permissions:
|
||||
contents: write
|
||||
env:
|
||||
TF_CLOUD_ORGANIZATION: "firezone"
|
||||
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
|
||||
TF_WORKSPACE: "production"
|
||||
steps:
|
||||
# First, checkout the main ref for setting up Terraform
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
|
||||
- name: Tool Versions
|
||||
id: versions
|
||||
uses: marocchino/tool-versions-action@18a164fa2b0db1cc1edf7305fcb17ace36d1c306 # v1.2.0
|
||||
- uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: ${{ steps.versions.outputs.terraform }}
|
||||
# Then, checkout the ref specified in the workflow run
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ github.event.workflow_run.head_branch }}
|
||||
submodules: true
|
||||
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
|
||||
- name: Upload Configuration
|
||||
uses: hashicorp/tfc-workflows-github/actions/upload-configuration@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: apply-upload
|
||||
with:
|
||||
workspace: ${{ env.TF_WORKSPACE }}
|
||||
# Subdirectory is set in the project settings:
|
||||
# https://app.terraform.io/app/firezone/workspaces/production/settings/general
|
||||
directory: "./"
|
||||
- name: Create Plan Run
|
||||
uses: hashicorp/tfc-workflows-github/actions/create-run@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: apply-run
|
||||
env:
|
||||
TF_VAR_image_tag: '"${{ inputs.tag }}"'
|
||||
with:
|
||||
workspace: ${{ env.TF_WORKSPACE }}
|
||||
configuration_version: ${{ steps.apply-upload.outputs.configuration_version_id }}
|
||||
- name: Apply
|
||||
uses: hashicorp/tfc-workflows-github/actions/apply-run@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
if: fromJSON(steps.apply-run.outputs.payload).data.attributes.actions.IsConfirmable
|
||||
id: apply
|
||||
with:
|
||||
run: ${{ steps.apply-run.outputs.run_id }}
|
||||
comment: "Apply Run from GitHub Actions CI ${{ inputs.tag }}"
|
||||
|
||||
# Some intrepid users are self-hosting these, so support them as best we can by making our
|
||||
# infrastructure images available to them.
|
||||
publish-infra-images:
|
||||
# Only publish if our own deploy was successful
|
||||
needs: deploy-production
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
packages: write
|
||||
id-token: write
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
image: [domain, api, web, relay]
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: Login to staging registry
|
||||
uses: ./.github/actions/gcp-docker-login
|
||||
id: login-staging
|
||||
with:
|
||||
project: firezone-staging
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
|
||||
- name: Pull and push
|
||||
run: |
|
||||
set -xe
|
||||
|
||||
SOURCE_TAG=${{ steps.login-staging.outputs.registry }}/firezone/${{ matrix.image }}:${{ inputs.tag }}
|
||||
|
||||
docker buildx imagetools create \
|
||||
-t ghcr.io/firezone/${{ matrix.image }}:${{ inputs.tag }} \
|
||||
-t ghcr.io/firezone/${{ matrix.image }}:latest \
|
||||
$SOURCE_TAG
|
||||
|
||||
update-vercel:
|
||||
needs: deploy-production
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
VERCEL_TEAM_ID: firezone
|
||||
VERCEL_EDGE_CONFIG_ID: ecfg_hmorgeez26rwyncgsuj1yaibfx4p
|
||||
steps:
|
||||
- name: Update FIREZONE_DEPLOYED_SHA
|
||||
run: |
|
||||
curl --fail -X PATCH "https://api.vercel.com/v1/edge-config/${VERCEL_EDGE_CONFIG_ID}/items?teamId=${VERCEL_TEAM_ID}" \
|
||||
-H "Authorization: Bearer ${{ secrets.VERCEL_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{ "items": [ { "operation": "upsert", "key": "deployed_sha", "value": "${{ inputs.tag }}" } ] }'
|
||||
109
.github/workflows/_terraform.yml
vendored
109
.github/workflows/_terraform.yml
vendored
@@ -1,109 +0,0 @@
|
||||
name: Terraform
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
plan-deploy:
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
env:
|
||||
TF_CLOUD_ORGANIZATION: "firezone"
|
||||
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
|
||||
TF_WORKSPACE: "staging"
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
|
||||
- run: ls -alR terraform/environments
|
||||
- name: Tool Versions
|
||||
id: versions
|
||||
uses: marocchino/tool-versions-action@18a164fa2b0db1cc1edf7305fcb17ace36d1c306 # v1.2.0
|
||||
- uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: ${{ steps.versions.outputs.terraform }}
|
||||
- name: Validate cloud-init
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y cloud-init
|
||||
cloud-init schema --config-file terraform/modules/google-cloud/apps/relay/templates/cloud-init.yaml
|
||||
# This doesn't work if the file contains interpolated variables
|
||||
# cloud-init schema --config-file terraform/modules/google-cloud/apps/elixir/templates/cloud-init.yaml
|
||||
- name: Check Formatting
|
||||
working-directory: terraform
|
||||
run: |
|
||||
terraform fmt --check --recursive
|
||||
- uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
|
||||
id: changes
|
||||
with:
|
||||
filters: |
|
||||
terraform:
|
||||
- 'terraform/**'
|
||||
- if: steps.changes.outputs.terraform == 'true'
|
||||
name: Upload Configuration
|
||||
uses: hashicorp/tfc-workflows-github/actions/upload-configuration@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: plan-upload
|
||||
with:
|
||||
workspace: ${{ env.TF_WORKSPACE }}
|
||||
# Subdirectory is set in the project settings:
|
||||
# https://app.terraform.io/app/firezone/workspaces/staging/settings/general
|
||||
directory: "./"
|
||||
speculative: true
|
||||
- if: steps.changes.outputs.terraform == 'true'
|
||||
name: Create Plan Run
|
||||
uses: hashicorp/tfc-workflows-github/actions/create-run@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: plan-run
|
||||
env:
|
||||
TF_VAR_image_tag: '"${{ github.sha }}"'
|
||||
with:
|
||||
workspace: ${{ env.TF_WORKSPACE }}
|
||||
configuration_version: ${{ steps.plan-upload.outputs.configuration_version_id }}
|
||||
plan_only: true
|
||||
- if: steps.changes.outputs.terraform == 'true'
|
||||
name: Get Plan Output
|
||||
uses: hashicorp/tfc-workflows-github/actions/plan-output@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: plan-output
|
||||
with:
|
||||
plan: ${{ fromJSON(steps.plan-run.outputs.payload).data.relationships.plan.data.id }}
|
||||
- name: Update PR
|
||||
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
|
||||
id: plan-comment
|
||||
if: ${{ github.event_name == 'pull_request' && steps.changes.outputs.terraform == 'true' }}
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
// 1. Retrieve existing bot comments for the PR
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
});
|
||||
const botComment = comments.find(comment => {
|
||||
return comment.user.type === 'Bot' && comment.body.includes('Terraform Cloud Plan Output')
|
||||
});
|
||||
const output = `#### Terraform Cloud Plan Output
|
||||
|
||||
\`\`\`
|
||||
Plan: ${{ steps.plan-output.outputs.add }} to add, ${{ steps.plan-output.outputs.change }} to change, ${{ steps.plan-output.outputs.destroy }} to destroy.
|
||||
\`\`\`
|
||||
|
||||
[Terraform Cloud Plan](${{ steps.plan-run.outputs.run_link }})
|
||||
`;
|
||||
// 3. Update previous comment or create new one
|
||||
if (botComment) {
|
||||
github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: botComment.id,
|
||||
body: output
|
||||
});
|
||||
} else {
|
||||
github.rest.issues.createComment({
|
||||
issue_number: context.issue.number,
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
body: output
|
||||
});
|
||||
}
|
||||
63
.github/workflows/cd.yml
vendored
63
.github/workflows/cd.yml
vendored
@@ -2,12 +2,6 @@ name: Continuous Delivery
|
||||
on:
|
||||
# Used for debugging the workflow by manually calling it
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
deploy-staging:
|
||||
description: "Also deploy to staging. By default the deploy is not executed when triggering this workflow manually."
|
||||
type: boolean
|
||||
default: false
|
||||
required: false
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
@@ -40,60 +34,3 @@ jobs:
|
||||
with:
|
||||
profile: "release"
|
||||
stage: "release"
|
||||
|
||||
deploy-staging:
|
||||
if: ${{ github.event_name != 'workflow_dispatch' || inputs.deploy-staging }}
|
||||
runs-on: ubuntu-22.04
|
||||
environment: gcp_staging
|
||||
permissions:
|
||||
contents: write
|
||||
# Cancel old workflow runs if new code is pushed
|
||||
concurrency:
|
||||
group: "staging-deploy-${{ github.workflow }}-${{ github.ref }}"
|
||||
cancel-in-progress: false
|
||||
needs: ci
|
||||
env:
|
||||
TF_CLOUD_ORGANIZATION: "firezone"
|
||||
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
|
||||
TF_WORKSPACE: "staging"
|
||||
steps:
|
||||
# First, checkout the main ref for setting up Terraform
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
submodules: true
|
||||
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
|
||||
- name: Tool Versions
|
||||
id: versions
|
||||
uses: marocchino/tool-versions-action@18a164fa2b0db1cc1edf7305fcb17ace36d1c306 # v1.2.0
|
||||
- uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
|
||||
with:
|
||||
terraform_version: ${{ steps.versions.outputs.terraform }}
|
||||
# Then, checkout the ref specified in the workflow run
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
ref: ${{ github.event.workflow_run.head_branch }}
|
||||
submodules: true
|
||||
ssh-key: ${{ secrets.ENVIRONMENTS_REPO_DEPLOY_KEY }}
|
||||
- name: Upload Configuration
|
||||
uses: hashicorp/tfc-workflows-github/actions/upload-configuration@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: apply-upload
|
||||
with:
|
||||
workspace: ${{ env.TF_WORKSPACE }}
|
||||
# Subdirectory is set in the project settings:
|
||||
# https://app.terraform.io/app/firezone/workspaces/staging/settings/general
|
||||
directory: "./"
|
||||
- name: Create Plan Run
|
||||
uses: hashicorp/tfc-workflows-github/actions/create-run@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
id: apply-run
|
||||
env:
|
||||
TF_VAR_image_tag: '"${{ github.sha }}"'
|
||||
with:
|
||||
workspace: ${{ env.TF_WORKSPACE }}
|
||||
configuration_version: ${{ steps.apply-upload.outputs.configuration_version_id }}
|
||||
- name: Apply
|
||||
uses: hashicorp/tfc-workflows-github/actions/apply-run@8e08d1ba957673f5fbf971a22b3219639dc45661 # v1.3.2
|
||||
if: fromJSON(steps.apply-run.outputs.payload).data.attributes.actions.IsConfirmable
|
||||
id: apply
|
||||
with:
|
||||
run: ${{ steps.apply-run.outputs.run_id }}
|
||||
comment: "Apply Run from GitHub Actions CI ${{ github.sha }}"
|
||||
|
||||
3
.github/workflows/ci.yml
vendored
3
.github/workflows/ci.yml
vendored
@@ -32,9 +32,6 @@ jobs:
|
||||
secrets: inherit
|
||||
static-analysis:
|
||||
uses: ./.github/workflows/_static-analysis.yml
|
||||
terraform:
|
||||
uses: ./.github/workflows/_terraform.yml
|
||||
secrets: inherit
|
||||
codeql:
|
||||
uses: ./.github/workflows/_codeql.yml
|
||||
secrets: inherit
|
||||
|
||||
39
.github/workflows/deploy.yml
vendored
39
.github/workflows/deploy.yml
vendored
@@ -1,39 +0,0 @@
|
||||
name: Deploy Production
|
||||
run-name: Triggered by ${{ github.actor }} on ${{ github.event_name }}
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
confirmation:
|
||||
description: "Are you SURE you want to deploy all changes from the selected commit to production?"
|
||||
type: boolean
|
||||
tag:
|
||||
description: "Image tag to deploy. Defaults to the last commit SHA in the branch."
|
||||
type: string
|
||||
required: false
|
||||
|
||||
concurrency:
|
||||
group: "deploy-production-${{ github.event_name }}-${{ github.workflow }}-${{ github.ref }}"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# This is *not* run in CI on main in order to allow
|
||||
# breaking changes to be merged as administrator and have the
|
||||
# resulting CI green on main.
|
||||
# So run them here.
|
||||
compatibility-tests:
|
||||
uses: ./.github/workflows/_integration_tests.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
relay_image: "us-east1-docker.pkg.dev/firezone-staging/firezone/relay"
|
||||
gateway_image: "ghcr.io/firezone/gateway"
|
||||
gateway_tag: "latest"
|
||||
client_image: "ghcr.io/firezone/client"
|
||||
client_tag: "latest"
|
||||
|
||||
deploy-production:
|
||||
if: ${{ inputs.confirmation }}
|
||||
needs: compatibility-tests
|
||||
secrets: inherit
|
||||
uses: ./.github/workflows/_deploy_production.yml
|
||||
with:
|
||||
tag: ${{ inputs.tag || github.sha }}
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,3 +0,0 @@
|
||||
[submodule "terraform/environments"]
|
||||
path = terraform/environments
|
||||
url = git@github.com:firezone/environments.git
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
/*
|
||||
/*/
|
||||
!/terraform/
|
||||
@@ -3,7 +3,6 @@
|
||||
nodejs 20.14.0
|
||||
elixir 1.18.2-otp-27
|
||||
erlang 27.2.1
|
||||
terraform 1.10.4
|
||||
|
||||
# Used for static analysis
|
||||
python 3.11.9
|
||||
|
||||
@@ -94,13 +94,6 @@ product documentation, organized as follows:
|
||||
- [swift/](../swift/apple): macOS / iOS clients.
|
||||
- [kotlin/](../kotlin/android): Android / ChromeOS clients.
|
||||
- [website/](../website): Marketing website and product documentation.
|
||||
- [terraform/](../terraform): Terraform files for various example deployments.
|
||||
- [terraform/examples/google-cloud/nat-gateway](../terraform/examples/google-cloud/nat-gateway):
|
||||
Example Terraform configuration for deploying a cluster of Firezone Gateways
|
||||
behind a NAT gateway on GCP with a single egress IP.
|
||||
- [terraform/modules/google-cloud/apps/gateway-region-instance-group](../terraform/modules/google-cloud/apps/gateway-region-instance-group):
|
||||
Production-ready Terraform module for deploying regional Firezone Gateways
|
||||
to Google Cloud Compute using Regional Instance Groups.
|
||||
|
||||
## Quickstart
|
||||
|
||||
|
||||
@@ -18,8 +18,3 @@ docs
|
||||
.gitignore
|
||||
.gitmodules
|
||||
.github
|
||||
|
||||
# Terraform
|
||||
.terraform
|
||||
*.tfstate.backup
|
||||
terraform.tfstate.d
|
||||
|
||||
681
elixir/README.md
681
elixir/README.md
@@ -1,680 +1 @@
|
||||
# Welcome to Elixir-land!
|
||||
|
||||
This README provides an overview for running and managing Firezone's
|
||||
Elixir-based control plane.
|
||||
|
||||
## Running Control Plane for local development
|
||||
|
||||
You can use the [Top-Level Docker Compose](../docker-compose.yml) to start any
|
||||
services locally. The `web` and `api` compose services are built application
|
||||
releases that are pretty much the same as the ones we run in production, while
|
||||
the `elixir` compose service runs raw Elixir code, without a built release.
|
||||
|
||||
This means you'll want to use the `elixir` compose service to run Mix tasks and
|
||||
any Elixir code on-the-fly, but you can't do that in `web`/`api` so easily
|
||||
because Elixir strips out Mix and other tooling
|
||||
[when building an application release](https://hexdocs.pm/mix/Mix.Tasks.Release.html).
|
||||
|
||||
`elixir` additionally caches `_build` and `node_modules` to speed up compilation
|
||||
time and syncs `/apps`, `/config` and other folders with the host machine.
|
||||
|
||||
```bash
|
||||
# Make sure to run this every time code in elixir/ changes,
|
||||
# docker doesn't do that for you!
|
||||
❯ docker-compose build
|
||||
|
||||
# Create the database
|
||||
#
|
||||
# Hint: you can run any mix commands like this,
|
||||
# eg. mix ecto.reset will reset your database
|
||||
#
|
||||
# Also to drop the database you need to stop all active connections,
|
||||
# so if you get an error stop all services first:
|
||||
#
|
||||
# docker-compose down
|
||||
#
|
||||
# Or you can just run both reset and seed in one-liner:
|
||||
#
|
||||
# docker-compose run elixir /bin/sh -c "cd apps/domain && mix do ecto.reset, ecto.seed"
|
||||
#
|
||||
❯ docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.create"
|
||||
|
||||
# Ensure database is migrated before running seeds
|
||||
❯ docker-compose run api bin/migrate
|
||||
# or
|
||||
❯ docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.migrate"
|
||||
|
||||
# Seed the database
|
||||
# Hint: some access tokens will be generated and written to stdout,
|
||||
# don't forget to save them for later
|
||||
❯ docker-compose run api bin/seed
|
||||
# or
|
||||
❯ docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.seed"
|
||||
|
||||
# Start the API service for control plane sockets while listening to STDIN
|
||||
# (where you will see all the logs)
|
||||
❯ docker-compose up api --build
|
||||
```
|
||||
|
||||
Now you can verify that it's working by connecting to a websocket:
|
||||
|
||||
<details>
|
||||
<summary>Gateway</summary>
|
||||
|
||||
```bash
|
||||
# Note: The token value below is an example. The token value you will need is generated and printed out when seeding the database, as described earlier in the document.
|
||||
❯ export GATEWAY_TOKEN_FROM_SEEDS=".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkMjI3NDU2MGItZTk3Yi00NWU0LThiMzQtNjc5Yzc2MTdlOThkbQAAADhPMDJMN1VTMkozVklOT01QUjlKNklMODhRSVFQNlVPOEFRVk82VTVJUEwwVkpDMjJKR0gwPT09PW4GAF3gLBONAWIAAVGA.DCT0Qv80qzF5OQ6CccLKXPLgzC3Rzx5DqzDAh9mWAww"
|
||||
|
||||
❯ websocat --header="User-Agent: iOS/12.7 (iPhone) connlib/0.7.412" "ws://127.0.0.1:13000/gateway/websocket?token=${GATEWAY_TOKEN_FROM_SEEDS}&external_id=thisisrandomandpersistent&name=kkX1&public_key=kceI60D6PrwOIiGoVz6hD7VYCgD1H57IVQlPJTTieUE="
|
||||
|
||||
# After this you need to join the `gateway` topic.
|
||||
# For details on this structure see https://hexdocs.pm/phoenix/Phoenix.Socket.Message.html
|
||||
❯ {"event":"phx_join","topic":"gateway","payload":{},"ref":"unique_string_ref","join_ref":"unique_join_ref"}
|
||||
|
||||
{"ref":"unique_string_ref","payload":{"status":"ok","response":{}},"topic":"gateway","event":"phx_reply"}
|
||||
{"ref":null,"payload":{"interface":{"ipv6":"fd00:2021:1111::35:f630","ipv4":"100.77.125.87"},"ipv4_masquerade_enabled":true,"ipv6_masquerade_enabled":true},"topic":"gateway","event":"init"}
|
||||
```
|
||||
|
||||
</details>
|
||||
<details>
|
||||
<summary>Relay</summary>
|
||||
|
||||
```bash
|
||||
# Note: The token value below is an example. The token value you will need is generated and printed out when seeding the database, as described earlier in the document.
|
||||
❯ export RELAY_TOKEN_FROM_SEEDS=".SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkNTQ5YzQxMDctMTQ5Mi00ZjhmLWE0ZWMtYTlkMmE2NmQ4YWE5bQAAADhQVTVBSVRFMU84VkRWTk1ITU9BQzc3RElLTU9HVERJQTY3MlM2RzFBQjAyT1MzNEg1TUUwPT09PW4GAJeo1TONAWIAAVGA.Vi3gCkFKoWH03uSUshAYYzRhw7eKQxYw1piFnkFPGtA"
|
||||
|
||||
❯ websocat --header="User-Agent: Linux/5.2.6 (Debian; x86_64) relay/0.7.412" "ws://127.0.0.1:8081/relay/websocket?token=${RELAY_TOKEN_FROM_SEEDS}&ipv4=24.12.79.100&ipv6=4d36:aa7f:473c:4c61:6b9e:2416:9917:55cc"
|
||||
|
||||
# Here is what you will see in docker logs firezone-api-1
|
||||
# {"time":"2023-06-05T23:16:01.537Z","severity":"info","message":"CONNECTED TO API.Relay.Socket in 251ms\n Transport: :websocket\n Serializer: Phoenix.Socket.V1.JSONSerializer\n Parameters: %{\"ipv4\" => \"24.12.79.100\", \"ipv6\" => \"4d36:aa7f:473c:4c61:6b9e:2416:9917:55cc\", \"stamp_secret\" => \"[FILTERED]\", \"token\" => \"[FILTERED]\"}","metadata":{"domain":["elixir"],"erl_level":"info"}}
|
||||
|
||||
# After this you need to join the `relay` topic and pass a `stamp_secret` in the payload.
|
||||
# For details on this structure see https://hexdocs.pm/phoenix/Phoenix.Socket.Message.html
|
||||
❯ {"event":"phx_join","topic":"relay","payload":{"stamp_secret":"makemerandomplz"},"ref":"unique_string_ref","join_ref":"unique_join_ref"}
|
||||
|
||||
{"event":"phx_reply","payload":{"response":{},"status":"ok"},"ref":"unique_string_ref","topic":"relay"}
|
||||
{"event":"init","payload":{},"ref":null,"topic":"relay"}
|
||||
```
|
||||
|
||||
</details>
|
||||
<details>
|
||||
<summary>Client</summary>
|
||||
|
||||
```bash
|
||||
# Note: The token value below is an example. The token value you will need is generated and printed out when seeding the database, as described earlier in the document.
|
||||
❯ export CLIENT_TOKEN_FROM_SEEDS="n.SFMyNTY.g2gDaANtAAAAJGM4OWJjYzhjLTkzOTItNGRhZS1hNDBkLTg4OGFlZjZkMjhlMG0AAAAkN2RhN2QxY2QtMTExYy00NGE3LWI1YWMtNDAyN2I5ZDIzMGU1bQAAACtBaUl5XzZwQmstV0xlUkFQenprQ0ZYTnFJWktXQnMyRGR3XzJ2Z0lRdkZnbgYAGUmu74wBYgABUYA.UN3vSLLcAMkHeEh5VHumPOutkuue8JA6wlxM9JxJEPE"
|
||||
|
||||
# Panel will only accept token if it's coming with this User-Agent header and from IP 172.28.0.1
|
||||
❯ export CLIENT_USER_AGENT="iOS/12.5 (iPhone) connlib/0.7.412"
|
||||
|
||||
❯ websocat --header="User-Agent: ${CLIENT_USER_AGENT}" "ws://127.0.0.1:8081/client/websocket?token=${CLIENT_TOKEN_FROM_SEEDS}&external_id=thisisrandomandpersistent&name=kkX1&public_key=kceI60D6PrwOIiGoVz6hD7VYCgD1H57IVQlPJTTieUE="
|
||||
|
||||
# Here is what you will see in docker logs firezone-api-1
|
||||
# firezone-api-1 | {"domain":["elixir"],"erl_level":"info","logging.googleapis.com/sourceLocation":{"file":"lib/phoenix/logger.ex","line":306,"function":"Elixir.Phoenix.Logger.phoenix_socket_connected/4"},"message":"CONNECTED TO API.Client.Socket in 83ms\n Transport: :websocket\n Serializer: Phoenix.Socket.V1.JSONSerializer\n Parameters: %{\"external_id\" => \"thisisrandomandpersistent\", \"name\" => \"kkX1\", \"public_key\" => \"[FILTERED]\", \"token\" => \"[FILTERED]\"}","severity":"INFO","time":"2023-06-23T21:01:49.566Z"}
|
||||
|
||||
# After this you need to join the `client` topic and pass a `stamp_secret` in the payload.
|
||||
# For details on this structure see https://hexdocs.pm/phoenix/Phoenix.Socket.Message.html
|
||||
❯ {"event":"phx_join","topic":"client","payload":{},"ref":"unique_string_ref","join_ref":"unique_join_ref"}
|
||||
|
||||
{"ref":"unique_string_ref","topic":"client","event":"phx_reply","payload":{"status":"ok","response":{}}}
|
||||
{"ref":null,"topic":"client","event":"init","payload":{"interface":{"ipv6":"fd00:2021:1111::11:f4bd","upstream_dns":[],"ipv4":"100.71.71.245"},"resources":[{"id":"4429d3aa-53ea-4c03-9435-4dee2899672b","name":"172.20.0.1/16","type":"cidr","address":"172.20.0.0/16"},{"id":"85a1cffc-70d3-46dd-aa6b-776192af7b06","name":"gitlab.mycorp.com","type":"dns","address":"gitlab.mycorp.com","ipv6":"fd00:2021:1111::5:b370","ipv4":"100.85.109.146"}]}}
|
||||
|
||||
# List online relays for a Resource
|
||||
❯ {"event":"prepare_connection","topic":"client","payload":{"resource_id":"1f27735f-651d-49e8-840c-8f1ba581d88e"},"ref":"unique_prepare_connection_ref"}
|
||||
|
||||
{"ref":"unique_prepare_connection_ref","payload":{"status":"ok","response":{"relays":[{"type":"turn","uri":"turn:189.172.72.111:3478","username":"1738022400:4ZxvSNDzU98MJiEjsR8DOA","password":"TVZvSgIGFK0TtNDXFVU9gv9a1WDz2Ou7RTEUis4E6To","expires_at":1738022400},{"type":"turn","uri":"turn:[::1]:3478","username":"1738022400:KCYrRTRmfGNAEEe7KyjHkA","password":"8KYplQOKBf5smJRZDhC54kiKKNVmUxsVxH1V8xfY/do","expires_at":1738022400}],"resource_id":"1f27735f-651d-49e8-840c-8f1ba581d88e","gateway_remote_ip":"127.0.0.1","gateway_id":"6e52c0ce-ccd9-46d9-8715-796ec9812719"}},"topic":"client","event":"phx_reply"}
|
||||
{"event":"request_connection","topic":"client","payload":{"resource_id":"1f27735f-651d-49e8-840c-8f1ba581d88e","client_payload":"RTC_SD","client_preshared_key":"+HapiGI5UdeRjKuKTwk4ZPPYpCnlXHvvqebcIevL+2A="},"ref":"unique_request_connection_ref"}
|
||||
|
||||
# Initiate connection to a resource
|
||||
❯ {"event":"request_connection","topic":"client","payload":{"gateway_id":"6e52c0ce-ccd9-46d9-8715-796ec9812719","resource_id":"1f27735f-651d-49e8-840c-8f1ba581d88e","client_payload":"RTC_SD","client_preshared_key":"+HapiGI5UdeRjKuKTwk4ZPPYpCnlXHvvqebcIevL+2A="},"ref":"unique_request_connection_ref"}
|
||||
|
||||
```
|
||||
|
||||
Note: when you run multiple commands it can hang because Phoenix expects a
|
||||
heartbeat packet every 5 seconds, so it can kill your websocket if you send
|
||||
commands slower than that.
|
||||
|
||||
</details>
|
||||
<br />
|
||||
|
||||
You can reset the database (eg. when there is a migration that breaks data model
|
||||
for unreleased versions) using following command:
|
||||
|
||||
```bash
|
||||
❯ docker-compose run elixir /bin/sh -c "cd apps/domain && mix ecto.reset"
|
||||
```
|
||||
|
||||
Stopping everything is easy too:
|
||||
|
||||
```bash
|
||||
docker-compose down
|
||||
```
|
||||
|
||||
## Useful commands for local testing and debugging
|
||||
|
||||
Connecting to an IEx interactive console:
|
||||
|
||||
```bash
|
||||
❯ docker-compose run elixir /bin/sh -c "cd apps/domain && iex -S mix"
|
||||
```
|
||||
|
||||
Connecting to a running api/web instance shell:
|
||||
|
||||
```bash
|
||||
❯ docker exec -it firezone-api-1 sh
|
||||
/app
|
||||
```
|
||||
|
||||
Connecting to a running api/web instance to run Elixir code from them:
|
||||
|
||||
```bash
|
||||
# Start all services in daemon mode (in background)
|
||||
❯ docker-compose up -d --build
|
||||
|
||||
# Connect to a running API node
|
||||
❯ docker exec -it firezone-api-1 bin/api remote
|
||||
Erlang/OTP 25 [erts-13.1.4] [source] [64-bit] [smp:5:5] [ds:5:5:10] [async-threads:1]
|
||||
|
||||
Interactive Elixir (1.14.3) - press Ctrl+C to exit (type h() ENTER for help)
|
||||
iex(api@127.0.0.1)1>
|
||||
|
||||
# Connect to a running Web node
|
||||
❯ docker exec -it firezone-web-1 bin/web remote
|
||||
Erlang/OTP 25 [erts-13.1.4] [source] [64-bit] [smp:5:5] [ds:5:5:10] [async-threads:1]
|
||||
|
||||
Interactive Elixir (1.14.3) - press Ctrl+C to exit (type h() ENTER for help)
|
||||
iex(web@127.0.0.1)1>
|
||||
```
|
||||
|
||||
From `iex` shell you can run any Elixir code, for example you can emulate a full
|
||||
flow using process messages, just keep in mind that you need to run seeds before
|
||||
executing this example:
|
||||
|
||||
```elixir
|
||||
[gateway | _rest_gateways] = Domain.Repo.all(Domain.Gateways.Gateway)
|
||||
:ok = Events.Hooks.Gateways.connect(gateway)
|
||||
|
||||
[relay | _rest_relays] = Domain.Repo.all(Domain.Relays.Relay)
|
||||
relay_secret = Domain.Crypto.random_token()
|
||||
:ok = Domain.Relays.connect_relay(relay, relay_secret)
|
||||
```
|
||||
|
||||
Now if you connect and list resources there will be one online because there is
|
||||
a relay and gateway online.
|
||||
|
||||
Some of the functions require authorization, here is how you can obtain a
|
||||
subject:
|
||||
|
||||
```elixir
|
||||
user_agent = "User-Agent: iOS/12.7 (iPhone) connlib/0.7.412"
|
||||
remote_ip = {127, 0, 0, 1}
|
||||
|
||||
# For a client
|
||||
context = %Domain.Auth.Context{type: :client, user_agent: user_agent, remote_ip: remote_ip}
|
||||
{:ok, subject} = Domain.Auth.authenticate(client_token, context)
|
||||
|
||||
# For an admin user, imitating the browser session
|
||||
context = %Domain.Auth.Context{type: :browser, user_agent: user_agent, remote_ip: remote_ip}
|
||||
provider = Domain.Repo.get_by(Domain.Auth.Provider, adapter: :userpass)
|
||||
identity = Domain.Repo.get_by(Domain.Auth.Identity, provider_id: provider.id, provider_identifier: "firezone@localhost.local")
|
||||
token = Domain.Auth.create_token(identity, context, "", nil)
|
||||
browser_token = Domain.Tokens.encode_fragment!(token)
|
||||
{:ok, subject} = Domain.Auth.authenticate(browser_token, context)
|
||||
```
|
||||
|
||||
Listing connected gateways, relays, clients for an account:
|
||||
|
||||
```elixir
|
||||
account_id = "c89bcc8c-9392-4dae-a40d-888aef6d28e0"
|
||||
|
||||
%{
|
||||
gateways: Domain.Gateways.Presence.list("gateways:#{account_id}"),
|
||||
relays: Domain.Relays.Presence.list("relays:#{account_id}"),
|
||||
clients: Domain.Clients.Presence.list("clients:#{account_id}"),
|
||||
}
|
||||
```
|
||||
|
||||
### Connecting billing in dev mode for manual testing
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- A Stripe account (Note: for the Firezone team, you will need to be invited to
|
||||
the Firezone Stripe account)
|
||||
- [Stripe CLI](https://github.com/stripe/stripe-cli)
|
||||
|
||||
Steps:
|
||||
|
||||
1. Use static seeds to provision account ID that corresponds to staging setup on
|
||||
Stripe:
|
||||
|
||||
```bash
|
||||
STATIC_SEEDS=true mix do ecto.reset, ecto.seed
|
||||
```
|
||||
|
||||
1. Start Stripe CLI webhook proxy:
|
||||
|
||||
```bash
|
||||
stripe listen --forward-to localhost:13001/integrations/stripe/webhooks
|
||||
```
|
||||
|
||||
1. Start the Phoenix server with enabled billing from the [`elixir/`](./) folder
|
||||
using a [test mode token](https://dashboard.stripe.com/test/apikeys):
|
||||
|
||||
```bash
|
||||
cd elixir/
|
||||
BILLING_ENABLED=true STRIPE_SECRET_KEY="...copy from stripe dashboard..." STRIPE_WEBHOOK_SIGNING_SECRET="...copy from stripe cli tool.." mix phx.server
|
||||
```
|
||||
|
||||
When updating the billing plan in stripe, use the
|
||||
[Stripe Testing Docs](https://docs.stripe.com/testing#testing-interactively) for
|
||||
how to add test payment info
|
||||
|
||||
### WorkOS integration
|
||||
|
||||
WorkOS is currently being used for JumpCloud directory sync integration. This
|
||||
allows JumpCloud users to use SCIM on the JumpCloud side, rather than having to
|
||||
give Firezone an admin JumpCloud API token.
|
||||
|
||||
#### Connecting WorkOS in dev mode for manual testing
|
||||
|
||||
If you are not planning to use the JumpCloud provider in your local development
|
||||
setup, then no additional setup is needed. However, if you do need to use the
|
||||
JumpCloud provider locally, you will need to obtain an API Key and Client ID
|
||||
from the [WorkOS Dashboard](https://dashboard.workos.com/api-keys).
|
||||
|
||||
To obtain a WorkOS dashboard login, contact one of the following Firezone team
|
||||
members:
|
||||
|
||||
- @jamilbk
|
||||
- @bmanifold
|
||||
- @AndrewDryga
|
||||
|
||||
Once you are able to login to the WorkOS Dashboard, make sure that you have
|
||||
selected the 'Staging' environment within WorkOS. Navigate to the API Keys page
|
||||
and use the `Create Key` button to obtain credentials.
|
||||
|
||||
After obtaining WorkOS API credentials, you will need to make sure they are set
|
||||
in the environment ENVs when starting your local dev instance of Firezone. As an
|
||||
example:
|
||||
|
||||
```bash
|
||||
cd elixir/
|
||||
WORKOS_API_KEY="..." WORKOS_CLIENT_ID="..." mix phx.server
|
||||
```
|
||||
|
||||
### Acceptance tests
|
||||
|
||||
You can disable headless mode for the browser by adding
|
||||
|
||||
```elixir
|
||||
|
||||
@tag debug: true
|
||||
feature ....
|
||||
```
|
||||
|
||||
to the acceptance test that you are running.
|
||||
|
||||
## Connecting to a staging or production instance
|
||||
|
||||
We use Google Cloud Platform for all our staging and production infrastructure.
|
||||
You'll need access to this env to perform the commands below; to request access
|
||||
you need to complete the following process:
|
||||
|
||||
- Open a PR adding yourself to `project_owners` in `main.tf` for each of the
|
||||
[environments](../terraform/environments) you need access.
|
||||
- Request a review from an existing project owner.
|
||||
- Once approved, merge the PR and verify access by continuing with one of the
|
||||
steps below.
|
||||
|
||||
This is a danger zone so first of all, ALWAYS make sure on which environment
|
||||
your code is running:
|
||||
|
||||
```bash
|
||||
❯ gcloud config get project
|
||||
firezone-staging
|
||||
```
|
||||
|
||||
Then you want to figure out which specific instance you want to connect to:
|
||||
|
||||
```bash
|
||||
❯ gcloud compute instances list
|
||||
NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS
|
||||
api-b02t us-east1-d n1-standard-1 10.128.0.22 RUNNING
|
||||
api-srkp us-east1-d n1-standard-1 10.128.0.23 RUNNING
|
||||
web-51wd us-east1-d n1-standard-1 10.128.0.21 RUNNING
|
||||
web-6k3n us-east1-d n1-standard-1 10.128.0.20 RUNNING
|
||||
```
|
||||
|
||||
SSH into the host VM:
|
||||
|
||||
```bash
|
||||
❯ gcloud compute ssh api-b02t --tunnel-through-iap
|
||||
No zone specified. Using zone [us-east1-d] for instance: [api-b02t].
|
||||
...
|
||||
|
||||
########################[ Welcome ]########################
|
||||
# You have logged in to the guest OS. #
|
||||
# To access your containers use 'docker attach' command #
|
||||
###########################################################
|
||||
|
||||
|
||||
andrew@api-b02t ~ $ $(docker ps | grep klt- | head -n 1 | awk '{split($NF, arr, "-"); print "docker exec -it "$NF" bin/"arr[2]" remote";}')
|
||||
Erlang/OTP 25 [erts-13.1.4] [source] [64-bit] [smp:1:1] [ds:1:1:10] [async-threads:1] [jit]
|
||||
|
||||
Interactive Elixir (1.14.3) - press Ctrl+C to exit (type h() ENTER for help)
|
||||
iex(api@api-b02t.us-east1-d.c.firezone-staging.internal)1>
|
||||
```
|
||||
|
||||
One-liner to connect to a running application container:
|
||||
|
||||
```bash
|
||||
❯ gcloud compute ssh $(gcloud compute instances list | grep "web-" | tail -n 1 | awk '{ print $1 }') --tunnel-through-iap -- '$(docker ps | grep klt- | head -n 1 | awk '\''{split($NF, arr, "-"); print "docker exec -it " $NF " bin/" arr[2] " remote";}'\'')'
|
||||
|
||||
Interactive Elixir (1.15.2) - press Ctrl+C to exit (type h() ENTER for help)
|
||||
iex(web@web-w2f6.us-east1-d.c.firezone-staging.internal)1>
|
||||
```
|
||||
|
||||
### Quickly provisioning an account
|
||||
|
||||
Useful for onboarding beta customers. See the `Domain.Ops.create_and_provision_account/1`
|
||||
function:
|
||||
|
||||
```elixir
|
||||
iex> Domain.Ops.create_and_provision_account(%{
|
||||
name: "Customer Account",
|
||||
slug: "customer_account",
|
||||
admin_name: "Test User",
|
||||
admin_email: "test@firezone.localhost"
|
||||
})
|
||||
```
|
||||
|
||||
### Creating an account on staging instance using CLI
|
||||
|
||||
```elixir
|
||||
❯ gcloud compute ssh web-3vmw --tunnel-through-iap
|
||||
|
||||
andrew@web-3vmw ~ $ docker ps --format json | jq '"\(.ID) \(.Image)"'
|
||||
"09eff3c0ebe8 us-east1-docker.pkg.dev/firezone-staging/firezone/web:b9c11007a4e230ab28f0138afc98188b1956dfd3"
|
||||
|
||||
andrew@web-3vmw ~ $ docker exec -it 09eff3c0ebe8 bin/web remote
|
||||
Erlang/OTP 26 [erts-14.0.2] [source] [64-bit] [smp:1:1] [ds:1:1:20] [async-threads:1] [jit]
|
||||
|
||||
Interactive Elixir (1.15.2) - press Ctrl+C to exit (type h() ENTER for help)
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)1> {:ok, account} = Domain.Accounts.create_account(%{name: "Firezone", slug: "firezone"})
|
||||
{:ok, ...}
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)2> {:ok, email_provider} = Domain.Auth.create_provider(account, %{name: "Email (OTP)", adapter: :email, adapter_config: %{}})
|
||||
{:ok, ...}
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)3> {:ok, actor} = Domain.Actors.create_actor(account, %{type: :account_admin_user, name: "Andrii Dryga"})
|
||||
{:ok, ...}
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)4> {:ok, identity} = Domain.Auth.upsert_identity(actor, email_provider, %{provider_identifier: "a@firezone.dev", provider_identifier_confirmation: "a@firezone.dev"})
|
||||
...
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)5> context = %Domain.Auth.Context{type: :browser, user_agent: "User-Agent: iOS/12.7 (iPhone) connlib/0.7.412", remote_ip: {127, 0, 0, 1}}
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)6> {:ok, identity} = Domain.Auth.Adapters.Email.request_sign_in_token(identity, context)
|
||||
{:ok, ...}
|
||||
|
||||
iex(web@web-3vmw.us-east1-d.c.firezone-staging.internal)7> Domain.Mailer.AuthEmail.sign_in_link_email(identity) |> Domain.Mailer.deliver()
|
||||
{:ok, %{id: "d24dbe9a-d0f5-4049-ac0d-0df793725a80"}}
|
||||
```
|
||||
|
||||
### Obtaining admin subject on staging
|
||||
|
||||
```elixir
|
||||
|
||||
❯ gcloud compute ssh web-2f4j --tunnel-through-iap -- '$(docker ps | grep klt- | head -n 1 | awk '\''{split($NF, arr, "-"); print "docker exec -it " $NF " bin/" arr[2] " remote";}'\'')'
|
||||
Erlang/OTP 26 [erts-14.0.2] [source] [64-bit] [smp:1:1] [ds:1:1:20] [async-threads:1] [jit]
|
||||
|
||||
Interactive Elixir (1.15.2) - press Ctrl+C to exit (type h() ENTER for help)
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)1> account_id = "REPLACE_ME"
|
||||
...
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)2> context = %Domain.Auth.Context{type: :browser, user_agent: "User-Agent: iOS/12.7 (iPhone) connlib/0.7.412", remote_ip: {127, 0, 0, 1}}
|
||||
...
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)3> [actor | _] = Domain.Actors.Actor.Query.by_type(:account_admin_user) |> Domain.Actors.Actor.Query.by_account_id(account_id) |> Domain.Repo.all()
|
||||
...
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)4> [identity | _] = Domain.Auth.Identity.Query.by_actor_id(actor.id) |> Domain.Repo.all()
|
||||
...
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)5> token = Domain.Auth.create_token(identity, context, "", nil)
|
||||
...
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)6> browser_token = Domain.Tokens.encode_fragment!(token)
|
||||
...
|
||||
|
||||
iex(web@web-2f4j.us-east1-d.c.firezone-staging.internal)7> {:ok, subject} = Domain.Auth.authenticate(browser_token, context)
|
||||
```
|
||||
|
||||
### Rotate relay token
|
||||
|
||||
```elixir
|
||||
|
||||
iex(web@web-xxxx.us-east1-d.c.firezone-staging.internal)1> group = Domain.Repo.one!(Domain.Relays.Group.Query.global())
|
||||
...
|
||||
|
||||
iex(web@web-xxxx.us-east1-d.c.firezone-staging.internal)2> {:ok, token} = Domain.Relays.create_token(group, %{})
|
||||
...
|
||||
```
|
||||
|
||||
## Connection to production Cloud SQL instance
|
||||
|
||||
Install
|
||||
[`cloud-sql-proxy`](https://cloud.google.com/sql/docs/postgres/connect-instance-auth-proxy)
|
||||
(eg. `brew install cloud-sql-proxy`) and run:
|
||||
|
||||
First, obtain a fresh token:
|
||||
|
||||
```bash
|
||||
gcloud auth application-default login
|
||||
```
|
||||
|
||||
```bash
|
||||
cloud-sql-proxy --auto-iam-authn "firezone-prod:us-east1:firezone-prod?address=0.0.0.0&port=9000"
|
||||
```
|
||||
|
||||
Then you can connect to the PostgreSQL using `psql`:
|
||||
|
||||
```bash
|
||||
# Use your work email as username to connect
|
||||
PG_USER=$(gcloud auth list --filter=status:ACTIVE --format="value(account)" | head -n 1)
|
||||
psql "host=localhost port=9000 sslmode=disable dbname=firezone user=${PG_USER}"
|
||||
```
|
||||
|
||||
### Connecting to Cloud SQL instance as the `firezone` user
|
||||
|
||||
Some operations like DROP'ing indexes to recreate them require you to connect as the table owner, which in our case is the `firezone` user.
|
||||
|
||||
The password for this user is randomly generated by Terraform, so to connect as this user you need to obtain the password
|
||||
from the Application configuration inside a running elixir container.
|
||||
|
||||
First, [obtain an iex shell](#connecting-to-a-staging-or-production-instance), then view the password with:
|
||||
|
||||
```elixir
|
||||
Application.get_env(:domain, Domain.Repo)
|
||||
```
|
||||
|
||||
Now, you can connect to the Cloud SQL instance as the `firezone` user:
|
||||
|
||||
```bash
|
||||
psql "host=localhost port=9000 sslmode=disable dbname=firezone user=firezone"
|
||||
```
|
||||
|
||||
## Deploying
|
||||
|
||||
### Apply Terraform changes without deploying new containers
|
||||
|
||||
This can be helpful when you want to quickly iterate over Terraform configuration in staging environment, without
|
||||
having to merge for every single apply attempt.
|
||||
|
||||
Switch to the staging environment:
|
||||
|
||||
```bash
|
||||
cd terraform/environments/staging
|
||||
```
|
||||
|
||||
and apply changes reusing previous container versions:
|
||||
|
||||
```bash
|
||||
terraform apply -var image_tag=$(terraform output -raw image_tag)
|
||||
```
|
||||
|
||||
### Deploying production
|
||||
|
||||
Before deploying, check if the `main` branch has any breaking changes since the last deployment. You can do this by comparing the `main` branch with the last deployed commit, which you can find [here](https://github.com/firezone/firezone/deployments/gcp_production).
|
||||
|
||||
Here is a one-liner to open the comparison in your browser:
|
||||
|
||||
```bash
|
||||
open "https://github.com/firezone/firezone/compare/$(curl -L -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" "https://api.github.com/repos/firezone/firezone/actions/workflows/deploy.yml/runs?status=completed&per_page=1" | jq -r '.workflow_runs[0].head_commit.id')...main"
|
||||
```
|
||||
|
||||
If there are any breaking changes, make sure to confirm with the rest of the team on a rollout strategy before proceeding with any of the steps listed below.
|
||||
|
||||
Then, go to ["Deploy Production"](https://github.com/firezone/firezone/actions/workflows/deploy.yml) CI workflow and click "Run Workflow".
|
||||
|
||||
1. In the form that appears, read the warning and check the checkbox next to it.
|
||||
2. The main branch is selected by default for deployment. To deploy a previous version, enter the commit SHA in the "Image tag to deploy" field.
|
||||
The commit MUST be from the `main` branch.
|
||||
3. Click "Run Workflow" to start the process.
|
||||
|
||||
The workflow will run all the way till the `deploy-production` step (which runs `terraform apply`) and wait for an approval from one of the project owners,
|
||||
message one of your colleagues to approve it.
|
||||
|
||||
#### Deployment Takes Too Long to Complete
|
||||
|
||||
Typically, `terraform apply` takes around 15 minutes in production. If it's taking longer (or you want to monitor the status), here are a few things you can check:
|
||||
|
||||
1. **Monitor the run status in [Terraform Cloud](https://app.terraform.io/app/firezone/workspaces/production/runs).**
|
||||
2. **Check the status of Instance Groups in [Google Cloud Console](https://console.cloud.google.com/compute/instanceGroups/list?project=firezone-prod).**
|
||||
3. [Check the logs](#viewing-logs) for the deployed instances.
|
||||
|
||||
For instance groups stuck in the `UPDATING` state:
|
||||
|
||||
- Open the group and look for any errors. Typically, if deployment is stuck, you'll find one instance in the group with an error (and a recent creation time), while the others are pending updates.
|
||||
- To quickly view logs for that instance, click the instance name and then click the `Logging` link.
|
||||
|
||||
_Do not panic—our production environment should remain stable. GCP and Terraform are designed to keep old instances running until the new ones are healthy._
|
||||
|
||||
#### Common Reasons for Deployment Issues
|
||||
|
||||
**1. A Bug in the Code**
|
||||
|
||||
- This can either crash the instance or make it unresponsive (you’ll notice failing health checks and error logs).
|
||||
- If this happens, ensure there were no database migrations as part of the changes (check `priv/repo/migrations`).
|
||||
- If no migrations are involved, rollback the deployment. To do this, cancel the currently running deployment,
|
||||
find the last successful deployment in Terraform Cloud, copy the `image_tag` from its output, and run:
|
||||
|
||||
```bash
|
||||
cd terraform/environments/production
|
||||
terraform apply -var image_tag=<LAST_SUCCESSFUL_IMAGE_TAG_HERE>
|
||||
```
|
||||
|
||||
- You can also rollback a specific component by overriding its image tag in the `terraform apply` command:
|
||||
|
||||
```bash
|
||||
terraform apply -var image_tag=<CURRENT_IMAGE_TAG> -var <COMPONENT_NAME>_image_tag=<LAST_SUCCESSFUL_IMAGE_TAG_HERE>
|
||||
```
|
||||
|
||||
_If there were migrations and they’ve already been applied, proceed to the next option._
|
||||
|
||||
**2. An Issue with the Migration**
|
||||
|
||||
- You’ll notice failing health checks and error logs related to the migration.
|
||||
- You can either:
|
||||
- Fix the data causing the migration to fail (refer to [Connection to Production Cloud SQL Instance](#connection-to-production-cloud-sql-instance)).
|
||||
- Fix the migration code and redeploy.
|
||||
|
||||
**3. Insufficient Resources to Deploy New Instances**
|
||||
|
||||
- If there are no errors but updates are pending, there might not be enough resources to deploy new instances.
|
||||
- This can be found in the Errors tab of the instance group.
|
||||
|
||||
Typically, this issue resolves itself as old reservations are freed up.
|
||||
|
||||
## Monitoring and Troubleshooting
|
||||
|
||||
### Viewing logs
|
||||
|
||||
Logs can be viewed via the [Logs Explorer](https://console.cloud.google.com/logs)
|
||||
in GCP, or via the `gcloud` CLI:
|
||||
|
||||
```bash
|
||||
# First, login
|
||||
> gcloud auth login
|
||||
|
||||
# Always make sure you're in the correct environment
|
||||
> gcloud config get project
|
||||
firezone-staging
|
||||
|
||||
# Now you can stream logs directly to your terminal.
|
||||
|
||||
############
|
||||
# Examples #
|
||||
############
|
||||
|
||||
# Stream all Elixir error logs:
|
||||
> gcloud logging read "jsonPayload.message.severity=ERROR"
|
||||
|
||||
# Stream Web app logs (portal UI):
|
||||
> gcloud logging read 'jsonPayload."cos.googleapis.com/container_name":web'
|
||||
|
||||
# Stream API app logs (connlib control plane):
|
||||
> gcloud logging read 'jsonPayload."cos.googleapis.com/container_name":api'
|
||||
|
||||
# For more info on the filter expression syntax, see:
|
||||
# https://cloud.google.com/logging/docs/view/logging-query-language
|
||||
```
|
||||
|
||||
Here is a helpful filter to show all errors and crashes:
|
||||
|
||||
```
|
||||
resource.type="gce_instance"
|
||||
(severity>=ERROR OR "Kernel pid terminated" OR "Crash dump is being written")
|
||||
-protoPayload.@type="type.googleapis.com/google.cloud.audit.AuditLog"
|
||||
-logName:"/logs/GCEGuestAgent"
|
||||
-logName:"/logs/OSConfigAgent"
|
||||
-logName:"/logs/ops-agent-fluent-bit"
|
||||
```
|
||||
|
||||
An alert will be sent to the `#feed-production` Slack channel when a new error is logged that matches this filter.
|
||||
You can also see all errors in [Google Cloud Error Reporting](https://console.cloud.google.com/errors?project=firezone-prod).
|
||||
|
||||
Sometimes logs will not provide enough context to understand the issue. In those cases you can
|
||||
try to filter by the `trace` field to get more information. Copy the `trace` value from a log entry
|
||||
and use it in the filter:
|
||||
|
||||
```
|
||||
resource.type="gce_instance"
|
||||
jsonPayload.trace:"<trace_id>"
|
||||
```
|
||||
|
||||
Note: If you simply click "Show entries for this trace" in the log entry, it will
|
||||
automatically **append** the filter for you. You might want to remove the rest of the filters
|
||||
so you can see all logs for that trace.
|
||||
|
||||
## Viewing metrics
|
||||
|
||||
Metrics can be viewed via the [Metrics Explorer](https://console.cloud.google.com/monitoring/metrics-explorer) in GCP.
|
||||
|
||||
## Viewing traces
|
||||
|
||||
Traces can be viewed via the [Trace Explorer](https://console.cloud.google.com/traces/list) in GCP.
|
||||
They are mostly helpful for debugging Clients, Relays and Gateways.
|
||||
|
||||
For example, if you want to find all traces for client management processes, you can use the following filter:
|
||||
|
||||
```
|
||||
RootSpan: client.connect
|
||||
```
|
||||
|
||||
Then you can drill down either by using a `client_id: <ID>` or an `account_id: <ID>`.
|
||||
|
||||
Note: For WS API processes, the total trace duration might not be helpful since a single trace is defined for
|
||||
the entire connection lifespan.
|
||||
See [CONTRIBUTING](../docs/CONTRIBUTING.md)
|
||||
|
||||
@@ -35,7 +35,6 @@
|
||||
zenity
|
||||
desktop-file-utils
|
||||
android-tools
|
||||
terraform
|
||||
llvmPackages.bintools-unwrapped
|
||||
bpftools
|
||||
|
||||
|
||||
13
terraform/.gitignore
vendored
13
terraform/.gitignore
vendored
@@ -1,13 +0,0 @@
|
||||
# Ignore Terraform state and temporary files
|
||||
**/.terraform
|
||||
**/*.tfstate.backup
|
||||
**/terraform.tfstate.d
|
||||
**/terraform.tfvars
|
||||
out.plan
|
||||
*.tfstate
|
||||
|
||||
# Don't ever commit these files to git
|
||||
*.p12
|
||||
*id_rsa*
|
||||
*.key
|
||||
*.csr
|
||||
Submodule terraform/environments deleted from ec1cb13935
1
terraform/examples/.gitignore
vendored
1
terraform/examples/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
**/.terraform.*
|
||||
@@ -1,16 +0,0 @@
|
||||
# Terraform Examples
|
||||
|
||||
This directory contains examples of how to use Terraform to deploy Firezone
|
||||
Gateways to your infrastructure.
|
||||
|
||||
## Examples
|
||||
|
||||
Each example below is self-contained and includes a `README.md` with
|
||||
instructions on how to deploy the example.
|
||||
|
||||
### Google Cloud Platform (GCP)
|
||||
|
||||
- [NAT Gateway](./google-cloud/nat-gateway): This example shows how to deploy
|
||||
one or more Firezone Gateways in a single GCP VPC that is configured with a
|
||||
Cloud NAT for egress. Read this if you're looking to deploy Firezone Gateways
|
||||
behind a single, shared static IP address on GCP.
|
||||
@@ -1,4 +0,0 @@
|
||||
# Deploy Firezone on GCP with Terraform
|
||||
|
||||
See [our docs for a detailed guide](/kb/automate/terraform/gcp) on deploying
|
||||
Firezone on GCP with Terraform using this example.
|
||||
@@ -1,212 +0,0 @@
|
||||
module "google_firezone_gateway" {
|
||||
source = "github.com/firezone/firezone/terraform/modules/google-cloud/apps/gateway-region-instance-group"
|
||||
# If you are changing this example along with the module, you should use the local path:
|
||||
# source = "../../../modules/google-cloud/apps/gateway-region-instance-group"
|
||||
|
||||
project_id = var.project_id
|
||||
|
||||
compute_network = google_compute_network.firezone.id
|
||||
compute_subnetwork = google_compute_subnetwork.firezone.id
|
||||
|
||||
compute_instance_replicas = var.replicas
|
||||
compute_instance_type = var.machine_type
|
||||
compute_region = var.region
|
||||
|
||||
# Since we are behind a NAT gateway, we don't need public IP addresses
|
||||
# to be automatically provisioned for the instances
|
||||
compute_provision_public_ipv6_address = false
|
||||
compute_provision_public_ipv4_address = false
|
||||
|
||||
vsn = "latest"
|
||||
|
||||
observability_log_level = "info"
|
||||
|
||||
token = var.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Google Cloud Project
|
||||
################################################################################
|
||||
|
||||
variable "project_id" {
|
||||
type = string
|
||||
description = "Google Cloud Project ID"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Compute
|
||||
################################################################################
|
||||
|
||||
variable "region" {
|
||||
type = string
|
||||
description = "Region to deploy the Gateway(s) in."
|
||||
}
|
||||
|
||||
variable "replicas" {
|
||||
type = number
|
||||
description = "Number of Gateway replicas to deploy in the availability zone."
|
||||
default = 3
|
||||
}
|
||||
|
||||
variable "machine_type" {
|
||||
type = string
|
||||
default = "n1-standard-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Observability
|
||||
################################################################################
|
||||
|
||||
variable "log_level" {
|
||||
type = string
|
||||
nullable = false
|
||||
default = "info"
|
||||
|
||||
description = "Sets RUST_LOG environment variable to configure the Gateway's log level. Default: 'info'."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Firezone
|
||||
################################################################################
|
||||
|
||||
variable "token" {
|
||||
type = string
|
||||
description = "Gateway token to use for authentication."
|
||||
}
|
||||
|
||||
variable "subnet_cidr" {
|
||||
type = string
|
||||
description = "CIDR Range to use for subnet where Gateway(s) are deployed"
|
||||
}
|
||||
|
||||
provider "google" {
|
||||
project = var.project_id
|
||||
region = var.region
|
||||
}
|
||||
|
||||
resource "google_project_service" "compute-api" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
}
|
||||
|
||||
resource "google_service_account" "firezone" {
|
||||
account_id = "firezone-gateway"
|
||||
display_name = "Firezone Gateway Service Account"
|
||||
}
|
||||
|
||||
# We create a new network and subnetwork. In real-world scenarios,
|
||||
# you would likely use an existing ones where your application is deployed.
|
||||
resource "google_compute_network" "firezone" {
|
||||
name = "firezone-gateway"
|
||||
auto_create_subnetworks = false
|
||||
enable_ula_internal_ipv6 = true
|
||||
depends_on = [google_project_service.compute-api]
|
||||
}
|
||||
|
||||
resource "google_compute_subnetwork" "firezone" {
|
||||
project = var.project_id
|
||||
|
||||
name = "firezone-gateways"
|
||||
|
||||
stack_type = "IPV4_IPV6"
|
||||
|
||||
ip_cidr_range = var.subnet_cidr
|
||||
region = var.region
|
||||
network = google_compute_network.firezone.id
|
||||
|
||||
ipv6_access_type = "INTERNAL"
|
||||
|
||||
private_ip_google_access = true
|
||||
}
|
||||
|
||||
# Allocate IPv4 addresses for the NAT gateway
|
||||
resource "google_compute_address" "ipv4" {
|
||||
project = var.project_id
|
||||
name = "firezone-gateway-nat-ipv4"
|
||||
ip_version = "IPV4"
|
||||
}
|
||||
|
||||
# Create a router and NAT to allow outbound traffic
|
||||
resource "google_compute_router" "firezone" {
|
||||
name = "firezone-gateway-router"
|
||||
network = google_compute_network.firezone.id
|
||||
}
|
||||
|
||||
resource "google_compute_router_nat" "firezone" {
|
||||
name = "firezone-gateway-nat"
|
||||
router = google_compute_router.firezone.name
|
||||
|
||||
nat_ip_allocate_option = "MANUAL_ONLY"
|
||||
nat_ips = [
|
||||
google_compute_address.ipv4.self_link,
|
||||
]
|
||||
|
||||
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
|
||||
subnetwork {
|
||||
name = google_compute_subnetwork.firezone.id
|
||||
source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
|
||||
}
|
||||
}
|
||||
|
||||
# Configure Firewall to allow outbound traffic
|
||||
resource "google_compute_firewall" "gateways-egress-ipv4" {
|
||||
project = var.project_id
|
||||
|
||||
name = "firezone-gateways-egress-ipv4"
|
||||
network = google_compute_network.firezone.id
|
||||
direction = "EGRESS"
|
||||
|
||||
target_tags = module.gateways.target_tags
|
||||
destination_ranges = ["0.0.0.0/0"]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "gateways-egress-ipv6" {
|
||||
project = var.project_id
|
||||
|
||||
name = "firezone-gateways-egress-ipv6"
|
||||
network = google_compute_network.firezone.id
|
||||
direction = "EGRESS"
|
||||
|
||||
target_tags = module.gateways.target_tags
|
||||
destination_ranges = ["::/0"]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
}
|
||||
|
||||
# Allow SSH access to the gateways. This is optional but helpful for debugging
|
||||
# and administration of the gateways. Since they're not publicly accessible,
|
||||
# you need to tunnel through IAP:
|
||||
#
|
||||
# gcloud compute instances list --project <PROJECT_ID>
|
||||
# gcloud compute ssh --tunnel-through-iap --project <PROJECT_ID> gateway-XXXX
|
||||
resource "google_compute_firewall" "ssh-rule" {
|
||||
name = "allow-gateways-ssh"
|
||||
network = google_compute_network.firezone.id
|
||||
|
||||
allow {
|
||||
protocol = "tcp"
|
||||
ports = ["22"]
|
||||
}
|
||||
|
||||
target_tags = module.gateways.target_tags
|
||||
source_ranges = ["35.235.240.0/20"] // IAP CIDR
|
||||
}
|
||||
|
||||
output "static_ip_addresses" {
|
||||
value = [google_compute_address.ipv4.address]
|
||||
}
|
||||
|
||||
terraform {
|
||||
required_providers {
|
||||
google = {
|
||||
source = "hashicorp/google"
|
||||
version = "5.20"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
resource "aws_instance" "this" {
|
||||
ami = var.ami
|
||||
instance_type = var.instance_type
|
||||
monitoring = var.monitoring
|
||||
subnet_id = var.subnet_id
|
||||
vpc_security_group_ids = var.vpc_security_group_ids
|
||||
associate_public_ip_address = var.associate_public_ip_address
|
||||
user_data_replace_on_change = true
|
||||
|
||||
key_name = var.key_name
|
||||
user_data = file("${path.module}/scripts/setup.sh")
|
||||
|
||||
root_block_device {
|
||||
volume_type = "gp3"
|
||||
volume_size = 20
|
||||
}
|
||||
|
||||
tags = merge({ "Name" = var.name }, var.instance_tags, var.tags)
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
output "id" {
|
||||
description = "The ID of the instance"
|
||||
value = try(
|
||||
aws_instance.this.id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "arn" {
|
||||
description = "The ARN of the instance"
|
||||
value = try(
|
||||
aws_instance.this.arn,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "instance_state" {
|
||||
description = "The state of the instance"
|
||||
value = try(
|
||||
aws_instance.this.instance_state,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "primary_network_interface_id" {
|
||||
description = "The ID of the instance's primary network interface"
|
||||
value = try(
|
||||
aws_instance.this.primary_network_interface_id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached"
|
||||
value = try(
|
||||
aws_instance.this.public_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
description = "The private IP address assigned to the instance"
|
||||
value = try(
|
||||
aws_instance.this.private_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "ipv6_addresses" {
|
||||
description = "The IPv6 address assigned to the instance, if applicable"
|
||||
value = try(
|
||||
aws_instance.this.ipv6_addresses,
|
||||
[],
|
||||
)
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
# Install fail2ban
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y fail2ban
|
||||
|
||||
ORIG_CONF="/etc/fail2ban/jail.conf"
|
||||
LOCAL_CONF="/etc/fail2ban/jail.local"
|
||||
|
||||
if [ -f "${ORIG_CONF}" ]; then
|
||||
# Configure fail2ban
|
||||
sudo cp "${ORIG_CONF}" "${LOCAL_CONF}"
|
||||
sudo sed -i 's/^bantime\s*= 10m$/bantime = 30m/' "${LOCAL_CONF}"
|
||||
sudo sed -i 's/^findtime\s*= 10m/findtime = 30m/' "${LOCAL_CONF}"
|
||||
sudo sed -i 's/maxretry\s*= 5/maxretry = 3/' "${LOCAL_CONF}"
|
||||
|
||||
# Enable and Start fail2ban
|
||||
sudo systemctl enable --now fail2ban
|
||||
else
|
||||
# If fail2ban is not on the sysytem, something has gone wrong
|
||||
echo "Fail2Ban was not found on the system! Exiting..."
|
||||
fi
|
||||
|
||||
# Turn on automatic upgrades/reboots
|
||||
UPGRADE_CONF_FILE="/etc/apt/apt.conf.d/50unattended-upgrades"
|
||||
|
||||
sudo cp $UPGRADE_CONF_FILE /tmp/unattended-upgrades.conf
|
||||
sudo sed -i 's/\/\/\(\s*"\${distro_id}:\${distro_codename}-updates";\)/ \1/' "${UPGRADE_CONF_FILE}"
|
||||
sudo sed -i 's/\/\/\(Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";\)/\1/' "${UPGRADE_CONF_FILE}"
|
||||
sudo sed -i 's/\/\/\(Unattended-Upgrade::Automatic-Reboot \)"false";/\1 "true";/' "${UPGRADE_CONF_FILE}"
|
||||
sudo sed -i 's/\/\/\(Unattended-Upgrade::Automatic-Reboot-Time \)"02:00";/\1 "07:00";/' "${UPGRADE_CONF_FILE}"
|
||||
sudo sed -i 's/\/\/\(Unattended-Upgrade::Automatic-Reboot-WithUsers "true";\)/\1/' "${UPGRADE_CONF_FILE}"
|
||||
@@ -1,82 +0,0 @@
|
||||
variable "ami" {
|
||||
type = string
|
||||
description = "AMI ID for the EC2 instance"
|
||||
default = "ami-0b2a9065573b0a9c9" # Ubuntu 22.04 in us-east-1
|
||||
|
||||
validation {
|
||||
condition = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
|
||||
error_message = "Please provide a valid value for variable AMI."
|
||||
}
|
||||
}
|
||||
|
||||
variable "associate_public_ip_address" {
|
||||
description = "Whether to associate a public IP address with an instance in a VPC"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The type of instance to start"
|
||||
type = string
|
||||
default = "t3.micro"
|
||||
}
|
||||
|
||||
variable "instance_tags" {
|
||||
description = "Additional tags for the instance"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "ipv6_addresses" {
|
||||
description = "Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "key_name" {
|
||||
description = "Key name of the Key Pair to use for the instance; which can be managed using the `aws_key_pair` resource"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "monitoring" {
|
||||
description = "If true, the launched EC2 instance will have detailed monitoring enabled"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name to be used on EC2 instance created"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "private_ip" {
|
||||
description = "Private IP address to associate with the instance in a VPC"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "root_block_device" {
|
||||
description = "Customize details about the root block device of the instance. See Block Devices below for details"
|
||||
type = list(any)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "The VPC Subnet ID to launch in"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A mapping of tags to assign to the resource"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "vpc_security_group_ids" {
|
||||
description = "A list of security group IDs to associate with"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
resource "aws_instance" "this" {
|
||||
ami = var.ami
|
||||
instance_type = var.instance_type
|
||||
monitoring = var.monitoring
|
||||
subnet_id = var.subnet_id
|
||||
vpc_security_group_ids = var.vpc_security_group_ids
|
||||
associate_public_ip_address = var.associate_public_ip_address
|
||||
private_ip = var.private_ip
|
||||
key_name = var.key_name
|
||||
user_data_replace_on_change = true
|
||||
|
||||
user_data = templatefile("${path.module}/templates/cloud-init.yaml", {
|
||||
container_name = "coredns"
|
||||
container_image = "coredns/coredns"
|
||||
host_ip = var.private_ip
|
||||
dns_records = concat([{ name = "coredns", value = var.private_ip }], var.dns_records)
|
||||
})
|
||||
|
||||
root_block_device {
|
||||
volume_type = "gp3"
|
||||
volume_size = 15
|
||||
}
|
||||
|
||||
tags = merge({ "Name" = var.name }, var.instance_tags, var.tags)
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
output "id" {
|
||||
description = "The ID of the instance"
|
||||
value = try(
|
||||
aws_instance.this.id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "arn" {
|
||||
description = "The ARN of the instance"
|
||||
value = try(
|
||||
aws_instance.this.arn,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "instance_state" {
|
||||
description = "The state of the instance"
|
||||
value = try(
|
||||
aws_instance.this.instance_state,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "primary_network_interface_id" {
|
||||
description = "The ID of the instance's primary network interface"
|
||||
value = try(
|
||||
aws_instance.this.primary_network_interface_id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached"
|
||||
value = try(
|
||||
aws_instance.this.public_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
description = "The private IP address assigned to the instance"
|
||||
value = try(
|
||||
aws_instance.this.private_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "ipv6_addresses" {
|
||||
description = "The IPv6 address assigned to the instance, if applicable"
|
||||
value = try(
|
||||
aws_instance.this.ipv6_addresses,
|
||||
[],
|
||||
)
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/coredns/Corefile
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
.:53 {
|
||||
forward . 1.1.1.1 9.9.9.9
|
||||
log
|
||||
errors
|
||||
}
|
||||
|
||||
firezone.internal:53 {
|
||||
file /etc/coredns/db.firezone.internal
|
||||
log
|
||||
errors
|
||||
}
|
||||
|
||||
- path: /etc/coredns/db.firezone.internal
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
$ORIGIN firezone.internal.
|
||||
$TTL 1h
|
||||
@ IN SOA ns1.firezone.internal. admin.firezone.internal. (
|
||||
2024010501 ; Serial
|
||||
1h ; Refresh (1 hour)
|
||||
10m ; Retry (10 minutes)
|
||||
7d ; Expire (7 days)
|
||||
1h ; Minimum TTL (1 hour)
|
||||
)
|
||||
|
||||
%{ for record in dns_records ~}
|
||||
${record.name} IN A ${record.value}
|
||||
%{ endfor ~}
|
||||
|
||||
- path: /etc/systemd/system/coredns.service
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Start a CoreDNS container
|
||||
|
||||
[Service]
|
||||
TimeoutStartSec=0
|
||||
Restart=always
|
||||
ExecStartPre=/usr/bin/docker pull ${container_image}
|
||||
ExecStart=/bin/sh -c 'docker run --name=${container_name} -p ${host_ip}:53:53 -p ${host_ip}:53:53/udp -v /etc/coredns:/etc/coredns --restart=unless-stopped --pull=always ${container_image} -conf /etc/coredns/Corefile'
|
||||
ExecStop=/usr/bin/docker stop coredns
|
||||
ExecStopPost=/usr/bin/docker rm coredns
|
||||
|
||||
runcmd:
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
||||
- echo '{"experimental":true,"ip6tables":true,"ipv6":true,"fixed-cidr-v6":"fd00::/80"}' | sudo tee -a /etc/docker/daemon.json
|
||||
- sudo usermod -aG docker ubuntu
|
||||
- sudo systemctl enable docker
|
||||
- sudo systemctl stop docker
|
||||
- sudo systemctl start docker
|
||||
- sudo systemctl daemon-reload
|
||||
- sudo sed -r -i 's/^\s*(.* IN A .*)$/\1/' /etc/coredns/db.firezone.internal
|
||||
- sudo systemctl start coredns.service
|
||||
@@ -1,128 +0,0 @@
|
||||
variable "ami" {
|
||||
description = "AMI ID for the EC2 instance"
|
||||
type = string
|
||||
default = "ami-0b2a9065573b0a9c9" # Ubuntu 22.04 in us-east-1
|
||||
|
||||
validation {
|
||||
condition = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
|
||||
error_message = "Please provide a valid value for variable AMI."
|
||||
}
|
||||
}
|
||||
|
||||
variable "api_url" {
|
||||
description = "URL of the control plane endpoint."
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
|
||||
variable "application_name" {
|
||||
description = "Name of the application. Defaults to value of `var.image_name` with `_` replaced to `-`."
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "application_version" {
|
||||
description = "Version of the application. Defaults to value of `var.image_tag`."
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "associate_public_ip_address" {
|
||||
description = "Whether to associate a public IP address with an instance in a VPC"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "dns_records" {
|
||||
description = "List of DNS records to set for CoreDNS."
|
||||
type = list(object({
|
||||
name = string
|
||||
value = string
|
||||
}))
|
||||
default = []
|
||||
nullable = false
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The type of instance to start"
|
||||
type = string
|
||||
default = "t3.micro"
|
||||
}
|
||||
|
||||
variable "instance_tags" {
|
||||
description = "Additional tags for the instance"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "ipv6_addresses" {
|
||||
description = "Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "key_name" {
|
||||
description = "Key name of the Key Pair to use for the instance; which can be managed using the `aws_key_pair` resource"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "monitoring" {
|
||||
description = "If true, the launched EC2 instance will have detailed monitoring enabled"
|
||||
type = bool
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name to be used on EC2 instance created"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "observability_log_level" {
|
||||
description = "Sets RUST_LOG environment variable which applications should use to configure Rust Logger. Default: 'info'."
|
||||
type = string
|
||||
nullable = false
|
||||
default = "info"
|
||||
|
||||
}
|
||||
|
||||
variable "private_ip" {
|
||||
description = "Private IP address to associate with the instance in a VPC"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "root_block_device" {
|
||||
description = "Customize details about the root block device of the instance. See Block Devices below for details"
|
||||
type = list(any)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "The VPC Subnet ID to launch in"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A mapping of tags to assign to the resource"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "token" {
|
||||
description = "Portal token to use for authentication."
|
||||
type = string
|
||||
default = null
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "vpc_security_group_ids" {
|
||||
description = "A list of security group IDs to associate with"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
locals {
|
||||
application_name = var.application_name != null ? var.application_name : var.image
|
||||
application_version = var.application_version != null ? var.application_version : var.image_tag
|
||||
|
||||
environment_variables = concat([
|
||||
{
|
||||
name = "RUST_LOG"
|
||||
value = var.observability_log_level
|
||||
},
|
||||
{
|
||||
name = "RUST_BACKTRACE"
|
||||
value = "full"
|
||||
},
|
||||
{
|
||||
name = "FIREZONE_TOKEN"
|
||||
value = var.token
|
||||
},
|
||||
{
|
||||
name = "FIREZONE_API_URL"
|
||||
value = var.api_url
|
||||
}
|
||||
], var.application_environment_variables)
|
||||
}
|
||||
|
||||
resource "aws_instance" "this" {
|
||||
ami = var.ami
|
||||
instance_type = var.instance_type
|
||||
monitoring = var.monitoring
|
||||
subnet_id = var.subnet_id
|
||||
vpc_security_group_ids = var.vpc_security_group_ids
|
||||
associate_public_ip_address = var.associate_public_ip_address
|
||||
private_ip = var.private_ip
|
||||
key_name = var.key_name
|
||||
user_data_replace_on_change = true
|
||||
|
||||
user_data = templatefile("${path.module}/templates/cloud-init.yaml", {
|
||||
container_name = local.application_name != null ? local.application_name : var.image
|
||||
container_image = "${var.container_registry}/${var.image_repo}/${var.image}:${var.image_tag}"
|
||||
container_environment = local.environment_variables
|
||||
})
|
||||
|
||||
root_block_device {
|
||||
volume_type = "gp3"
|
||||
volume_size = 20
|
||||
}
|
||||
|
||||
tags = merge({ "Name" = var.name }, var.instance_tags, var.tags)
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
output "id" {
|
||||
description = "The ID of the instance"
|
||||
value = try(
|
||||
aws_instance.this.id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "arn" {
|
||||
description = "The ARN of the instance"
|
||||
value = try(
|
||||
aws_instance.this.arn,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "instance_state" {
|
||||
description = "The state of the instance"
|
||||
value = try(
|
||||
aws_instance.this.instance_state,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "primary_network_interface_id" {
|
||||
description = "The ID of the instance's primary network interface"
|
||||
value = try(
|
||||
aws_instance.this.primary_network_interface_id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached"
|
||||
value = try(
|
||||
aws_instance.this.public_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
description = "The private IP address assigned to the instance"
|
||||
value = try(
|
||||
aws_instance.this.private_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "ipv6_addresses" {
|
||||
description = "The IPv6 address assigned to the instance, if applicable"
|
||||
value = try(
|
||||
aws_instance.this.ipv6_addresses,
|
||||
[],
|
||||
)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/firezone-gateway/env
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
%{ for env in container_environment ~}
|
||||
${env.name}=${env.value}
|
||||
%{ endfor ~}
|
||||
|
||||
- path: /etc/systemd/system/gateway.service
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Start an Firezone Gateway container
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
TimeoutStartSec=0
|
||||
Restart=always
|
||||
ExecStartPre=-/usr/bin/docker stop ${container_name}
|
||||
ExecStartPre=-/usr/bin/docker rm ${container_name}
|
||||
ExecStartPre=/usr/bin/docker pull ${container_image}
|
||||
ExecStart=/bin/sh -c 'docker run --rm --name=${container_name} --cap-add=NET_ADMIN --volume /etc/firezone --sysctl net.ipv4.ip_forward=1 --sysctl net.ipv4.conf.all.src_valid_mark=1 --sysctl net.ipv6.conf.all.disable_ipv6=0 --sysctl net.ipv6.conf.all.forwarding=1 --sysctl net.ipv6.conf.default.forwarding=1 --device="/dev/net/tun:/dev/net/tun" --env FIREZONE_NAME=$(hostname) --env FIREZONE_ID=$(echo $RANDOM$(hostname) | md5sum | head -c 20; echo;) --env-file="/etc/firezone-gateway/env" ${container_image}'
|
||||
ExecStop=/usr/bin/docker stop gateway
|
||||
ExecStopPost=/usr/bin/docker rm gateway
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
runcmd:
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
||||
- echo '{"experimental":true,"ip6tables":true,"ipv6":true,"fixed-cidr-v6":"fd00::/80"}' | sudo tee -a /etc/docker/daemon.json
|
||||
- sudo usermod -aG docker ubuntu
|
||||
- sudo systemctl enable docker
|
||||
- sudo systemctl stop docker
|
||||
- sudo systemctl start docker
|
||||
- sudo systemctl daemon-reload
|
||||
- sudo systemctl enable --now gateway.service
|
||||
@@ -1,151 +0,0 @@
|
||||
variable "ami" {
|
||||
description = "AMI ID for the EC2 instance"
|
||||
type = string
|
||||
default = "ami-0b2a9065573b0a9c9" # Ubuntu 22.04 in us-east-1
|
||||
|
||||
validation {
|
||||
condition = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
|
||||
error_message = "Please provide a valid value for variable AMI."
|
||||
}
|
||||
}
|
||||
|
||||
variable "api_url" {
|
||||
description = "URL of the control plane endpoint."
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "application_environment_variables" {
|
||||
description = "List of environment variables to set for all application containers."
|
||||
type = list(object({
|
||||
name = string
|
||||
value = string
|
||||
}))
|
||||
default = []
|
||||
nullable = false
|
||||
}
|
||||
|
||||
variable "application_name" {
|
||||
description = "Name of the application. Defaults to value of `var.image_name` with `_` replaced to `-`."
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "application_version" {
|
||||
description = "Version of the application. Defaults to value of `var.image_tag`."
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "associate_public_ip_address" {
|
||||
description = "Whether to associate a public IP address with an instance in a VPC"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "container_registry" {
|
||||
description = "Container registry URL to pull the image from."
|
||||
type = string
|
||||
nullable = false
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
description = "Container image used to deploy the application."
|
||||
type = string
|
||||
nullable = false
|
||||
}
|
||||
|
||||
variable "image_repo" {
|
||||
description = "Repo of a container image used to deploy the application."
|
||||
type = string
|
||||
nullable = false
|
||||
}
|
||||
|
||||
variable "image_tag" {
|
||||
description = "Container image used to deploy the application."
|
||||
type = string
|
||||
nullable = false
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The type of instance to start"
|
||||
type = string
|
||||
default = "t3.micro"
|
||||
}
|
||||
|
||||
variable "instance_tags" {
|
||||
description = "Additional tags for the instance"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "ipv6_addresses" {
|
||||
description = "Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "key_name" {
|
||||
description = "Key name of the Key Pair to use for the instance; which can be managed using the `aws_key_pair` resource"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "monitoring" {
|
||||
description = "If true, the launched EC2 instance will have detailed monitoring enabled"
|
||||
type = bool
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name to be used on EC2 instance created"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "observability_log_level" {
|
||||
description = "Sets RUST_LOG environment variable which applications should use to configure Rust Logger. Default: 'info'."
|
||||
type = string
|
||||
nullable = false
|
||||
default = "info"
|
||||
|
||||
}
|
||||
|
||||
variable "private_ip" {
|
||||
description = "Private IP address to associate with the instance in a VPC"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "root_block_device" {
|
||||
description = "Customize details about the root block device of the instance. See Block Devices below for details"
|
||||
type = list(any)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "The VPC Subnet ID to launch in"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A mapping of tags to assign to the resource"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "token" {
|
||||
description = "Portal token to use for authentication."
|
||||
type = string
|
||||
default = null
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "vpc_security_group_ids" {
|
||||
description = "A list of security group IDs to associate with"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
resource "aws_instance" "this" {
|
||||
ami = var.ami
|
||||
instance_type = var.instance_type
|
||||
monitoring = var.monitoring
|
||||
subnet_id = var.subnet_id
|
||||
vpc_security_group_ids = var.vpc_security_group_ids
|
||||
associate_public_ip_address = var.associate_public_ip_address
|
||||
private_ip = var.private_ip
|
||||
user_data_replace_on_change = true
|
||||
|
||||
key_name = var.key_name
|
||||
user_data = file("${path.module}/scripts/setup.sh")
|
||||
|
||||
root_block_device {
|
||||
volume_type = "gp3"
|
||||
volume_size = 20
|
||||
}
|
||||
|
||||
tags = merge({ "Name" = var.name }, var.instance_tags, var.tags)
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
output "id" {
|
||||
description = "The ID of the instance"
|
||||
value = try(
|
||||
aws_instance.this.id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "arn" {
|
||||
description = "The ARN of the instance"
|
||||
value = try(
|
||||
aws_instance.this.arn,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "instance_state" {
|
||||
description = "The state of the instance"
|
||||
value = try(
|
||||
aws_instance.this.instance_state,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "primary_network_interface_id" {
|
||||
description = "The ID of the instance's primary network interface"
|
||||
value = try(
|
||||
aws_instance.this.primary_network_interface_id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached"
|
||||
value = try(
|
||||
aws_instance.this.public_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
description = "The private IP address assigned to the instance"
|
||||
value = try(
|
||||
aws_instance.this.private_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "ipv6_addresses" {
|
||||
description = "The IPv6 address assigned to the instance, if applicable"
|
||||
value = try(
|
||||
aws_instance.this.ipv6_addresses,
|
||||
[],
|
||||
)
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y docker-ce
|
||||
sudo usermod -aG docker ubuntu
|
||||
|
||||
docker run \
|
||||
--restart=unless-stopped \
|
||||
--name=httpbin \
|
||||
-p "80:80" \
|
||||
kong/httpbin
|
||||
@@ -1,82 +0,0 @@
|
||||
variable "ami" {
|
||||
type = string
|
||||
description = "AMI ID for the EC2 instance"
|
||||
default = "ami-0b2a9065573b0a9c9" # Ubuntu 22.04 in us-east-1
|
||||
|
||||
validation {
|
||||
condition = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
|
||||
error_message = "Please provide a valid value for variable AMI."
|
||||
}
|
||||
}
|
||||
|
||||
variable "associate_public_ip_address" {
|
||||
description = "Whether to associate a public IP address with an instance in a VPC"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The type of instance to start"
|
||||
type = string
|
||||
default = "t3.micro"
|
||||
}
|
||||
|
||||
variable "instance_tags" {
|
||||
description = "Additional tags for the instance"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "ipv6_addresses" {
|
||||
description = "Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "key_name" {
|
||||
description = "Key name of the Key Pair to use for the instance; which can be managed using the `aws_key_pair` resource"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "monitoring" {
|
||||
description = "If true, the launched EC2 instance will have detailed monitoring enabled"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name to be used on EC2 instance created"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "private_ip" {
|
||||
description = "Private IP address to associate with the instance in a VPC"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "root_block_device" {
|
||||
description = "Customize details about the root block device of the instance. See Block Devices below for details"
|
||||
type = list(any)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "The VPC Subnet ID to launch in"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A mapping of tags to assign to the resource"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "vpc_security_group_ids" {
|
||||
description = "A list of security group IDs to associate with"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
resource "aws_instance" "this" {
|
||||
ami = var.ami
|
||||
instance_type = var.instance_type
|
||||
monitoring = var.monitoring
|
||||
subnet_id = var.subnet_id
|
||||
vpc_security_group_ids = var.vpc_security_group_ids
|
||||
associate_public_ip_address = var.associate_public_ip_address
|
||||
private_ip = var.private_ip
|
||||
user_data_replace_on_change = true
|
||||
|
||||
key_name = var.key_name
|
||||
user_data = file("${path.module}/scripts/setup.sh")
|
||||
|
||||
root_block_device {
|
||||
volume_type = "gp3"
|
||||
volume_size = 20
|
||||
}
|
||||
|
||||
tags = merge({ "Name" = var.name }, var.instance_tags, var.tags)
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
output "id" {
|
||||
description = "The ID of the instance"
|
||||
value = try(
|
||||
aws_instance.this.id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "arn" {
|
||||
description = "The ARN of the instance"
|
||||
value = try(
|
||||
aws_instance.this.arn,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "instance_state" {
|
||||
description = "The state of the instance"
|
||||
value = try(
|
||||
aws_instance.this.instance_state,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "primary_network_interface_id" {
|
||||
description = "The ID of the instance's primary network interface"
|
||||
value = try(
|
||||
aws_instance.this.primary_network_interface_id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached"
|
||||
value = try(
|
||||
aws_instance.this.public_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
description = "The private IP address assigned to the instance"
|
||||
value = try(
|
||||
aws_instance.this.private_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "ipv6_addresses" {
|
||||
description = "The IPv6 address assigned to the instance, if applicable"
|
||||
value = try(
|
||||
aws_instance.this.ipv6_addresses,
|
||||
[],
|
||||
)
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y iperf3
|
||||
|
||||
sudo tee -a /etc/systemd/system/iperf3.service << EOF
|
||||
[Unit]
|
||||
Description=iperf3 server
|
||||
After=syslog.target network.target auditd.service
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/bin/iperf3 -s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
sudo systemctl enable --now iperf3
|
||||
@@ -1,82 +0,0 @@
|
||||
variable "ami" {
|
||||
type = string
|
||||
description = "AMI ID for the EC2 instance"
|
||||
default = "ami-0b2a9065573b0a9c9" # Ubuntu 22.04 in us-east-1
|
||||
|
||||
validation {
|
||||
condition = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
|
||||
error_message = "Please provide a valid value for variable AMI."
|
||||
}
|
||||
}
|
||||
|
||||
variable "associate_public_ip_address" {
|
||||
description = "Whether to associate a public IP address with an instance in a VPC"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The type of instance to start"
|
||||
type = string
|
||||
default = "t3.micro"
|
||||
}
|
||||
|
||||
variable "instance_tags" {
|
||||
description = "Additional tags for the instance"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "ipv6_addresses" {
|
||||
description = "Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "key_name" {
|
||||
description = "Key name of the Key Pair to use for the instance; which can be managed using the `aws_key_pair` resource"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "monitoring" {
|
||||
description = "If true, the launched EC2 instance will have detailed monitoring enabled"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name to be used on EC2 instance created"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "private_ip" {
|
||||
description = "Private IP address to associate with the instance in a VPC"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "root_block_device" {
|
||||
description = "Customize details about the root block device of the instance. See Block Devices below for details"
|
||||
type = list(any)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "The VPC Subnet ID to launch in"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A mapping of tags to assign to the resource"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "vpc_security_group_ids" {
|
||||
description = "A list of security group IDs to associate with"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
resource "aws_instance" "this" {
|
||||
ami = var.ami
|
||||
instance_type = var.instance_type
|
||||
monitoring = var.monitoring
|
||||
subnet_id = var.subnet_id
|
||||
vpc_security_group_ids = var.vpc_security_group_ids
|
||||
associate_public_ip_address = var.associate_public_ip_address
|
||||
source_dest_check = false
|
||||
user_data_replace_on_change = true
|
||||
|
||||
key_name = var.key_name
|
||||
user_data = file("${path.module}/scripts/setup.sh")
|
||||
|
||||
root_block_device {
|
||||
volume_type = "gp3"
|
||||
volume_size = 15
|
||||
}
|
||||
|
||||
tags = merge({ "Name" = var.name }, var.instance_tags, var.tags)
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
output "id" {
|
||||
description = "The ID of the instance"
|
||||
value = try(
|
||||
aws_instance.this.id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "arn" {
|
||||
description = "The ARN of the instance"
|
||||
value = try(
|
||||
aws_instance.this.arn,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "instance_state" {
|
||||
description = "The state of the instance"
|
||||
value = try(
|
||||
aws_instance.this.instance_state,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "primary_network_interface_id" {
|
||||
description = "The ID of the instance's primary network interface"
|
||||
value = try(
|
||||
aws_instance.this.primary_network_interface_id,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
description = "The public IP address assigned to the instance, if applicable. NOTE: If you are using an aws_eip with your instance, you should refer to the EIP's address directly and not use `public_ip` as this field will change after the EIP is attached"
|
||||
value = try(
|
||||
aws_instance.this.public_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
description = "The private IP address assigned to the instance"
|
||||
value = try(
|
||||
aws_instance.this.private_ip,
|
||||
null,
|
||||
)
|
||||
}
|
||||
|
||||
output "ipv6_addresses" {
|
||||
description = "The IPv6 address assigned to the instance, if applicable"
|
||||
value = try(
|
||||
aws_instance.this.ipv6_addresses,
|
||||
[],
|
||||
)
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
# Enable IP forwarding
|
||||
echo "net.ipv4.ip_forward = 1" | sudo tee -a /etc/sysctl.conf
|
||||
sudo sysctl -p
|
||||
|
||||
# Setup iptables NAT
|
||||
sudo iptables -t nat -A POSTROUTING -o ens5 -s 0.0.0.0/0 -j MASQUERADE
|
||||
|
||||
# Save iptables rules in case of reboot
|
||||
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y iptables-persistent
|
||||
sudo systemctl enable --now netfilter-persistent.service
|
||||
sudo mkdir -p /etc/iptables
|
||||
sudo /usr/bin/iptables-save | sudo tee -a /etc/iptables/rules.v4
|
||||
@@ -1,82 +0,0 @@
|
||||
variable "ami" {
|
||||
type = string
|
||||
description = "AMI ID for the EC2 instance"
|
||||
default = "ami-0b2a9065573b0a9c9" # Ubuntu 22.04 in us-east-1
|
||||
|
||||
validation {
|
||||
condition = length(var.ami) > 4 && substr(var.ami, 0, 4) == "ami-"
|
||||
error_message = "Please provide a valid value for variable AMI."
|
||||
}
|
||||
}
|
||||
|
||||
variable "associate_public_ip_address" {
|
||||
description = "Whether to associate a public IP address with an instance in a VPC"
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "instance_type" {
|
||||
description = "The type of instance to start"
|
||||
type = string
|
||||
default = "t3.micro"
|
||||
}
|
||||
|
||||
variable "instance_tags" {
|
||||
description = "Additional tags for the instance"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "ipv6_addresses" {
|
||||
description = "Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "key_name" {
|
||||
description = "Key name of the Key Pair to use for the instance; which can be managed using the `aws_key_pair` resource"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "monitoring" {
|
||||
description = "If true, the launched EC2 instance will have detailed monitoring enabled"
|
||||
type = bool
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name to be used on EC2 instance created"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "private_ip" {
|
||||
description = "Private IP address to associate with the instance in a VPC"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "root_block_device" {
|
||||
description = "Customize details about the root block device of the instance. See Block Devices below for details"
|
||||
type = list(any)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "The VPC Subnet ID to launch in"
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A mapping of tags to assign to the resource"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "vpc_security_group_ids" {
|
||||
description = "A list of security group IDs to associate with"
|
||||
type = list(string)
|
||||
default = null
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
|
||||
# Create IAM role for the application instances
|
||||
resource "google_service_account" "application" {
|
||||
project = var.project_id
|
||||
|
||||
account_id = "app-${local.application_name}"
|
||||
display_name = "${local.application_name} app"
|
||||
description = "Service account for ${local.application_name} application instances."
|
||||
}
|
||||
|
||||
## Allow fluentbit to injest logs
|
||||
resource "google_project_iam_member" "logs" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/logging.logWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting application errors
|
||||
resource "google_project_iam_member" "errors" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/errorreporting.writer"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "metrics" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/monitoring.metricWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "service_management" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/servicemanagement.reporter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow appending traces
|
||||
resource "google_project_iam_member" "cloudtrace" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/cloudtrace.agent"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
@@ -1,155 +0,0 @@
|
||||
locals {
|
||||
application_name = var.application_name
|
||||
application_version = var.application_version
|
||||
|
||||
application_labels = merge({
|
||||
managed_by = "terraform"
|
||||
application = local.application_name
|
||||
}, var.application_labels)
|
||||
|
||||
application_tags = ["app-${local.application_name}"]
|
||||
|
||||
google_health_check_ip_ranges = [
|
||||
"130.211.0.0/22",
|
||||
"35.191.0.0/16"
|
||||
]
|
||||
|
||||
environment_variables = concat([
|
||||
{
|
||||
name = "GOOGLE_CLOUD_PROJECT_ID"
|
||||
value = var.project_id
|
||||
}
|
||||
], var.application_environment_variables)
|
||||
}
|
||||
|
||||
# Find latest ubuntu 22.04 image
|
||||
data "google_compute_image" "ubuntu" {
|
||||
family = "ubuntu-2204-lts"
|
||||
project = "ubuntu-os-cloud"
|
||||
}
|
||||
|
||||
# Deploy app
|
||||
resource "google_compute_address" "client_monitor" {
|
||||
project = var.project_id
|
||||
|
||||
region = var.compute_region
|
||||
name = "firezone-monitor"
|
||||
subnetwork = var.compute_subnetwork
|
||||
|
||||
address_type = "INTERNAL"
|
||||
}
|
||||
|
||||
resource "google_compute_instance" "client_monitor" {
|
||||
project = var.project_id
|
||||
|
||||
name = local.application_name
|
||||
description = "This template is used to create ${local.application_name} instances."
|
||||
|
||||
zone = var.compute_instance_availability_zone
|
||||
|
||||
machine_type = var.compute_instance_type
|
||||
|
||||
can_ip_forward = true
|
||||
|
||||
tags = local.application_tags
|
||||
|
||||
labels = merge({
|
||||
ubuntu-vm = data.google_compute_image.ubuntu.name
|
||||
version = local.application_version
|
||||
}, local.application_labels)
|
||||
|
||||
boot_disk {
|
||||
auto_delete = true
|
||||
|
||||
initialize_params {
|
||||
image = data.google_compute_image.ubuntu.self_link
|
||||
|
||||
labels = {
|
||||
managed_by = "terraform"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = var.compute_subnetwork
|
||||
stack_type = "IPV4_ONLY"
|
||||
network_ip = google_compute_address.client_monitor.address
|
||||
|
||||
access_config {
|
||||
network_tier = "PREMIUM"
|
||||
# Ephemeral IP address
|
||||
}
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = google_service_account.application.email
|
||||
|
||||
scopes = [
|
||||
# Those are default scopes
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/logging.write",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
"https://www.googleapis.com/auth/service.management.readonly",
|
||||
"https://www.googleapis.com/auth/servicecontrol",
|
||||
"https://www.googleapis.com/auth/trace.append",
|
||||
]
|
||||
}
|
||||
|
||||
shielded_instance_config {
|
||||
enable_integrity_monitoring = true
|
||||
enable_secure_boot = false
|
||||
enable_vtpm = true
|
||||
}
|
||||
|
||||
metadata = {
|
||||
user-data = templatefile("${path.module}/templates/cloud-init.yaml", {
|
||||
client_container_image = "${var.container_registry}/${var.image_repo}/${var.image}:${var.image_tag}"
|
||||
firezone_token = var.firezone_token
|
||||
firezone_api_url = var.firezone_api_url
|
||||
firezone_client_id = var.firezone_client_id
|
||||
firezone_client_log_level = var.firezone_client_log_level
|
||||
})
|
||||
|
||||
google-logging-enabled = "true"
|
||||
google-logging-use-fluentbit = "true"
|
||||
|
||||
# Report health-related metrics to Cloud Monitoring
|
||||
google-monitoring-enabled = "true"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
google_project_iam_member.logs,
|
||||
google_project_iam_member.errors,
|
||||
google_project_iam_member.metrics,
|
||||
google_project_iam_member.service_management,
|
||||
google_project_iam_member.cloudtrace,
|
||||
]
|
||||
|
||||
allow_stopping_for_update = true
|
||||
}
|
||||
|
||||
## Open metrics port for the health checks
|
||||
resource "google_compute_firewall" "http-health-checks" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-healthcheck"
|
||||
network = var.compute_network
|
||||
|
||||
source_ranges = local.google_health_check_ip_ranges
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
allow {
|
||||
protocol = var.health_check.protocol
|
||||
ports = [var.health_check.port]
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
output "service_account" {
|
||||
value = google_service_account.application
|
||||
}
|
||||
|
||||
output "target_tags" {
|
||||
value = local.application_tags
|
||||
}
|
||||
|
||||
output "instance" {
|
||||
value = google_compute_instance.client_monitor
|
||||
}
|
||||
|
||||
output "internal_ip" {
|
||||
value = google_compute_address.client_monitor.address
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
resource "google_project_service" "compute" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "pubsub" {
|
||||
project = var.project_id
|
||||
service = "pubsub.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "bigquery" {
|
||||
project = var.project_id
|
||||
service = "bigquery.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "container" {
|
||||
project = var.project_id
|
||||
service = "container.googleapis.com"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
]
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "stackdriver" {
|
||||
project = var.project_id
|
||||
service = "stackdriver.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "logging" {
|
||||
project = var.project_id
|
||||
service = "logging.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "monitoring" {
|
||||
project = var.project_id
|
||||
service = "monitoring.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudprofiler" {
|
||||
project = var.project_id
|
||||
service = "cloudprofiler.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudtrace" {
|
||||
project = var.project_id
|
||||
service = "cloudtrace.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "servicenetworking" {
|
||||
project = var.project_id
|
||||
service = "servicenetworking.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,240 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/dev.firezone.client/token
|
||||
content: ${firezone_token}
|
||||
permissions: "0600"
|
||||
owner: root
|
||||
|
||||
- path: /etc/systemd/system/firezone.service
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Firezone Client
|
||||
|
||||
[Service]
|
||||
AmbientCapabilities=CAP_NET_ADMIN
|
||||
CapabilityBoundingSet=CAP_CHOWN CAP_NET_ADMIN
|
||||
DeviceAllow=/dev/net/tun
|
||||
LockPersonality=true
|
||||
MemoryDenyWriteExecute=true
|
||||
NoNewPrivileges=true
|
||||
PrivateMounts=true
|
||||
PrivateTmp=true
|
||||
PrivateUsers=false
|
||||
ProcSubset=pid
|
||||
ProtectClock=true
|
||||
ProtectControlGroups=true
|
||||
ProtectHome=true
|
||||
ProtectHostname=true
|
||||
ProtectKernelLogs=true
|
||||
ProtectKernelModules=true
|
||||
ProtectKernelTunables=true
|
||||
ProtectProc=invisible
|
||||
ProtectSystem=full
|
||||
RestrictAddressFamilies=AF_INET AF_INET6 AF_NETLINK AF_UNIX
|
||||
RestrictNamespaces=true
|
||||
RestrictRealtime=true
|
||||
RestrictSUIDSGID=true
|
||||
StateDirectory=dev.firezone.client
|
||||
SystemCallArchitectures=native
|
||||
SystemCallFilter=@aio @basic-io @file-system @io-event @ipc @network-io @signal @system-service
|
||||
UMask=077
|
||||
|
||||
Environment="FIREZONE_API_URL=${firezone_api_url}"
|
||||
Environment="FIREZONE_ID=${firezone_client_id}"
|
||||
Environment="RUST_LOG=${firezone_client_log_level}"
|
||||
Environment="LOG_DIR=/var/log/firezone"
|
||||
|
||||
ExecStart=/usr/local/bin/firezone-headless-client standalone
|
||||
Type=notify
|
||||
User=root
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
|
||||
- path: /etc/google-cloud-ops-agent/config.yaml
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
logging:
|
||||
receivers:
|
||||
firezone_monitor:
|
||||
type: files
|
||||
include_paths:
|
||||
- /var/log/firezone_monitor/*.log
|
||||
service:
|
||||
pipelines:
|
||||
firezone_monitor_pipeline:
|
||||
receivers: [firezone_monitor]
|
||||
|
||||
- path: /etc/cron.d/firezone_monitor
|
||||
owner: root
|
||||
content: |
|
||||
* * * * * root /usr/local/bin/firezone-monitor/tunnel.sh 2>&1 >> /var/log/firezone_monitor/tunnel.log
|
||||
*/2 * * * * root /usr/local/bin/firezone-monitor/ping.sh 10.0.32.100 2>&1 >> /var/log/firezone_monitor/ping_internal.log
|
||||
*/2 * * * * root /usr/local/bin/firezone-monitor/ping.sh 8.8.4.4 2>&1 >> /var/log/firezone_monitor/ping_google_dns_ipv4.log
|
||||
*/2 * * * * root /usr/local/bin/firezone-monitor/ping6.sh 2001:4860:4860::8844 2>&1 >> /var/log/firezone_monitor/ping_google_dns_ipv6.log
|
||||
*/10 * * * * root /usr/local/bin/firezone-monitor/iperf.sh 10.0.32.101 2>&1 >> /var/log/firezone_monitor/iperf.log
|
||||
|
||||
- path: /usr/local/bin/firezone-monitor/common.sh
|
||||
permissions: "0555"
|
||||
owner: root
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
log() {
|
||||
local timestamp=$(date "+%Y/%m/%d-%H:%M:%S")
|
||||
echo "$timestamp >> $1"
|
||||
}
|
||||
|
||||
check_tunnel() {
|
||||
log "Checking tunnel state"
|
||||
|
||||
if $(ip address show tun-firezone > /dev/null 2>&1) ; then
|
||||
log "Firezone Tunnel is running"
|
||||
else
|
||||
log "Firezone Monitor Test ERROR: Firezone tunnel is not running"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
- path: /usr/local/bin/firezone-monitor/tunnel.sh
|
||||
permissions: "0555"
|
||||
owner: root
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
source $(dirname "$0")/common.sh
|
||||
|
||||
TEST_NAME="tunnel"
|
||||
|
||||
main() {
|
||||
log "Start Firezone Monitor Test: $TEST_NAME"
|
||||
check_tunnel
|
||||
}
|
||||
|
||||
finish() {
|
||||
log "Finish Firezone Monitor Test: $TEST_NAME"
|
||||
}
|
||||
|
||||
trap finish EXIT
|
||||
main
|
||||
|
||||
- path: /usr/local/bin/firezone-monitor/ping.sh
|
||||
permissions: "0555"
|
||||
owner: root
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
source $(dirname "$0")/common.sh
|
||||
|
||||
TEST_NAME="ping"
|
||||
PING_HOST=$1
|
||||
|
||||
run_test() {
|
||||
log "Test output:"
|
||||
ping -4 -c 10 -W 5 -I "tun-firezone" "$PING_HOST"
|
||||
}
|
||||
|
||||
main() {
|
||||
log "Start Firezone Monitor Test: $TEST_NAME"
|
||||
check_tunnel
|
||||
run_test
|
||||
}
|
||||
|
||||
finish() {
|
||||
log "Finish Firezone Monitor Test: $TEST_NAME"
|
||||
}
|
||||
|
||||
trap finish EXIT
|
||||
main
|
||||
|
||||
- path: /usr/local/bin/firezone-monitor/ping6.sh
|
||||
permissions: "0555"
|
||||
owner: root
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
source $(dirname "$0")/common.sh
|
||||
|
||||
TEST_NAME="ping6"
|
||||
PING_HOST=$1
|
||||
|
||||
run_test() {
|
||||
log "Test output:"
|
||||
ping -6 -c 10 -W 5 -I "tun-firezone" "$PING_HOST"
|
||||
}
|
||||
|
||||
main() {
|
||||
log "Start Firezone Monitor Test: $TEST_NAME"
|
||||
check_tunnel
|
||||
run_test
|
||||
}
|
||||
|
||||
finish() {
|
||||
log "Finish Firezone Monitor Test: $TEST_NAME"
|
||||
}
|
||||
|
||||
trap finish EXIT
|
||||
main
|
||||
|
||||
- path: /usr/local/bin/firezone-monitor/iperf.sh
|
||||
permissions: "0555"
|
||||
owner: root
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
source $(dirname "$0")/common.sh
|
||||
|
||||
TEST_NAME="iperf"
|
||||
IPERF_HOST=$1
|
||||
TIMEOUT=5000
|
||||
|
||||
run_test() {
|
||||
log "Test output:"
|
||||
iperf3 -c $IPERF_HOST -R --connect-timeout $TIMEOUT
|
||||
}
|
||||
|
||||
main() {
|
||||
log "Start Firezone Monitor Test: $TEST_NAME"
|
||||
check_tunnel
|
||||
run_test
|
||||
}
|
||||
|
||||
finish() {
|
||||
log "Finish Firezone Monitor Test: $TEST_NAME"
|
||||
}
|
||||
|
||||
trap finish EXIT
|
||||
main
|
||||
|
||||
runcmd:
|
||||
- sudo mkdir -m 0755 -p /var/log/firezone_monitor
|
||||
- sudo apt update -y
|
||||
- sudo apt install -y apt-transport-https ca-certificates curl software-properties-common iperf3
|
||||
- sudo install -m 0755 -d /etc/apt/keyrings
|
||||
- "sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc"
|
||||
- sudo chmod a+r /etc/apt/keyrings/docker.asc
|
||||
- 'echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null'
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
- sudo systemctl enable --now docker.service
|
||||
- sudo systemctl enable --now containerd.service
|
||||
- curl -sSO https://dl.google.com/cloudagents/add-google-cloud-ops-agent-repo.sh
|
||||
- sudo bash add-google-cloud-ops-agent-repo.sh
|
||||
- sudo apt-get update
|
||||
- 'sudo apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install google-cloud-ops-agent'
|
||||
- sudo docker pull ${client_container_image}
|
||||
- sudo docker create --name fz-client ${client_container_image}
|
||||
- "sudo docker cp fz-client:/bin/firezone-headless-client /usr/local/bin/firezone-headless-client"
|
||||
- sudo docker rm -v fz-client
|
||||
- sudo systemctl enable --now firezone.service
|
||||
@@ -1,164 +0,0 @@
|
||||
variable "project_id" {
|
||||
type = string
|
||||
description = "ID of a Google Cloud Project"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Compute
|
||||
################################################################################
|
||||
|
||||
variable "compute_network" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_subnetwork" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_region" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_instance_availability_zone" {
|
||||
type = string
|
||||
description = "List of zones in the region defined in `compute_region` where replicas should be deployed."
|
||||
}
|
||||
|
||||
variable "compute_instance_type" {
|
||||
type = string
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Container Registry
|
||||
################################################################################
|
||||
|
||||
variable "container_registry" {
|
||||
type = string
|
||||
nullable = false
|
||||
description = "Container registry URL to pull the image from."
|
||||
}
|
||||
|
||||
###############################################################################
|
||||
# Container Image
|
||||
###############################################################################
|
||||
|
||||
variable "image_repo" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Repo of a container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image_tag" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Application
|
||||
################################################################################
|
||||
|
||||
variable "application_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Name of the application. Defaults to value of `var.image_name` with `_` replaced to `-`."
|
||||
}
|
||||
|
||||
variable "application_version" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Version of the application. Defaults to value of `var.image_tag`."
|
||||
}
|
||||
|
||||
variable "application_labels" {
|
||||
type = map(string)
|
||||
nullable = false
|
||||
default = {}
|
||||
|
||||
description = "Labels to add to all created by this module resources."
|
||||
}
|
||||
|
||||
variable "health_check" {
|
||||
type = object({
|
||||
name = string
|
||||
protocol = string
|
||||
port = number
|
||||
|
||||
initial_delay_sec = number
|
||||
check_interval_sec = optional(number)
|
||||
timeout_sec = optional(number)
|
||||
healthy_threshold = optional(number)
|
||||
unhealthy_threshold = optional(number)
|
||||
|
||||
http_health_check = optional(object({
|
||||
host = optional(string)
|
||||
request_path = optional(string)
|
||||
port = optional(string)
|
||||
response = optional(string)
|
||||
}))
|
||||
})
|
||||
|
||||
nullable = false
|
||||
|
||||
description = "Health check which will be used for auto healing policy."
|
||||
}
|
||||
|
||||
variable "application_environment_variables" {
|
||||
type = list(object({
|
||||
name = string
|
||||
value = string
|
||||
}))
|
||||
|
||||
nullable = false
|
||||
default = []
|
||||
|
||||
description = "List of environment variables to set for the application."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Firezone Client
|
||||
################################################################################
|
||||
|
||||
variable "firezone_api_url" {
|
||||
type = string
|
||||
nullable = false
|
||||
default = "wss://api.firez.one"
|
||||
|
||||
description = "URL the firezone client will connect to"
|
||||
}
|
||||
|
||||
variable "firezone_client_id" {
|
||||
type = string
|
||||
nullable = false
|
||||
default = ""
|
||||
|
||||
description = ""
|
||||
}
|
||||
|
||||
variable "firezone_token" {
|
||||
type = string
|
||||
default = ""
|
||||
|
||||
description = "Firezone token to allow client to connect to portal"
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "firezone_client_log_level" {
|
||||
type = string
|
||||
default = "debug"
|
||||
|
||||
description = "Firezone client Rust log level"
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
# Create DNS records for the application
|
||||
resource "google_dns_record_set" "application-ipv4" {
|
||||
count = var.application_dns_tld != null ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${var.application_dns_tld}."
|
||||
type = "A"
|
||||
ttl = 300
|
||||
|
||||
managed_zone = var.dns_managed_zone_name
|
||||
|
||||
rrdatas = google_compute_global_address.ipv4[*].address
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
resource "google_dns_record_set" "application-ipv6" {
|
||||
count = var.application_dns_tld != null ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${var.application_dns_tld}."
|
||||
type = "AAAA"
|
||||
ttl = 300
|
||||
|
||||
managed_zone = var.dns_managed_zone_name
|
||||
|
||||
rrdatas = google_compute_global_address.ipv6[*].address
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
# Create IAM role for the application instances
|
||||
resource "google_service_account" "application" {
|
||||
project = var.project_id
|
||||
|
||||
account_id = "app-${local.application_name}"
|
||||
display_name = "${local.application_name} app"
|
||||
description = "Service account for ${local.application_name} application instances."
|
||||
}
|
||||
|
||||
## Allow application service account to pull images from the container registry
|
||||
resource "google_project_iam_member" "artifacts" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/artifactregistry.reader"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow fluentbit to injest logs
|
||||
resource "google_project_iam_member" "logs" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/logging.logWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting application errors
|
||||
resource "google_project_iam_member" "errors" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/errorreporting.writer"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "metrics" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/monitoring.metricWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "service_management" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/servicemanagement.reporter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow appending traces
|
||||
resource "google_project_iam_member" "cloudtrace" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/cloudtrace.agent"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
@@ -1,375 +0,0 @@
|
||||
locals {
|
||||
application_name = var.application_name != null ? var.application_name : var.image
|
||||
application_version = var.application_version != null ? var.application_version : var.image_tag
|
||||
|
||||
application_labels = merge({
|
||||
managed_by = "terraform"
|
||||
|
||||
# Note: this labels are used to fetch a release name for Erlang Cluster
|
||||
application = local.application_name
|
||||
}, var.application_labels)
|
||||
|
||||
application_environment_variables = concat([
|
||||
{
|
||||
name = "RELEASE_HOST_DISCOVERY_METHOD"
|
||||
value = "gce_metadata"
|
||||
},
|
||||
{
|
||||
name = "PHOENIX_EXTERNAL_TRUSTED_PROXIES"
|
||||
value = jsonencode(concat(
|
||||
[
|
||||
"35.191.0.0/16",
|
||||
"130.211.0.0/22"
|
||||
],
|
||||
google_compute_global_address.ipv4[*].address,
|
||||
google_compute_global_address.ipv6[*].address
|
||||
))
|
||||
},
|
||||
{
|
||||
name = "LOG_LEVEL"
|
||||
value = var.observability_log_level
|
||||
},
|
||||
{
|
||||
name = "OTLP_ENDPOINT",
|
||||
value = "http://localhost:4318"
|
||||
},
|
||||
{
|
||||
name = "OTEL_RESOURCE_ATTRIBUTES"
|
||||
value = "application.name=${local.application_name}"
|
||||
},
|
||||
{
|
||||
name = "TELEMETRY_METRICS_REPORTER"
|
||||
value = "Elixir.Domain.Telemetry.Reporter.GoogleCloudMetrics"
|
||||
},
|
||||
{
|
||||
name = "TELEMETRY_METRICS_REPORTER_OPTS"
|
||||
value = jsonencode({
|
||||
project_id = var.project_id
|
||||
})
|
||||
},
|
||||
{
|
||||
name = "GOOGLE_CLOUD_PROJECT",
|
||||
value = var.project_id
|
||||
},
|
||||
{
|
||||
name = "PLATFORM_ADAPTER"
|
||||
value = "Elixir.Domain.GoogleCloudPlatform"
|
||||
},
|
||||
{
|
||||
name = "PLATFORM_ADAPTER_CONFIG"
|
||||
value = jsonencode({
|
||||
project_id = var.project_id
|
||||
service_account_email = google_service_account.application.email
|
||||
})
|
||||
}
|
||||
], var.application_environment_variables)
|
||||
|
||||
application_ports_by_name = { for port in var.application_ports : port.name => port }
|
||||
}
|
||||
|
||||
# Fetch most recent COS image
|
||||
data "google_compute_image" "coreos" {
|
||||
family = "cos-117-lts"
|
||||
project = "cos-cloud"
|
||||
}
|
||||
|
||||
# Reserve instances for the application
|
||||
# If you don't reserve them deployment takes much longer and there is no guarantee that instances will be created at all,
|
||||
# Google Cloud Platform does not guarantee that instances will be available when you need them.
|
||||
resource "google_compute_reservation" "reservation" {
|
||||
# for_each = toset(var.compute_instance_availability_zones)
|
||||
|
||||
project = var.project_id
|
||||
|
||||
# name = "${local.application_name}-${each.key}-${var.compute_instance_type}"
|
||||
name = "${local.application_name}-${element(var.compute_instance_availability_zones, length(var.compute_instance_availability_zones) - 1)}-${var.compute_instance_type}"
|
||||
# zone = each.key
|
||||
zone = element(var.compute_instance_availability_zones, length(var.compute_instance_availability_zones) - 1)
|
||||
|
||||
specific_reservation_required = true
|
||||
|
||||
specific_reservation {
|
||||
count = var.scaling_horizontal_replicas * 2
|
||||
|
||||
instance_properties {
|
||||
machine_type = var.compute_instance_type
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Deploy app
|
||||
resource "google_compute_instance_template" "application" {
|
||||
project = var.project_id
|
||||
|
||||
name_prefix = "${local.application_name}-"
|
||||
|
||||
description = "This template is used to create ${local.application_name} instances."
|
||||
|
||||
machine_type = var.compute_instance_type
|
||||
region = var.compute_instance_region
|
||||
|
||||
can_ip_forward = false
|
||||
|
||||
tags = ["app-${local.application_name}"]
|
||||
|
||||
labels = merge({
|
||||
container-vm = data.google_compute_image.coreos.name
|
||||
|
||||
# This variable can be used by Erlang Cluster not to join nodes of older versions
|
||||
version = local.application_version
|
||||
}, local.application_labels)
|
||||
|
||||
|
||||
scheduling {
|
||||
automatic_restart = true
|
||||
on_host_maintenance = "MIGRATE"
|
||||
provisioning_model = "STANDARD"
|
||||
}
|
||||
|
||||
reservation_affinity {
|
||||
type = "SPECIFIC_RESERVATION"
|
||||
|
||||
specific_reservation {
|
||||
key = "compute.googleapis.com/reservation-name"
|
||||
# *Regional* instance group can consume only one reservation, which is zonal by default,
|
||||
# so we are always locked to one zone per region until Google Cloud Platform will fix that.
|
||||
# values = [for r in google_compute_reservation.reservation : r.name]
|
||||
values = [google_compute_reservation.reservation.name]
|
||||
}
|
||||
}
|
||||
|
||||
disk {
|
||||
source_image = data.google_compute_image.coreos.self_link
|
||||
auto_delete = true
|
||||
boot = true
|
||||
disk_type = var.compute_boot_disk_type
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = var.vpc_subnetwork
|
||||
nic_type = "GVNIC"
|
||||
queue_count = var.queue_count
|
||||
stack_type = "IPV4_IPV6"
|
||||
|
||||
ipv6_access_config {
|
||||
network_tier = "PREMIUM"
|
||||
}
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = google_service_account.application.email
|
||||
|
||||
scopes = concat([
|
||||
# Those are default scopes
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/logging.write",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
"https://www.googleapis.com/auth/service.management.readonly",
|
||||
"https://www.googleapis.com/auth/servicecontrol",
|
||||
"https://www.googleapis.com/auth/trace.append",
|
||||
# Required to discover the other instances in the Erlang Cluster
|
||||
"https://www.googleapis.com/auth/compute.readonly"
|
||||
], var.application_token_scopes)
|
||||
}
|
||||
|
||||
shielded_instance_config {
|
||||
enable_integrity_monitoring = true
|
||||
enable_secure_boot = false
|
||||
enable_vtpm = true
|
||||
}
|
||||
|
||||
metadata = {
|
||||
gce-container-declaration = yamlencode({
|
||||
spec = {
|
||||
containers = [{
|
||||
name = local.application_name != null ? local.application_name : var.image
|
||||
image = "${var.container_registry}/${var.image_repo}/${var.image}:${var.image_tag}"
|
||||
env = local.application_environment_variables
|
||||
}]
|
||||
|
||||
volumes = []
|
||||
|
||||
restartPolicy = "Always"
|
||||
}
|
||||
})
|
||||
|
||||
user-data = templatefile("${path.module}/templates/cloud-init.yaml", {
|
||||
swap_size_gb = var.compute_swap_size_gb,
|
||||
otel_config_content = indent(6, var.otel_config)
|
||||
})
|
||||
|
||||
google-logging-enabled = "true"
|
||||
google-logging-use-fluentbit = "true"
|
||||
|
||||
# Report health-related metrics to Cloud Monitoring
|
||||
google-monitoring-enabled = "true"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
google_project_iam_member.artifacts,
|
||||
google_project_iam_member.logs,
|
||||
google_project_iam_member.errors,
|
||||
google_project_iam_member.metrics,
|
||||
google_project_iam_member.service_management,
|
||||
google_project_iam_member.cloudtrace,
|
||||
google_compute_reservation.reservation,
|
||||
]
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
# Create health checks for the application ports
|
||||
resource "google_compute_health_check" "port" {
|
||||
for_each = { for port in var.application_ports : port.name => port if try(port.health_check, null) != null }
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-${each.key}"
|
||||
|
||||
check_interval_sec = each.value.health_check.check_interval_sec != null ? each.value.health_check.check_interval_sec : 5
|
||||
timeout_sec = each.value.health_check.timeout_sec != null ? each.value.health_check.timeout_sec : 5
|
||||
healthy_threshold = each.value.health_check.healthy_threshold != null ? each.value.health_check.healthy_threshold : 2
|
||||
unhealthy_threshold = each.value.health_check.unhealthy_threshold != null ? each.value.health_check.unhealthy_threshold : 2
|
||||
|
||||
log_config {
|
||||
enable = false
|
||||
}
|
||||
|
||||
dynamic "tcp_health_check" {
|
||||
for_each = try(each.value.health_check.tcp_health_check, null)[*]
|
||||
|
||||
content {
|
||||
port = each.value.port
|
||||
|
||||
response = lookup(tcp_health_check.value, "response", null)
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "http_health_check" {
|
||||
for_each = try(each.value.health_check.http_health_check, null)[*]
|
||||
|
||||
content {
|
||||
port = each.value.port
|
||||
|
||||
host = lookup(http_health_check.value, "host", null)
|
||||
request_path = lookup(http_health_check.value, "request_path", null)
|
||||
response = lookup(http_health_check.value, "response", null)
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "https_health_check" {
|
||||
for_each = try(each.value.health_check.https_health_check, null)[*]
|
||||
|
||||
content {
|
||||
port = each.value.port
|
||||
|
||||
host = lookup(https_health_check.value, "host", null)
|
||||
request_path = lookup(https_health_check.value, "request_path", null)
|
||||
response = lookup(http_health_check.value, "response", null)
|
||||
}
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
# Use template to deploy zonal instance group
|
||||
resource "google_compute_region_instance_group_manager" "application" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-group"
|
||||
|
||||
base_instance_name = local.application_name
|
||||
region = var.compute_instance_region
|
||||
distribution_policy_zones = var.compute_instance_availability_zones
|
||||
|
||||
target_size = var.scaling_horizontal_replicas
|
||||
|
||||
wait_for_instances = true
|
||||
wait_for_instances_status = "STABLE"
|
||||
|
||||
version {
|
||||
name = local.application_version
|
||||
instance_template = google_compute_instance_template.application.self_link
|
||||
}
|
||||
|
||||
dynamic "named_port" {
|
||||
for_each = var.application_ports
|
||||
|
||||
content {
|
||||
name = named_port.value.name
|
||||
port = named_port.value.port
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "auto_healing_policies" {
|
||||
for_each = try([google_compute_health_check.port["http"].self_link], [])
|
||||
|
||||
content {
|
||||
initial_delay_sec = local.application_ports_by_name["http"].health_check.initial_delay_sec
|
||||
|
||||
health_check = auto_healing_policies.value
|
||||
}
|
||||
}
|
||||
|
||||
update_policy {
|
||||
type = "PROACTIVE"
|
||||
minimal_action = "REPLACE"
|
||||
|
||||
# The number of instances that can be unavailable (from the target size) during the update. We set
|
||||
# this to 0 because we want all new instances to come online before we start taking down the old ones.
|
||||
max_unavailable_fixed = 0
|
||||
|
||||
# The number of additional instances that can be created during the update. Since we are reserving 2 * the
|
||||
# number of instances in the group, we set this to the target number of instances.
|
||||
max_surge_fixed = var.scaling_horizontal_replicas
|
||||
}
|
||||
|
||||
timeouts {
|
||||
create = "30m"
|
||||
update = "30m"
|
||||
delete = "20m"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_compute_instance_template.application
|
||||
]
|
||||
}
|
||||
|
||||
# Auto-scale instances with high CPU and Memory usage
|
||||
resource "google_compute_region_autoscaler" "application" {
|
||||
count = var.scaling_max_horizontal_replicas != null ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-autoscaler"
|
||||
|
||||
region = var.compute_instance_region
|
||||
target = google_compute_region_instance_group_manager.application.id
|
||||
|
||||
autoscaling_policy {
|
||||
max_replicas = var.scaling_max_horizontal_replicas
|
||||
min_replicas = var.scaling_horizontal_replicas
|
||||
|
||||
# wait 3 minutes before trying to measure the CPU utilization for new instances
|
||||
cooldown_period = 180
|
||||
|
||||
cpu_utilization {
|
||||
target = 0.8
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,582 +0,0 @@
|
||||
locals {
|
||||
google_load_balancer_ip_ranges = [
|
||||
"130.211.0.0/22",
|
||||
"35.191.0.0/16",
|
||||
]
|
||||
|
||||
google_health_check_ip_ranges = [
|
||||
"130.211.0.0/22",
|
||||
"35.191.0.0/16"
|
||||
]
|
||||
|
||||
public_application = var.application_dns_tld != null
|
||||
}
|
||||
|
||||
# Define a security policy which allows to filter traffic by IP address,
|
||||
# an edge security policy can also detect and block common types of web attacks
|
||||
resource "google_compute_security_policy" "default" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = local.application_name
|
||||
|
||||
type = "CLOUD_ARMOR"
|
||||
|
||||
advanced_options_config {
|
||||
json_parsing = "STANDARD"
|
||||
log_level = "NORMAL"
|
||||
}
|
||||
|
||||
adaptive_protection_config {
|
||||
layer_7_ddos_defense_config {
|
||||
enable = local.public_application
|
||||
rule_visibility = "STANDARD"
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "rate limit all requests that match the default rule"
|
||||
|
||||
# TODO: disable preview when we make sure that rate limited logs look good for some time
|
||||
preview = true
|
||||
|
||||
action = "throttle"
|
||||
priority = "1"
|
||||
|
||||
match {
|
||||
versioned_expr = "SRC_IPS_V1"
|
||||
|
||||
config {
|
||||
src_ip_ranges = ["*"]
|
||||
}
|
||||
}
|
||||
|
||||
rate_limit_options {
|
||||
conform_action = "allow"
|
||||
exceed_action = "deny(429)"
|
||||
|
||||
enforce_on_key = "IP"
|
||||
|
||||
rate_limit_threshold {
|
||||
count = 240
|
||||
interval_sec = 60
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "block sanctioned countries"
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "101"
|
||||
|
||||
match {
|
||||
expr {
|
||||
# Required by US law due to sanctions.
|
||||
expression = "origin.region_code.matches('^RU|BY|KP|IR|SY|CU|VE|XC|XD|SD|MM$')"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured sqli-v33-stable OWASP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1001"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('sqli-v33-stable', {'sensitivity': 1})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured xss-v33-stable OWASP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1002"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('xss-v33-stable', {'sensitivity': 1})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured methodenforcement-v33-stable OWASP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1003"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('methodenforcement-v33-stable', {'sensitivity': 1})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured scannerdetection-v33-stable OWASP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1004"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('scannerdetection-v33-stable', {'sensitivity': 1})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured protocolattack-v33-stable OWASP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1005"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('protocolattack-v33-stable', {'sensitivity': 1})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured sessionfixation-v33-stable OWASP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1006"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('sessionfixation-v33-stable', {'sensitivity': 1})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "log all requests that match preconfigured cve-canary GCP rule"
|
||||
preview = true
|
||||
|
||||
action = "deny(403)"
|
||||
priority = "1007"
|
||||
|
||||
match {
|
||||
expr {
|
||||
expression = "evaluatePreconfiguredWaf('cve-canary', {'sensitivity': 2})"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rule {
|
||||
description = "default allow rule"
|
||||
|
||||
action = "allow"
|
||||
priority = "2147483647"
|
||||
|
||||
match {
|
||||
versioned_expr = "SRC_IPS_V1"
|
||||
|
||||
config {
|
||||
src_ip_ranges = ["*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
# Expose the application ports via HTTP(S) load balancer with a managed SSL certificate and a static IP address
|
||||
resource "google_compute_backend_service" "default" {
|
||||
for_each = local.public_application ? local.application_ports_by_name : {}
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-backend-${each.value.name}"
|
||||
|
||||
load_balancing_scheme = "EXTERNAL"
|
||||
|
||||
port_name = each.value.name
|
||||
protocol = "HTTP"
|
||||
|
||||
timeout_sec = 86400
|
||||
connection_draining_timeout_sec = 120
|
||||
|
||||
enable_cdn = var.application_cdn_enabled
|
||||
|
||||
cdn_policy {
|
||||
cache_mode = "CACHE_ALL_STATIC"
|
||||
|
||||
cache_key_policy {
|
||||
include_host = true
|
||||
include_protocol = true
|
||||
include_query_string = true
|
||||
}
|
||||
|
||||
default_ttl = 3600
|
||||
client_ttl = 3600
|
||||
max_ttl = 86400
|
||||
}
|
||||
|
||||
compression_mode = "DISABLED"
|
||||
|
||||
custom_request_headers = [
|
||||
"X-Geo-Location-Region:{client_region}",
|
||||
"X-Geo-Location-City:{client_city}",
|
||||
"X-Geo-Location-Coordinates:{client_city_lat_long}",
|
||||
]
|
||||
|
||||
custom_response_headers = [
|
||||
"X-Cache-Hit: {cdn_cache_status}"
|
||||
]
|
||||
|
||||
session_affinity = "CLIENT_IP"
|
||||
|
||||
health_checks = try([google_compute_health_check.port[each.key].self_link], null)
|
||||
|
||||
security_policy = google_compute_security_policy.default[0].self_link
|
||||
|
||||
backend {
|
||||
balancing_mode = "UTILIZATION"
|
||||
capacity_scaler = 1
|
||||
group = google_compute_region_instance_group_manager.application.instance_group
|
||||
|
||||
# Do not send traffic to nodes that have CPU load higher than 80%
|
||||
# max_utilization = 0.8
|
||||
}
|
||||
|
||||
log_config {
|
||||
enable = false
|
||||
sample_rate = "1.0"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_compute_region_instance_group_manager.application,
|
||||
google_compute_health_check.port,
|
||||
]
|
||||
}
|
||||
|
||||
## Create a SSL policy
|
||||
resource "google_compute_ssl_policy" "application" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = local.application_name
|
||||
|
||||
min_tls_version = "TLS_1_2"
|
||||
profile = "RESTRICTED"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
## Create a managed SSL certificate
|
||||
resource "google_compute_managed_ssl_certificate" "default" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-mig-lb-cert"
|
||||
|
||||
type = "MANAGED"
|
||||
|
||||
managed {
|
||||
domains = [
|
||||
var.application_dns_tld,
|
||||
]
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
## Create URL map for the application
|
||||
resource "google_compute_url_map" "default" {
|
||||
count = local.public_application && contains(keys(local.application_ports_by_name), "http") ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = local.application_name
|
||||
default_service = google_compute_backend_service.default["http"].self_link
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
# Set up HTTP(s) proxies and redirect HTTP to HTTPS
|
||||
resource "google_compute_url_map" "https_redirect" {
|
||||
count = local.public_application && contains(keys(local.application_ports_by_name), "http") ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-https-redirect"
|
||||
|
||||
default_url_redirect {
|
||||
https_redirect = true
|
||||
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
|
||||
strip_query = false
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
resource "google_compute_target_http_proxy" "default" {
|
||||
count = length(google_compute_url_map.https_redirect) > 0 ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-http"
|
||||
|
||||
url_map = google_compute_url_map.https_redirect[0].self_link
|
||||
}
|
||||
|
||||
resource "google_compute_target_https_proxy" "default" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-https"
|
||||
|
||||
url_map = google_compute_url_map.default[0].self_link
|
||||
|
||||
ssl_certificates = google_compute_managed_ssl_certificate.default[*].self_link
|
||||
ssl_policy = google_compute_ssl_policy.application[0].self_link
|
||||
quic_override = "NONE"
|
||||
}
|
||||
|
||||
# Allocate global addresses for the load balancer and set up forwarding rules
|
||||
## IPv4
|
||||
resource "google_compute_global_address" "ipv4" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-ipv4"
|
||||
|
||||
ip_version = "IPV4"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
resource "google_compute_global_forwarding_rule" "http" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = local.application_name
|
||||
labels = local.application_labels
|
||||
|
||||
target = google_compute_target_http_proxy.default[0].self_link
|
||||
ip_address = google_compute_global_address.ipv4[0].address
|
||||
port_range = "80"
|
||||
|
||||
load_balancing_scheme = "EXTERNAL"
|
||||
}
|
||||
|
||||
resource "google_compute_global_forwarding_rule" "https" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-https"
|
||||
labels = local.application_labels
|
||||
|
||||
target = google_compute_target_https_proxy.default[0].self_link
|
||||
ip_address = google_compute_global_address.ipv4[0].address
|
||||
port_range = "443"
|
||||
|
||||
load_balancing_scheme = "EXTERNAL"
|
||||
}
|
||||
|
||||
## IPv6
|
||||
resource "google_compute_global_address" "ipv6" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-ipv6"
|
||||
|
||||
ip_version = "IPV6"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
resource "google_compute_global_forwarding_rule" "http_ipv6" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-ipv6-http"
|
||||
labels = local.application_labels
|
||||
|
||||
target = google_compute_target_http_proxy.default[0].self_link
|
||||
ip_address = google_compute_global_address.ipv6[0].address
|
||||
port_range = "80"
|
||||
|
||||
load_balancing_scheme = "EXTERNAL"
|
||||
}
|
||||
|
||||
resource "google_compute_global_forwarding_rule" "https_ipv6" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-ipv6-https"
|
||||
labels = local.application_labels
|
||||
|
||||
target = google_compute_target_https_proxy.default[0].self_link
|
||||
ip_address = google_compute_global_address.ipv6[0].address
|
||||
port_range = "443"
|
||||
|
||||
load_balancing_scheme = "EXTERNAL"
|
||||
}
|
||||
|
||||
## Open HTTP ports for the load balancer
|
||||
resource "google_compute_firewall" "http" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-firewall-lb-to-instances-ipv4"
|
||||
network = var.vpc_network
|
||||
|
||||
source_ranges = local.google_load_balancer_ip_ranges
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
dynamic "allow" {
|
||||
for_each = var.application_ports
|
||||
|
||||
content {
|
||||
protocol = allow.value.protocol
|
||||
ports = [allow.value.port]
|
||||
}
|
||||
}
|
||||
|
||||
# We also enable UDP to allow QUIC if it's enabled
|
||||
dynamic "allow" {
|
||||
for_each = var.application_ports
|
||||
|
||||
content {
|
||||
protocol = "udp"
|
||||
ports = [allow.value.port]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
## Open HTTP ports for the health checks
|
||||
resource "google_compute_firewall" "http-health-checks" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-healthcheck"
|
||||
network = var.vpc_network
|
||||
|
||||
source_ranges = local.google_health_check_ip_ranges
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
dynamic "allow" {
|
||||
for_each = var.application_ports
|
||||
|
||||
content {
|
||||
protocol = allow.value.protocol
|
||||
ports = [allow.value.port]
|
||||
}
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
# Allow outbound traffic
|
||||
resource "google_compute_firewall" "egress-ipv4" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-egress-ipv4"
|
||||
network = var.vpc_network
|
||||
direction = "EGRESS"
|
||||
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
destination_ranges = ["0.0.0.0/0"]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "egress-ipv6" {
|
||||
count = local.public_application ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-egress-ipv6"
|
||||
network = var.vpc_network
|
||||
direction = "EGRESS"
|
||||
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
destination_ranges = ["::/0"]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
output "service_account" {
|
||||
value = google_service_account.application
|
||||
}
|
||||
|
||||
output "target_tags" {
|
||||
value = ["app-${local.application_name}"]
|
||||
}
|
||||
|
||||
output "instance_group" {
|
||||
value = google_compute_region_instance_group_manager.application
|
||||
}
|
||||
|
||||
output "host" {
|
||||
value = var.application_dns_tld
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
|
||||
resource "google_project_service" "compute" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "pubsub" {
|
||||
project = var.project_id
|
||||
service = "pubsub.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "bigquery" {
|
||||
project = var.project_id
|
||||
service = "bigquery.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "container" {
|
||||
project = var.project_id
|
||||
service = "container.googleapis.com"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
]
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "stackdriver" {
|
||||
project = var.project_id
|
||||
service = "stackdriver.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "logging" {
|
||||
project = var.project_id
|
||||
service = "logging.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "monitoring" {
|
||||
project = var.project_id
|
||||
service = "monitoring.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudprofiler" {
|
||||
project = var.project_id
|
||||
service = "cloudprofiler.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudtrace" {
|
||||
project = var.project_id
|
||||
service = "cloudtrace.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "servicenetworking" {
|
||||
project = var.project_id
|
||||
service = "servicenetworking.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
write_files:
|
||||
- path: /etc/otelcol-contrib/config.yaml
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
${otel_config_content}
|
||||
|
||||
- path: /etc/systemd/system/otel-collector.service
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Start an OpenTelemetry collector docker container
|
||||
|
||||
[Service]
|
||||
TimeoutStartSec=0
|
||||
Restart=always
|
||||
ExecStartPre=/usr/bin/docker pull otel/opentelemetry-collector-contrib:0.127.0
|
||||
ExecStart=/usr/bin/docker run --rm -u 2000 --name=otel-collector --network host --volume /etc/otelcol-contrib/:/etc/otelcol-contrib/ otel/opentelemetry-collector-contrib:0.127.0
|
||||
ExecStop=/usr/bin/docker stop otel-collector
|
||||
ExecStopPost=/usr/bin/docker rm otel-collector
|
||||
|
||||
- path: /etc/iptables/rules.v6
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
*filter
|
||||
:INPUT DROP [0:0]
|
||||
:FORWARD DROP [0:0]
|
||||
:OUTPUT DROP [0:0]
|
||||
:DOCKER - [0:0]
|
||||
:DOCKER-ISOLATION-STAGE-1 - [0:0]
|
||||
:DOCKER-ISOLATION-STAGE-2 - [0:0]
|
||||
:DOCKER-USER - [0:0]
|
||||
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
-A INPUT -i lo -j ACCEPT
|
||||
-A INPUT -p ipv6-icmp -j ACCEPT
|
||||
-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
|
||||
-A INPUT -p tcp -j ACCEPT
|
||||
-A INPUT -p udp -j ACCEPT
|
||||
-A FORWARD -j DOCKER-USER
|
||||
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
||||
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
-A FORWARD -o docker0 -j DOCKER
|
||||
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
|
||||
-A FORWARD -i docker0 -o docker0 -j ACCEPT
|
||||
-A FORWARD -p tcp -j ACCEPT
|
||||
-A FORWARD -p udp -j ACCEPT
|
||||
-A FORWARD -p ipv6-icmp -j ACCEPT
|
||||
-A OUTPUT -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
|
||||
-A OUTPUT -o lo -j ACCEPT
|
||||
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
|
||||
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
|
||||
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
|
||||
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
|
||||
-A DOCKER-USER -j RETURN
|
||||
COMMIT
|
||||
|
||||
runcmd:
|
||||
- sudo ip6tables-restore < /etc/iptables/rules.v6
|
||||
- systemctl daemon-reload
|
||||
- systemctl start otel-collector.service
|
||||
|
||||
swap:
|
||||
filename: /swapfile
|
||||
size: ${swap_size_gb}G
|
||||
maxsize: ${swap_size_gb}G
|
||||
@@ -1,321 +0,0 @@
|
||||
variable "project_id" {
|
||||
type = string
|
||||
description = "ID of a Google Cloud Project"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Compute
|
||||
################################################################################
|
||||
|
||||
variable "compute_instance_type" {
|
||||
type = string
|
||||
description = "Type of the instance."
|
||||
default = "n1-standard-1"
|
||||
}
|
||||
|
||||
variable "compute_instance_region" {
|
||||
type = string
|
||||
description = "Region which would be used to create compute resources."
|
||||
}
|
||||
|
||||
variable "compute_instance_availability_zones" {
|
||||
type = list(string)
|
||||
description = "List of availability zone for the VMs. It must be in the same region as `var.compute_instance_region`."
|
||||
}
|
||||
|
||||
variable "compute_boot_disk_type" {
|
||||
type = string
|
||||
default = "pd-ssd"
|
||||
description = "Type of the boot disk."
|
||||
}
|
||||
|
||||
variable "compute_swap_size_gb" {
|
||||
type = number
|
||||
default = 0
|
||||
description = "Size of the swap disk in GB."
|
||||
}
|
||||
|
||||
variable "queue_count" {
|
||||
type = number
|
||||
default = 2
|
||||
description = "Number of max RX / TX queues to assign to the NIC."
|
||||
|
||||
validation {
|
||||
condition = var.queue_count >= 2
|
||||
error_message = "queue_count must be greater than or equal to 2."
|
||||
}
|
||||
|
||||
validation {
|
||||
condition = var.queue_count % 2 == 0
|
||||
error_message = "queue_count must be an even number."
|
||||
}
|
||||
|
||||
validation {
|
||||
condition = var.queue_count <= 16
|
||||
error_message = "queue_count must be less than or equal to 16."
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## VPC
|
||||
################################################################################
|
||||
|
||||
variable "vpc_network" {
|
||||
description = "ID of a VPC which will be used to deploy the application."
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "vpc_subnetwork" {
|
||||
description = "ID of a VPC subnet which will be used to deploy the application."
|
||||
type = string
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Container Registry
|
||||
################################################################################
|
||||
|
||||
variable "container_registry" {
|
||||
type = string
|
||||
nullable = false
|
||||
description = "Container registry URL to pull the image from."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Container Image
|
||||
################################################################################
|
||||
|
||||
variable "image_repo" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Repo of a container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image_tag" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Scaling
|
||||
################################################################################
|
||||
|
||||
variable "scaling_horizontal_replicas" {
|
||||
type = number
|
||||
nullable = false
|
||||
default = 1
|
||||
|
||||
validation {
|
||||
condition = var.scaling_horizontal_replicas > 0
|
||||
error_message = "Number of replicas should be greater or equal to 0."
|
||||
}
|
||||
|
||||
description = "Number of replicas in an instance group."
|
||||
}
|
||||
|
||||
variable "scaling_max_horizontal_replicas" {
|
||||
type = number
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Maximum number of replacias an instance group can be auto-scaled to. `null` disables auto-scaling."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Observability
|
||||
################################################################################
|
||||
|
||||
variable "observability_log_level" {
|
||||
type = string
|
||||
nullable = false
|
||||
default = "info"
|
||||
|
||||
validation {
|
||||
condition = (
|
||||
contains(
|
||||
["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
|
||||
var.observability_log_level
|
||||
)
|
||||
)
|
||||
error_message = "Only Elixir Logger log levels are accepted."
|
||||
}
|
||||
|
||||
description = "Sets LOG_LEVEL environment variable which applications should use to configure Elixir Logger. Default: 'info'."
|
||||
}
|
||||
|
||||
variable "otel_config" {
|
||||
type = string
|
||||
description = "otel-collector YAML config content"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Erlang
|
||||
################################################################################
|
||||
|
||||
variable "erlang_release_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = <<EOT
|
||||
Name of an Erlang/Elixir release which should correspond to shell executable name which is used to run the container.
|
||||
|
||||
By default an `var.image_tag` with `-` replaced to `_` would be used.
|
||||
EOT
|
||||
}
|
||||
|
||||
variable "erlang_cluster_cookie" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Value of the Erlang cluster cookie."
|
||||
}
|
||||
|
||||
|
||||
variable "erlang_cluster_disterl_port" {
|
||||
type = number
|
||||
nullable = false
|
||||
default = 10000
|
||||
|
||||
description = <<EOT
|
||||
Sets the `LISTEN_DIST_MIN` and `LISTEN_DIST_MAX` environment variables that can be used by setting
|
||||
`ELIXIR_ERL_OPTIONS="-kernel inet_dist_listen_min $\{LISTEN_DIST_MIN} inet_dist_listen_max $\{LISTEN_DIST_MAX}"`
|
||||
option in `env.sh.eex` for Elixir release.
|
||||
|
||||
This helps when you want to forward the port from localhost to the cluster and connect to a remote Elixir node debugging
|
||||
it in production.
|
||||
|
||||
Default: 10000.
|
||||
EOT
|
||||
}
|
||||
|
||||
variable "erlang_cluster_node_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = <<EOT
|
||||
Name of the node in the Erlang cluster. Defaults to `replace(var.image_name, "_", "-")`.
|
||||
EOT
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## DNS
|
||||
################################################################################
|
||||
|
||||
variable "dns_managed_zone_name" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Name of the DNS managed zone."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Application
|
||||
################################################################################
|
||||
|
||||
variable "application_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Name of the application. Defaults to value of `var.image_name` with `_` replaced to `-`."
|
||||
}
|
||||
|
||||
variable "application_version" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Version of the application. Defaults to value of `var.image_tag`."
|
||||
}
|
||||
|
||||
variable "application_labels" {
|
||||
type = map(string)
|
||||
nullable = false
|
||||
default = {}
|
||||
|
||||
description = "Labels to add to all created by this module resources."
|
||||
}
|
||||
|
||||
variable "application_token_scopes" {
|
||||
type = list(string)
|
||||
nullable = false
|
||||
default = []
|
||||
|
||||
description = "Any extra oAuth2 token scopes granted to the token of default service account."
|
||||
}
|
||||
|
||||
variable "application_dns_tld" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "DNS host which will be used to create DNS records for the application and provision SSL-certificates."
|
||||
}
|
||||
|
||||
variable "application_cdn_enabled" {
|
||||
type = bool
|
||||
nullable = false
|
||||
default = false
|
||||
|
||||
description = "Enable CDN for all static assets the application."
|
||||
}
|
||||
|
||||
variable "application_ports" {
|
||||
type = list(object({
|
||||
name = string
|
||||
protocol = string
|
||||
port = number
|
||||
|
||||
health_check = object({
|
||||
initial_delay_sec = number
|
||||
check_interval_sec = optional(number)
|
||||
timeout_sec = optional(number)
|
||||
healthy_threshold = optional(number)
|
||||
unhealthy_threshold = optional(number)
|
||||
|
||||
tcp_health_check = optional(object({}))
|
||||
|
||||
http_health_check = optional(object({
|
||||
host = optional(string)
|
||||
request_path = optional(string)
|
||||
port = optional(string)
|
||||
response = optional(string)
|
||||
}))
|
||||
|
||||
https_health_check = optional(object({
|
||||
host = optional(string)
|
||||
request_path = optional(string)
|
||||
port = optional(string)
|
||||
response = optional(string)
|
||||
}))
|
||||
})
|
||||
}))
|
||||
|
||||
nullable = false
|
||||
default = []
|
||||
|
||||
description = "List of ports to expose for the application. One of ports MUST be named 'http' for auto healing policy to work."
|
||||
}
|
||||
|
||||
variable "application_environment_variables" {
|
||||
type = list(object({
|
||||
name = string
|
||||
value = string
|
||||
}))
|
||||
|
||||
nullable = false
|
||||
default = []
|
||||
|
||||
description = "List of environment variables to set for all application containers."
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
# Terraform Google Gateway
|
||||
|
||||
This module has been moved to a dedicated repisotry [here](https://github.com/firezone/terraform-google-gateway).
|
||||
@@ -1,54 +0,0 @@
|
||||
|
||||
# Create IAM role for the application instances
|
||||
resource "google_service_account" "application" {
|
||||
project = var.project_id
|
||||
|
||||
account_id = "app-${local.application_name}"
|
||||
display_name = "${local.application_name} app"
|
||||
description = "Service account for ${local.application_name} application instances."
|
||||
}
|
||||
|
||||
## Allow fluentbit to injest logs
|
||||
resource "google_project_iam_member" "logs" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/logging.logWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting application errors
|
||||
resource "google_project_iam_member" "errors" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/errorreporting.writer"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "metrics" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/monitoring.metricWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "service_management" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/servicemanagement.reporter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow appending traces
|
||||
resource "google_project_iam_member" "cloudtrace" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/cloudtrace.agent"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
@@ -1,199 +0,0 @@
|
||||
locals {
|
||||
application_name = var.application_name != null ? var.application_name : var.image
|
||||
application_version = var.application_version != null ? var.application_version : var.image_tag
|
||||
|
||||
application_labels = merge({
|
||||
managed_by = "terraform"
|
||||
application = local.application_name
|
||||
}, var.application_labels)
|
||||
|
||||
application_tags = ["app-${local.application_name}"]
|
||||
|
||||
google_health_check_ip_ranges = [
|
||||
"130.211.0.0/22",
|
||||
"35.191.0.0/16"
|
||||
]
|
||||
|
||||
environment_variables = concat([
|
||||
{
|
||||
name = "GOOGLE_CLOUD_PROJECT_ID"
|
||||
value = var.project_id
|
||||
}
|
||||
], var.application_environment_variables)
|
||||
}
|
||||
|
||||
# Fetch most recent COS image
|
||||
data "google_compute_image" "coreos" {
|
||||
family = "cos-113-lts"
|
||||
project = "cos-cloud"
|
||||
}
|
||||
|
||||
# Deploy app
|
||||
resource "google_compute_address" "metabase" {
|
||||
project = var.project_id
|
||||
|
||||
region = var.compute_region
|
||||
name = "metabase"
|
||||
subnetwork = var.compute_subnetwork
|
||||
|
||||
address_type = "INTERNAL"
|
||||
}
|
||||
|
||||
# Reserve instances for the metabase
|
||||
# If you don't reserve them deployment takes much longer and there is no guarantee that instances will be created at all,
|
||||
# Google Cloud Platform does not guarantee that instances will be available when you need them.
|
||||
resource "google_compute_reservation" "reservation" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-${var.compute_instance_availability_zone}-${var.compute_instance_type}"
|
||||
zone = var.compute_instance_availability_zone
|
||||
|
||||
specific_reservation_required = true
|
||||
|
||||
specific_reservation {
|
||||
count = 1
|
||||
|
||||
instance_properties {
|
||||
machine_type = var.compute_instance_type
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance" "metabase" {
|
||||
project = var.project_id
|
||||
|
||||
name = local.application_name
|
||||
description = "This template is used to create ${local.application_name} instances."
|
||||
|
||||
zone = var.compute_instance_availability_zone
|
||||
|
||||
machine_type = var.compute_instance_type
|
||||
|
||||
can_ip_forward = true
|
||||
|
||||
tags = local.application_tags
|
||||
|
||||
labels = merge({
|
||||
container-vm = data.google_compute_image.coreos.name
|
||||
version = local.application_version
|
||||
}, local.application_labels)
|
||||
|
||||
scheduling {
|
||||
automatic_restart = true
|
||||
on_host_maintenance = "MIGRATE"
|
||||
provisioning_model = "STANDARD"
|
||||
}
|
||||
|
||||
reservation_affinity {
|
||||
type = "SPECIFIC_RESERVATION"
|
||||
|
||||
specific_reservation {
|
||||
key = "compute.googleapis.com/reservation-name"
|
||||
values = ["${local.application_name}-${var.compute_instance_availability_zone}-${var.compute_instance_type}"]
|
||||
}
|
||||
}
|
||||
|
||||
boot_disk {
|
||||
auto_delete = true
|
||||
|
||||
initialize_params {
|
||||
image = data.google_compute_image.coreos.self_link
|
||||
|
||||
labels = {
|
||||
managed_by = "terraform"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = var.compute_subnetwork
|
||||
|
||||
stack_type = "IPV4_ONLY"
|
||||
|
||||
network_ip = google_compute_address.metabase.address
|
||||
|
||||
access_config {
|
||||
network_tier = "PREMIUM"
|
||||
# Ephemeral IP address
|
||||
}
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = google_service_account.application.email
|
||||
|
||||
scopes = [
|
||||
# Those are default scopes
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/logging.write",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
"https://www.googleapis.com/auth/service.management.readonly",
|
||||
"https://www.googleapis.com/auth/servicecontrol",
|
||||
"https://www.googleapis.com/auth/trace.append",
|
||||
]
|
||||
}
|
||||
|
||||
shielded_instance_config {
|
||||
enable_integrity_monitoring = true
|
||||
enable_secure_boot = false
|
||||
enable_vtpm = true
|
||||
}
|
||||
|
||||
metadata = {
|
||||
gce-container-declaration = yamlencode({
|
||||
spec = {
|
||||
containers = [{
|
||||
name = local.application_name != null ? local.application_name : var.image
|
||||
image = "${var.image_repo}/${var.image}:${var.image_tag}"
|
||||
env = local.environment_variables
|
||||
}]
|
||||
|
||||
volumes = []
|
||||
|
||||
restartPolicy = "Always"
|
||||
}
|
||||
})
|
||||
|
||||
google-logging-enabled = "true"
|
||||
google-logging-use-fluentbit = "true"
|
||||
|
||||
# Report health-related metrics to Cloud Monitoring
|
||||
google-monitoring-enabled = "true"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
google_project_iam_member.logs,
|
||||
google_project_iam_member.errors,
|
||||
google_project_iam_member.metrics,
|
||||
google_project_iam_member.service_management,
|
||||
google_project_iam_member.cloudtrace,
|
||||
google_compute_reservation.reservation,
|
||||
]
|
||||
|
||||
allow_stopping_for_update = true
|
||||
}
|
||||
|
||||
## Open metrics port for the health checks
|
||||
resource "google_compute_firewall" "http-health-checks" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-healthcheck"
|
||||
network = var.compute_network
|
||||
|
||||
source_ranges = local.google_health_check_ip_ranges
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
allow {
|
||||
protocol = var.health_check.protocol
|
||||
ports = [var.health_check.port]
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
output "service_account" {
|
||||
value = google_service_account.application
|
||||
}
|
||||
|
||||
output "target_tags" {
|
||||
value = local.application_tags
|
||||
}
|
||||
|
||||
output "instance" {
|
||||
value = google_compute_instance.metabase
|
||||
}
|
||||
|
||||
output "internal_ip" {
|
||||
value = google_compute_address.metabase.address
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
resource "google_project_service" "compute" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "pubsub" {
|
||||
project = var.project_id
|
||||
service = "pubsub.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "bigquery" {
|
||||
project = var.project_id
|
||||
service = "bigquery.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "container" {
|
||||
project = var.project_id
|
||||
service = "container.googleapis.com"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
]
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "stackdriver" {
|
||||
project = var.project_id
|
||||
service = "stackdriver.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "logging" {
|
||||
project = var.project_id
|
||||
service = "logging.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "monitoring" {
|
||||
project = var.project_id
|
||||
service = "monitoring.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudprofiler" {
|
||||
project = var.project_id
|
||||
service = "cloudprofiler.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudtrace" {
|
||||
project = var.project_id
|
||||
service = "cloudtrace.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "servicenetworking" {
|
||||
project = var.project_id
|
||||
service = "servicenetworking.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,119 +0,0 @@
|
||||
variable "project_id" {
|
||||
type = string
|
||||
description = "ID of a Google Cloud Project"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Compute
|
||||
################################################################################
|
||||
|
||||
variable "compute_network" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_subnetwork" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_region" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_instance_availability_zone" {
|
||||
type = string
|
||||
description = "List of zones in the region defined in `compute_region` where replicas should be deployed."
|
||||
}
|
||||
|
||||
variable "compute_instance_type" {
|
||||
type = string
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Container Image
|
||||
################################################################################
|
||||
|
||||
variable "image_repo" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Repo of a container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image_tag" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Application
|
||||
################################################################################
|
||||
|
||||
variable "application_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Name of the application. Defaults to value of `var.image_name` with `_` replaced to `-`."
|
||||
}
|
||||
|
||||
variable "application_version" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Version of the application. Defaults to value of `var.image_tag`."
|
||||
}
|
||||
|
||||
variable "application_labels" {
|
||||
type = map(string)
|
||||
nullable = false
|
||||
default = {}
|
||||
|
||||
description = "Labels to add to all created by this module resources."
|
||||
}
|
||||
|
||||
variable "health_check" {
|
||||
type = object({
|
||||
name = string
|
||||
protocol = string
|
||||
port = number
|
||||
|
||||
initial_delay_sec = number
|
||||
check_interval_sec = optional(number)
|
||||
timeout_sec = optional(number)
|
||||
healthy_threshold = optional(number)
|
||||
unhealthy_threshold = optional(number)
|
||||
|
||||
http_health_check = optional(object({
|
||||
host = optional(string)
|
||||
request_path = optional(string)
|
||||
port = optional(string)
|
||||
response = optional(string)
|
||||
}))
|
||||
})
|
||||
|
||||
nullable = false
|
||||
|
||||
description = "Health check which will be used for auto healing policy."
|
||||
}
|
||||
|
||||
variable "application_environment_variables" {
|
||||
type = list(object({
|
||||
name = string
|
||||
value = string
|
||||
}))
|
||||
|
||||
nullable = false
|
||||
default = []
|
||||
|
||||
description = "List of environment variables to set for all application containers."
|
||||
}
|
||||
@@ -1,484 +0,0 @@
|
||||
locals {
|
||||
application_name = var.application_name != null ? var.application_name : var.image
|
||||
application_version = var.application_version != null ? var.application_version : var.image_tag
|
||||
|
||||
application_labels = merge({
|
||||
managed_by = "terraform"
|
||||
application = local.application_name
|
||||
}, var.application_labels)
|
||||
|
||||
google_health_check_ip_ranges = [
|
||||
"130.211.0.0/22",
|
||||
"35.191.0.0/16"
|
||||
]
|
||||
|
||||
environment_variables = concat([
|
||||
{
|
||||
name = "LISTEN_ADDRESS_DISCOVERY_METHOD"
|
||||
value = "gce_metadata"
|
||||
},
|
||||
{
|
||||
name = "OTEL_METADATA_DISCOVERY_METHOD"
|
||||
value = "gce_metadata"
|
||||
},
|
||||
{
|
||||
name = "RUST_LOG"
|
||||
value = var.observability_log_level
|
||||
},
|
||||
{
|
||||
name = "RUST_BACKTRACE"
|
||||
value = "full"
|
||||
},
|
||||
{
|
||||
name = "LOG_FORMAT"
|
||||
value = "google-cloud"
|
||||
},
|
||||
{
|
||||
name = "GOOGLE_CLOUD_PROJECT_ID"
|
||||
value = var.project_id
|
||||
},
|
||||
{
|
||||
name = "OTLP_GRPC_ENDPOINT"
|
||||
value = "127.0.0.1:4317"
|
||||
},
|
||||
{
|
||||
name = "FIREZONE_TOKEN"
|
||||
value = var.token
|
||||
},
|
||||
{
|
||||
name = "FIREZONE_API_URL"
|
||||
value = var.api_url
|
||||
},
|
||||
{
|
||||
name = "EBPF_OFFLOADING"
|
||||
value = "eth0"
|
||||
}
|
||||
], var.application_environment_variables)
|
||||
}
|
||||
|
||||
# Fetch most recent COS image
|
||||
data "google_compute_image" "coreos" {
|
||||
family = "cos-117-lts"
|
||||
project = "cos-cloud"
|
||||
}
|
||||
|
||||
# Create IAM role for the application instances
|
||||
resource "google_service_account" "application" {
|
||||
project = var.project_id
|
||||
|
||||
account_id = "app-${local.application_name}-${var.naming_suffix}"
|
||||
display_name = "${local.application_name} app"
|
||||
description = "Service account for ${local.application_name} application instances."
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
## Allow application service account to pull images from the container registry
|
||||
resource "google_project_iam_member" "artifacts" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/artifactregistry.reader"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow fluentbit to injest logs
|
||||
resource "google_project_iam_member" "logs" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/logging.logWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting application errors
|
||||
resource "google_project_iam_member" "errors" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/errorreporting.writer"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "metrics" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/monitoring.metricWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "service_management" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/servicemanagement.reporter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow appending traces
|
||||
resource "google_project_iam_member" "cloudtrace" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/cloudtrace.agent"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
resource "google_compute_reservation" "relay_reservation" {
|
||||
for_each = var.instances
|
||||
|
||||
project = var.project_id
|
||||
|
||||
# IMPORTANT: Keep the instance_group_manager reservation name in sync with this one
|
||||
name = "relays-${element(each.value.zones, length(each.value.zones) - 1)}-${each.value.type}-${var.naming_suffix}"
|
||||
|
||||
zone = element(each.value.zones, length(each.value.zones) - 1)
|
||||
|
||||
specific_reservation_required = true
|
||||
|
||||
specific_reservation {
|
||||
count = each.value.replicas
|
||||
|
||||
instance_properties {
|
||||
machine_type = each.value.type
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
# Deploy app
|
||||
resource "google_compute_instance_template" "application" {
|
||||
for_each = var.instances
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name_prefix = "${local.application_name}-${each.key}-${var.naming_suffix}-"
|
||||
|
||||
description = "This template is used to create ${local.application_name} instances using Terraform."
|
||||
|
||||
machine_type = each.value.type
|
||||
|
||||
can_ip_forward = false
|
||||
|
||||
tags = ["app-${local.application_name}"]
|
||||
|
||||
labels = merge({
|
||||
container-vm = data.google_compute_image.coreos.name
|
||||
version = local.application_version
|
||||
}, local.application_labels)
|
||||
|
||||
scheduling {
|
||||
automatic_restart = true
|
||||
on_host_maintenance = "MIGRATE"
|
||||
provisioning_model = "STANDARD"
|
||||
}
|
||||
|
||||
reservation_affinity {
|
||||
type = "SPECIFIC_RESERVATION"
|
||||
|
||||
specific_reservation {
|
||||
key = "compute.googleapis.com/reservation-name"
|
||||
|
||||
# Keep this up to date with the relay reservation name
|
||||
values = ["relays-${element(each.value.zones, length(each.value.zones) - 1)}-${each.value.type}-${var.naming_suffix}"]
|
||||
}
|
||||
}
|
||||
|
||||
disk {
|
||||
source_image = data.google_compute_image.coreos.self_link
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = var.instances[each.key].subnet
|
||||
|
||||
nic_type = "GVNIC"
|
||||
queue_count = var.queue_count
|
||||
|
||||
stack_type = "IPV4_IPV6"
|
||||
|
||||
ipv6_access_config {
|
||||
network_tier = "PREMIUM"
|
||||
# Ephemeral IP address
|
||||
}
|
||||
|
||||
access_config {
|
||||
network_tier = "PREMIUM"
|
||||
# Ephemeral IP address
|
||||
}
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = google_service_account.application.email
|
||||
|
||||
scopes = [
|
||||
# Those are default scopes
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/logging.write",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
"https://www.googleapis.com/auth/service.management.readonly",
|
||||
"https://www.googleapis.com/auth/servicecontrol",
|
||||
"https://www.googleapis.com/auth/trace.append",
|
||||
]
|
||||
}
|
||||
|
||||
shielded_instance_config {
|
||||
enable_integrity_monitoring = true
|
||||
enable_secure_boot = false
|
||||
enable_vtpm = true
|
||||
}
|
||||
|
||||
metadata = {
|
||||
# RX and TX queue count should be half of queue_count above.
|
||||
startup-script = "#!/bin/sh\nethtool -L eth0 rx ${var.queue_count / 2} tx ${var.queue_count / 2}"
|
||||
|
||||
gce-container-declaration = yamlencode({
|
||||
spec = {
|
||||
containers = [{
|
||||
name = local.application_name != null ? local.application_name : var.image
|
||||
image = "${var.container_registry}/${var.image_repo}/${var.image}:${var.image_tag}"
|
||||
env = local.environment_variables
|
||||
securityContext = {
|
||||
privileged = true # For loading eBPF programs
|
||||
}
|
||||
}]
|
||||
|
||||
volumes = []
|
||||
|
||||
restartPolicy = "Always"
|
||||
}
|
||||
})
|
||||
|
||||
user-data = templatefile("${path.module}/templates/cloud-init.yaml", {})
|
||||
|
||||
google-logging-enabled = "true"
|
||||
google-logging-use-fluentbit = "true"
|
||||
|
||||
# Report health-related metrics to Cloud Monitoring
|
||||
google-monitoring-enabled = "true"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
google_project_iam_member.artifacts,
|
||||
google_project_iam_member.logs,
|
||||
google_project_iam_member.errors,
|
||||
google_project_iam_member.metrics,
|
||||
google_project_iam_member.service_management,
|
||||
google_project_iam_member.cloudtrace,
|
||||
google_compute_reservation.relay_reservation,
|
||||
]
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
# Create health checks for the application ports
|
||||
resource "google_compute_health_check" "port" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-${var.health_check.name}-${var.naming_suffix}"
|
||||
|
||||
check_interval_sec = var.health_check.check_interval_sec != null ? var.health_check.check_interval_sec : 5
|
||||
timeout_sec = var.health_check.timeout_sec != null ? var.health_check.timeout_sec : 5
|
||||
healthy_threshold = var.health_check.healthy_threshold != null ? var.health_check.healthy_threshold : 2
|
||||
unhealthy_threshold = var.health_check.unhealthy_threshold != null ? var.health_check.unhealthy_threshold : 2
|
||||
|
||||
log_config {
|
||||
enable = false
|
||||
}
|
||||
|
||||
http_health_check {
|
||||
port = var.health_check.port
|
||||
|
||||
host = var.health_check.http_health_check.host
|
||||
request_path = var.health_check.http_health_check.request_path
|
||||
response = var.health_check.http_health_check.response
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
# Use template to deploy zonal instance group
|
||||
resource "google_compute_region_instance_group_manager" "application" {
|
||||
for_each = var.instances
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-group-${each.key}-${var.naming_suffix}"
|
||||
|
||||
base_instance_name = local.application_name
|
||||
|
||||
region = each.key
|
||||
distribution_policy_zones = each.value.zones
|
||||
|
||||
target_size = each.value.replicas
|
||||
|
||||
wait_for_instances = true
|
||||
wait_for_instances_status = "STABLE"
|
||||
|
||||
version {
|
||||
name = local.application_version
|
||||
instance_template = google_compute_instance_template.application[each.key].self_link
|
||||
}
|
||||
|
||||
named_port {
|
||||
name = "stun"
|
||||
port = 3478
|
||||
}
|
||||
|
||||
auto_healing_policies {
|
||||
initial_delay_sec = var.health_check.initial_delay_sec
|
||||
|
||||
health_check = google_compute_health_check.port.self_link
|
||||
}
|
||||
|
||||
update_policy {
|
||||
type = "PROACTIVE"
|
||||
minimal_action = "REPLACE"
|
||||
|
||||
# For all regions we need to take one replica down first because the reservation
|
||||
# won't allow to create surge instances
|
||||
# max_unavailable_fixed = each.value.replicas == 1 ? 0 : 1
|
||||
max_unavailable_fixed = 1
|
||||
|
||||
# Reservations won't allow surge instances
|
||||
# max_surge_fixed = max(length(each.value.zones), each.value.replicas - 1)
|
||||
max_surge_fixed = 0
|
||||
}
|
||||
|
||||
timeouts {
|
||||
create = "30m"
|
||||
update = "30m"
|
||||
delete = "20m"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_compute_instance_template.application
|
||||
]
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
# TODO: Rate limit requests to the relays by source IP address
|
||||
|
||||
# Open ports for STUN and TURN
|
||||
resource "google_compute_firewall" "stun-turn-ipv4" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-firewall-lb-to-instances-ipv4-${var.naming_suffix}"
|
||||
network = var.network
|
||||
|
||||
source_ranges = ["0.0.0.0/0"]
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
allow {
|
||||
protocol = "udp"
|
||||
ports = ["3478", "49152-65535"]
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "stun-turn-ipv6" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-firewall-lb-to-instances-ipv6-${var.naming_suffix}"
|
||||
network = var.network
|
||||
|
||||
source_ranges = ["::/0"]
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
allow {
|
||||
protocol = "udp"
|
||||
ports = ["3478", "49152-65535"]
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
## Open metrics port for the health checks
|
||||
resource "google_compute_firewall" "http-health-checks" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-healthcheck-${var.naming_suffix}"
|
||||
network = var.network
|
||||
|
||||
source_ranges = local.google_health_check_ip_ranges
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
|
||||
allow {
|
||||
protocol = var.health_check.protocol
|
||||
ports = [var.health_check.port]
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
# Allow outbound traffic
|
||||
resource "google_compute_firewall" "egress-ipv4" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-egress-ipv4-${var.naming_suffix}"
|
||||
network = var.network
|
||||
direction = "EGRESS"
|
||||
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
destination_ranges = ["0.0.0.0/0"]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_firewall" "egress-ipv6" {
|
||||
project = var.project_id
|
||||
|
||||
name = "${local.application_name}-egress-ipv6-${var.naming_suffix}"
|
||||
network = var.network
|
||||
direction = "EGRESS"
|
||||
|
||||
target_tags = ["app-${local.application_name}"]
|
||||
destination_ranges = ["::/0"]
|
||||
|
||||
allow {
|
||||
protocol = "all"
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
output "service_account" {
|
||||
value = google_service_account.application
|
||||
}
|
||||
|
||||
output "target_tags" {
|
||||
value = ["app-${local.application_name}"]
|
||||
}
|
||||
|
||||
output "instances" {
|
||||
value = var.instances
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
|
||||
resource "google_project_service" "compute" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "pubsub" {
|
||||
project = var.project_id
|
||||
service = "pubsub.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "bigquery" {
|
||||
project = var.project_id
|
||||
service = "bigquery.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "container" {
|
||||
project = var.project_id
|
||||
service = "container.googleapis.com"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
]
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "stackdriver" {
|
||||
project = var.project_id
|
||||
service = "stackdriver.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "logging" {
|
||||
project = var.project_id
|
||||
service = "logging.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "monitoring" {
|
||||
project = var.project_id
|
||||
service = "monitoring.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudprofiler" {
|
||||
project = var.project_id
|
||||
service = "cloudprofiler.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudtrace" {
|
||||
project = var.project_id
|
||||
service = "cloudtrace.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "servicenetworking" {
|
||||
project = var.project_id
|
||||
service = "servicenetworking.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
#cloud-config
|
||||
|
||||
users:
|
||||
- name: cloudservice
|
||||
uid: 2000
|
||||
|
||||
write_files:
|
||||
- path: /etc/otelcol-contrib/config.yaml
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: localhost:4317
|
||||
exporters:
|
||||
googlecloud:
|
||||
log:
|
||||
default_log_name: opentelemetry.io/collector-exported-log
|
||||
processors:
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_percentage: 65
|
||||
spike_limit_percentage: 20
|
||||
batch:
|
||||
resourcedetection:
|
||||
detectors: [gcp]
|
||||
timeout: 10s
|
||||
transform:
|
||||
# Several metrics labels are reserved on Google Cloud. We need to prefix them with `exported_` to prevent the exporter from failing.
|
||||
# See https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/googlecloudexporter/README.md#preventing-metric-label-collisions for example.
|
||||
metric_statements:
|
||||
- context: datapoint
|
||||
statements:
|
||||
- set(attributes["exported_location"], attributes["location"])
|
||||
- delete_key(attributes, "location")
|
||||
- set(attributes["exported_cluster"], attributes["cluster"])
|
||||
- delete_key(attributes, "cluster")
|
||||
- set(attributes["exported_namespace"], attributes["namespace"])
|
||||
- delete_key(attributes, "namespace")
|
||||
- set(attributes["exported_job"], attributes["job"])
|
||||
- delete_key(attributes, "job")
|
||||
- set(attributes["exported_instance"], attributes["instance"])
|
||||
- delete_key(attributes, "instance")
|
||||
- set(attributes["exported_project_id"], attributes["project_id"])
|
||||
- delete_key(attributes, "project_id")
|
||||
- set(attributes["exported_service_name"], attributes["service_name"])
|
||||
- delete_key(attributes, "service_name")
|
||||
- set(attributes["exported_service_namespace"], attributes["service_namespace"])
|
||||
- delete_key(attributes, "service_namespace")
|
||||
- set(attributes["exported_service_instance_id"], attributes["service_instance_id"])
|
||||
- delete_key(attributes, "service_instance_id")
|
||||
- set(attributes["exported_instrumentation_source"], attributes["instrumentation_source"])
|
||||
- delete_key(attributes, "instrumentation_source")
|
||||
- set(attributes["exported_instrumentation_version"], attributes["instrumentation_version"])
|
||||
- delete_key(attributes, "instrumentation_version")
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [memory_limiter, batch]
|
||||
exporters: [googlecloud]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [memory_limiter, batch, transform]
|
||||
exporters: [googlecloud]
|
||||
logs:
|
||||
receivers: [otlp]
|
||||
processors: [memory_limiter, batch]
|
||||
exporters: [googlecloud]
|
||||
|
||||
- path: /etc/systemd/system/otel-collector.service
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Start an OpenTelemetry collector docker container
|
||||
|
||||
[Service]
|
||||
TimeoutStartSec=0
|
||||
Restart=always
|
||||
ExecStartPre=/usr/bin/docker pull otel/opentelemetry-collector-contrib:0.119.0
|
||||
ExecStart=/usr/bin/docker run --rm -u 2000 --name=otel-collector --network host --volume /etc/otelcol-contrib/:/etc/otelcol-contrib/ otel/opentelemetry-collector-contrib:0.119.0
|
||||
ExecStop=/usr/bin/docker stop otel-collector
|
||||
ExecStopPost=/usr/bin/docker rm otel-collector
|
||||
|
||||
- path: /etc/iptables/rules.v6
|
||||
permissions: "0644"
|
||||
owner: root
|
||||
content: |
|
||||
*filter
|
||||
:INPUT DROP [0:0]
|
||||
:FORWARD DROP [0:0]
|
||||
:OUTPUT DROP [0:0]
|
||||
:DOCKER - [0:0]
|
||||
:DOCKER-ISOLATION-STAGE-1 - [0:0]
|
||||
:DOCKER-ISOLATION-STAGE-2 - [0:0]
|
||||
:DOCKER-USER - [0:0]
|
||||
-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
-A INPUT -i lo -j ACCEPT
|
||||
-A INPUT -p ipv6-icmp -j ACCEPT
|
||||
-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
|
||||
-A INPUT -p tcp -j ACCEPT
|
||||
-A INPUT -p udp -j ACCEPT
|
||||
-A FORWARD -j DOCKER-USER
|
||||
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
||||
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
-A FORWARD -o docker0 -j DOCKER
|
||||
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
|
||||
-A FORWARD -i docker0 -o docker0 -j ACCEPT
|
||||
-A FORWARD -p tcp -j ACCEPT
|
||||
-A FORWARD -p udp -j ACCEPT
|
||||
-A FORWARD -p ipv6-icmp -j ACCEPT
|
||||
-A OUTPUT -m state --state NEW,RELATED,ESTABLISHED -j ACCEPT
|
||||
-A OUTPUT -o lo -j ACCEPT
|
||||
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
|
||||
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
|
||||
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
|
||||
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
|
||||
-A DOCKER-USER -j RETURN
|
||||
COMMIT
|
||||
|
||||
runcmd:
|
||||
- sudo ip6tables-restore < /etc/iptables/rules.v6
|
||||
- systemctl daemon-reload
|
||||
- systemctl start otel-collector.service
|
||||
@@ -1,186 +0,0 @@
|
||||
variable "project_id" {
|
||||
type = string
|
||||
description = "ID of a Google Cloud Project"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Compute
|
||||
################################################################################
|
||||
|
||||
variable "instances" {
|
||||
type = map(object({
|
||||
subnet = string
|
||||
type = string
|
||||
replicas = number
|
||||
zones = list(string)
|
||||
}))
|
||||
|
||||
description = "List deployment locations for the application."
|
||||
}
|
||||
|
||||
variable "network" {
|
||||
type = string
|
||||
description = "ID of a Google Cloud Network"
|
||||
}
|
||||
|
||||
# Ensure instances are recreated when this is changed.
|
||||
variable "naming_suffix" {
|
||||
type = string
|
||||
description = "Suffix to append to the name of resources."
|
||||
}
|
||||
|
||||
# Maximum NIC Rx/Tx queue count. The default is 1. Adjust this based on number of vCPUs.
|
||||
# NOTE: Minimum of 2 is required for XDP programs to load onto the NIC.
|
||||
# This is because the `gve` driver expects the number of active queues to be
|
||||
# less than or equal to half the maximum number of queues.
|
||||
# The active queue count will need to be set at boot in order to be half this, because
|
||||
# gve driver defaults to setting the active queue count to the maximum.
|
||||
# NOTE 2: The maximum number here should max the number of vCPUs.
|
||||
variable "queue_count" {
|
||||
type = number
|
||||
default = 2
|
||||
description = "Number of max RX / TX queues to assign to the NIC."
|
||||
|
||||
validation {
|
||||
condition = var.queue_count >= 2
|
||||
error_message = "queue_count must be greater than or equal to 2."
|
||||
}
|
||||
|
||||
validation {
|
||||
condition = var.queue_count % 2 == 0
|
||||
error_message = "queue_count must be an even number."
|
||||
}
|
||||
|
||||
validation {
|
||||
condition = var.queue_count <= 16
|
||||
error_message = "queue_count must be less than or equal to 16."
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Container Registry
|
||||
################################################################################
|
||||
|
||||
variable "container_registry" {
|
||||
type = string
|
||||
nullable = false
|
||||
description = "Container registry URL to pull the image from."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Container Image
|
||||
################################################################################
|
||||
|
||||
variable "image_repo" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Repo of a container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
variable "image_tag" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Container image used to deploy the application."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Observability
|
||||
################################################################################
|
||||
|
||||
variable "observability_log_level" {
|
||||
type = string
|
||||
nullable = false
|
||||
default = "info"
|
||||
|
||||
description = "Sets RUST_LOG environment variable which applications should use to configure Rust Logger. Default: 'info'."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Application
|
||||
################################################################################
|
||||
|
||||
variable "application_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Name of the application. Defaults to value of `var.image_name` with `_` replaced to `-`."
|
||||
}
|
||||
|
||||
variable "application_version" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Version of the application. Defaults to value of `var.image_tag`."
|
||||
}
|
||||
|
||||
variable "application_labels" {
|
||||
type = map(string)
|
||||
nullable = false
|
||||
default = {}
|
||||
|
||||
description = "Labels to add to all created by this module resources."
|
||||
}
|
||||
|
||||
variable "health_check" {
|
||||
type = object({
|
||||
name = string
|
||||
protocol = string
|
||||
port = number
|
||||
|
||||
initial_delay_sec = number
|
||||
check_interval_sec = optional(number)
|
||||
timeout_sec = optional(number)
|
||||
healthy_threshold = optional(number)
|
||||
unhealthy_threshold = optional(number)
|
||||
|
||||
http_health_check = optional(object({
|
||||
host = optional(string)
|
||||
request_path = optional(string)
|
||||
port = optional(string)
|
||||
response = optional(string)
|
||||
}))
|
||||
})
|
||||
|
||||
nullable = false
|
||||
|
||||
description = "Health check which will be used for auto healing policy."
|
||||
}
|
||||
|
||||
variable "application_environment_variables" {
|
||||
type = list(object({
|
||||
name = string
|
||||
value = string
|
||||
}))
|
||||
|
||||
nullable = false
|
||||
default = []
|
||||
|
||||
description = "List of environment variables to set for all application containers."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Firezone
|
||||
################################################################################
|
||||
|
||||
variable "token" {
|
||||
type = string
|
||||
description = "Portal token to use for authentication."
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "api_url" {
|
||||
type = string
|
||||
default = "wss://api.firezone.dev"
|
||||
description = "URL of the control plane endpoint."
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
|
||||
# Create IAM role for the application instances
|
||||
resource "google_service_account" "application" {
|
||||
project = var.project_id
|
||||
|
||||
account_id = "vm-${local.vm_name}"
|
||||
display_name = "${local.vm_name} app"
|
||||
description = "Service account for ${local.vm_name} VM."
|
||||
}
|
||||
|
||||
## Allow fluentbit/OPS Agent to injest logs
|
||||
resource "google_project_iam_member" "logs" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/logging.logWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting application errors
|
||||
resource "google_project_iam_member" "errors" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/errorreporting.writer"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "metrics" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/monitoring.metricWriter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow reporting metrics
|
||||
resource "google_project_iam_member" "service_management" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/servicemanagement.reporter"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
|
||||
## Allow appending traces
|
||||
resource "google_project_iam_member" "cloudtrace" {
|
||||
project = var.project_id
|
||||
|
||||
role = "roles/cloudtrace.agent"
|
||||
|
||||
member = "serviceAccount:${google_service_account.application.email}"
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
locals {
|
||||
vm_name = var.vm_name
|
||||
|
||||
vm_labels = merge({
|
||||
managed_by = "terraform"
|
||||
}, var.vm_labels)
|
||||
|
||||
vm_network_tags = [var.vm_network_tag]
|
||||
|
||||
google_health_check_ip_ranges = [
|
||||
"130.211.0.0/22",
|
||||
"35.191.0.0/16"
|
||||
]
|
||||
}
|
||||
|
||||
# Find the latest boot image
|
||||
data "google_compute_image" "boot" {
|
||||
family = var.boot_image_family
|
||||
project = var.boot_image_project
|
||||
}
|
||||
|
||||
# Provision an internal IPv4 address for the VM
|
||||
resource "google_compute_address" "ipv4" {
|
||||
project = var.project_id
|
||||
|
||||
region = var.compute_region
|
||||
name = local.vm_name
|
||||
subnetwork = var.compute_subnetwork
|
||||
|
||||
address_type = "INTERNAL"
|
||||
}
|
||||
|
||||
resource "google_compute_instance" "vm" {
|
||||
project = var.project_id
|
||||
|
||||
name = local.vm_name
|
||||
description = "This template is used to create ${local.vm_name} instances."
|
||||
|
||||
zone = var.compute_instance_availability_zone
|
||||
|
||||
machine_type = var.compute_instance_type
|
||||
|
||||
can_ip_forward = true
|
||||
|
||||
tags = local.vm_network_tags
|
||||
|
||||
labels = merge({
|
||||
boot_image_family = var.boot_image_family
|
||||
boot_image_project = var.boot_image_project
|
||||
}, local.vm_labels)
|
||||
|
||||
boot_disk {
|
||||
auto_delete = true
|
||||
|
||||
initialize_params {
|
||||
image = data.google_compute_image.boot.self_link
|
||||
|
||||
labels = {
|
||||
managed_by = "terraform"
|
||||
boot_image_family = var.boot_image_family
|
||||
boot_image_project = var.boot_image_project
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = var.compute_subnetwork
|
||||
stack_type = "IPV4_ONLY"
|
||||
network_ip = google_compute_address.ipv4.address
|
||||
|
||||
access_config {
|
||||
network_tier = "PREMIUM"
|
||||
# Ephemeral IP address
|
||||
}
|
||||
}
|
||||
|
||||
service_account {
|
||||
email = google_service_account.application.email
|
||||
|
||||
scopes = [
|
||||
# Those are default scopes
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/logging.write",
|
||||
"https://www.googleapis.com/auth/monitoring.write",
|
||||
"https://www.googleapis.com/auth/service.management.readonly",
|
||||
"https://www.googleapis.com/auth/servicecontrol",
|
||||
"https://www.googleapis.com/auth/trace.append",
|
||||
]
|
||||
}
|
||||
|
||||
shielded_instance_config {
|
||||
enable_integrity_monitoring = true
|
||||
enable_secure_boot = false
|
||||
enable_vtpm = true
|
||||
}
|
||||
|
||||
metadata = {
|
||||
user-data = var.cloud_init
|
||||
|
||||
# Report logs to Cloud Logging and errors to Cloud Error Reporting
|
||||
google-logging-enabled = "true"
|
||||
google-logging-use-fluentbit = "true"
|
||||
|
||||
# Report VM metrics to Cloud Monitoring
|
||||
google-monitoring-enabled = "true"
|
||||
}
|
||||
|
||||
# Install the Ops Agent and some other tools that are helpful for debugging (curl, jq, etc.)
|
||||
metadata_startup_script = <<EOT
|
||||
set -xe \
|
||||
&& sudo apt update -y \
|
||||
&& sudo apt install -y apt-transport-https ca-certificates curl jq software-properties-common \
|
||||
&& sudo install -m 0755 -d /etc/apt/keyrings \
|
||||
&& sudo apt-get update \
|
||||
&& curl -sSO https://dl.google.com/cloudagents/add-google-cloud-ops-agent-repo.sh \
|
||||
&& sudo bash add-google-cloud-ops-agent-repo.sh --also-install
|
||||
EOT
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
google_project_service.container,
|
||||
google_project_service.stackdriver,
|
||||
google_project_service.logging,
|
||||
google_project_service.monitoring,
|
||||
google_project_service.cloudprofiler,
|
||||
google_project_service.cloudtrace,
|
||||
google_project_service.servicenetworking,
|
||||
google_project_iam_member.logs,
|
||||
google_project_iam_member.errors,
|
||||
google_project_iam_member.metrics,
|
||||
google_project_iam_member.service_management,
|
||||
google_project_iam_member.cloudtrace,
|
||||
]
|
||||
|
||||
allow_stopping_for_update = true
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
output "service_account" {
|
||||
value = google_service_account.application
|
||||
}
|
||||
|
||||
output "target_tags" {
|
||||
value = local.vm_network_tags
|
||||
}
|
||||
|
||||
output "instance" {
|
||||
value = google_compute_instance.vm
|
||||
}
|
||||
|
||||
output "internal_ipv4_address" {
|
||||
value = google_compute_address.ipv4.address
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
resource "google_project_service" "compute" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "pubsub" {
|
||||
project = var.project_id
|
||||
service = "pubsub.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "bigquery" {
|
||||
project = var.project_id
|
||||
service = "bigquery.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "container" {
|
||||
project = var.project_id
|
||||
service = "container.googleapis.com"
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute,
|
||||
google_project_service.pubsub,
|
||||
google_project_service.bigquery,
|
||||
]
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "stackdriver" {
|
||||
project = var.project_id
|
||||
service = "stackdriver.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "logging" {
|
||||
project = var.project_id
|
||||
service = "logging.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "monitoring" {
|
||||
project = var.project_id
|
||||
service = "monitoring.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudprofiler" {
|
||||
project = var.project_id
|
||||
service = "cloudprofiler.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "cloudtrace" {
|
||||
project = var.project_id
|
||||
service = "cloudtrace.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
|
||||
depends_on = [google_project_service.stackdriver]
|
||||
}
|
||||
|
||||
resource "google_project_service" "servicenetworking" {
|
||||
project = var.project_id
|
||||
service = "servicenetworking.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
variable "project_id" {
|
||||
type = string
|
||||
description = "ID of a Google Cloud Project"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Compute
|
||||
################################################################################
|
||||
|
||||
variable "compute_network" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_subnetwork" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_region" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "compute_instance_availability_zone" {
|
||||
type = string
|
||||
description = "List of zones in the region defined in `compute_region` where replicas should be deployed."
|
||||
}
|
||||
|
||||
variable "compute_instance_type" {
|
||||
type = string
|
||||
description = "Machine type to use for the instances."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Boot Image
|
||||
################################################################################
|
||||
|
||||
variable "boot_image_family" {
|
||||
type = string
|
||||
description = "Family of the boot image to use for the instances."
|
||||
default = "ubuntu-2204-lts"
|
||||
}
|
||||
|
||||
variable "boot_image_project" {
|
||||
type = string
|
||||
description = "Project of the boot image to use for the instances."
|
||||
default = "ubuntu-os-cloud"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Virtual Machine
|
||||
################################################################################
|
||||
|
||||
variable "vm_name" {
|
||||
type = string
|
||||
nullable = true
|
||||
default = null
|
||||
|
||||
description = "Name of the VM to create."
|
||||
}
|
||||
|
||||
variable "vm_labels" {
|
||||
type = map(string)
|
||||
nullable = false
|
||||
default = {}
|
||||
|
||||
description = "Labels to add to all created by this module resources."
|
||||
}
|
||||
|
||||
variable "vm_network_tag" {
|
||||
type = string
|
||||
nullable = false
|
||||
|
||||
description = "Network tags to add to VM created by this module."
|
||||
}
|
||||
|
||||
################################################################################
|
||||
## Cloud-init Configuration
|
||||
################################################################################
|
||||
|
||||
variable "cloud_init" {
|
||||
type = string
|
||||
description = "Cloud-init configuration to use for the VM."
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
resource "google_project_service" "artifactregistry" {
|
||||
project = var.project_id
|
||||
service = "artifactregistry.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_artifact_registry_repository" "firezone" {
|
||||
provider = google-beta
|
||||
project = var.project_id
|
||||
|
||||
location = var.region
|
||||
repository_id = "firezone"
|
||||
description = "Repository for storing Docker images in the ${var.project_name}."
|
||||
|
||||
format = "DOCKER"
|
||||
|
||||
# It's false by default but setting it explicitly produces unwanted state diff
|
||||
# docker_config {
|
||||
# immutable_tags = false
|
||||
# }
|
||||
|
||||
cleanup_policies {
|
||||
id = "keep-latest-release"
|
||||
action = "KEEP"
|
||||
|
||||
condition {
|
||||
tag_state = "TAGGED"
|
||||
tag_prefixes = ["latest"]
|
||||
}
|
||||
}
|
||||
|
||||
cleanup_policies {
|
||||
id = "keep-minimum-versions"
|
||||
action = "KEEP"
|
||||
|
||||
most_recent_versions {
|
||||
keep_count = 5
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "cleanup_policies" {
|
||||
for_each = var.store_untagged_artifacts_for != null ? [1] : []
|
||||
|
||||
content {
|
||||
id = "gc-untagged"
|
||||
action = "DELETE"
|
||||
|
||||
condition {
|
||||
tag_state = "UNTAGGED"
|
||||
older_than = var.store_untagged_artifacts_for
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dynamic "cleanup_policies" {
|
||||
for_each = var.store_tagged_artifacts_for != null ? [1] : []
|
||||
|
||||
content {
|
||||
id = "gc-expired-artifacts"
|
||||
action = "DELETE"
|
||||
|
||||
condition {
|
||||
tag_state = "TAGGED"
|
||||
older_than = var.store_tagged_artifacts_for
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.artifactregistry
|
||||
]
|
||||
}
|
||||
|
||||
data "google_iam_policy" "artifacts_policy" {
|
||||
binding {
|
||||
role = "roles/artifactregistry.reader"
|
||||
members = ["allUsers"]
|
||||
}
|
||||
|
||||
binding {
|
||||
role = "roles/artifactregistry.writer"
|
||||
members = var.writers
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_artifact_registry_repository_iam_policy" "policy" {
|
||||
project = google_artifact_registry_repository.firezone.project
|
||||
location = google_artifact_registry_repository.firezone.location
|
||||
repository = google_artifact_registry_repository.firezone.name
|
||||
|
||||
policy_data = data.google_iam_policy.artifacts_policy.policy_data
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
output "name" {
|
||||
value = google_artifact_registry_repository.firezone.name
|
||||
}
|
||||
|
||||
output "url" {
|
||||
value = "${var.region}-docker.pkg.dev"
|
||||
}
|
||||
|
||||
output "repo" {
|
||||
value = "${var.project_id}/${google_artifact_registry_repository.firezone.name}"
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
variable "project_id" {
|
||||
description = "The ID of the project in which the resource belongs."
|
||||
}
|
||||
|
||||
variable "project_name" {
|
||||
description = "The name of the project in which the resource belongs."
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
description = "The region in which the registry is hosted."
|
||||
}
|
||||
|
||||
variable "writers" {
|
||||
description = "The list of IAM members that have write access to the container registry."
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "store_tagged_artifacts_for" {
|
||||
description = "Sets the maximum lifetime of artifacts, eg. `300s`. Keep empty to set to `null` to never delete them."
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "store_untagged_artifacts_for" {
|
||||
description = "Sets the maximum lifetime of artifacts, eg. `300s`. Keep empty to set to `null` to never delete them."
|
||||
type = string
|
||||
default = null
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
resource "google_project_service" "dns" {
|
||||
project = var.project_id
|
||||
service = "dns.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_dns_managed_zone" "main" {
|
||||
project = var.project_id
|
||||
|
||||
name = join("-", compact(split(".", var.tld)))
|
||||
dns_name = "${var.tld}."
|
||||
|
||||
labels = {
|
||||
managed = true
|
||||
managed_by = "terraform"
|
||||
}
|
||||
|
||||
dnssec_config {
|
||||
kind = "dns#managedZoneDnsSecConfig"
|
||||
non_existence = "nsec3"
|
||||
|
||||
state = var.dnssec_enabled ? "on" : "off"
|
||||
|
||||
default_key_specs {
|
||||
algorithm = "rsasha256"
|
||||
key_length = 2048
|
||||
key_type = "keySigning"
|
||||
kind = "dns#dnsKeySpec"
|
||||
}
|
||||
|
||||
default_key_specs {
|
||||
algorithm = "rsasha256"
|
||||
key_length = 1024
|
||||
key_type = "zoneSigning"
|
||||
kind = "dns#dnsKeySpec"
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
# prevent_destroy = true
|
||||
ignore_changes = []
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.dns
|
||||
]
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
output "name_servers" {
|
||||
value = join(" ", google_dns_managed_zone.main.name_servers)
|
||||
}
|
||||
|
||||
output "zone_name" {
|
||||
value = google_dns_managed_zone.main.name
|
||||
}
|
||||
|
||||
output "dns_name" {
|
||||
value = google_dns_managed_zone.main.dns_name
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
variable "project_id" {
|
||||
description = "The ID of the project in which the resource belongs."
|
||||
}
|
||||
|
||||
variable "tld" {
|
||||
description = "The top level domain to use for the cluster. Should end with a dot, eg: 'app.firez.one.'"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "dnssec_enabled" {
|
||||
description = "Whether or not to enable DNSSEC"
|
||||
type = bool
|
||||
}
|
||||
@@ -1,417 +0,0 @@
|
||||
resource "google_monitoring_notification_channel" "slack" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "Slack: ${var.slack_alerts_channel}"
|
||||
type = "slack"
|
||||
|
||||
labels = {
|
||||
"channel_name" = var.slack_alerts_channel
|
||||
}
|
||||
|
||||
sensitive_labels {
|
||||
auth_token = var.slack_alerts_auth_token
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_notification_channel" "pagerduty" {
|
||||
count = var.pagerduty_auth_token != null ? 1 : 0
|
||||
|
||||
project = var.project_id
|
||||
|
||||
display_name = "PagerDuty"
|
||||
type = "pagerduty"
|
||||
|
||||
sensitive_labels {
|
||||
service_key = var.pagerduty_auth_token
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
notification_channels = concat(
|
||||
[google_monitoring_notification_channel.slack.name],
|
||||
var.additional_notification_channels,
|
||||
google_monitoring_notification_channel.pagerduty[*].name
|
||||
)
|
||||
}
|
||||
|
||||
resource "google_monitoring_uptime_check_config" "api-https" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "api-https"
|
||||
timeout = "60s"
|
||||
|
||||
http_check {
|
||||
port = "443"
|
||||
use_ssl = true
|
||||
validate_ssl = true
|
||||
|
||||
request_method = "GET"
|
||||
path = "/healthz"
|
||||
|
||||
accepted_response_status_codes {
|
||||
status_class = "STATUS_CLASS_2XX"
|
||||
}
|
||||
}
|
||||
|
||||
monitored_resource {
|
||||
type = "uptime_url"
|
||||
|
||||
labels = {
|
||||
project_id = var.project_id
|
||||
host = var.api_host
|
||||
}
|
||||
}
|
||||
|
||||
content_matchers {
|
||||
content = "\"ok\""
|
||||
matcher = "MATCHES_JSON_PATH"
|
||||
|
||||
json_path_matcher {
|
||||
json_path = "$.status"
|
||||
json_matcher = "EXACT_MATCH"
|
||||
}
|
||||
}
|
||||
|
||||
checker_type = "STATIC_IP_CHECKERS"
|
||||
}
|
||||
|
||||
resource "google_monitoring_uptime_check_config" "web-https" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "web-https"
|
||||
timeout = "60s"
|
||||
|
||||
http_check {
|
||||
port = "443"
|
||||
use_ssl = true
|
||||
validate_ssl = true
|
||||
|
||||
request_method = "GET"
|
||||
|
||||
path = "/healthz"
|
||||
|
||||
accepted_response_status_codes {
|
||||
status_class = "STATUS_CLASS_2XX"
|
||||
}
|
||||
}
|
||||
|
||||
monitored_resource {
|
||||
type = "uptime_url"
|
||||
|
||||
labels = {
|
||||
project_id = var.project_id
|
||||
host = var.web_host
|
||||
}
|
||||
}
|
||||
|
||||
content_matchers {
|
||||
content = "\"ok\""
|
||||
matcher = "MATCHES_JSON_PATH"
|
||||
|
||||
json_path_matcher {
|
||||
json_path = "$.status"
|
||||
json_matcher = "EXACT_MATCH"
|
||||
}
|
||||
}
|
||||
|
||||
checker_type = "STATIC_IP_CHECKERS"
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "api-downtime" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "API service is DOWN!"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
conditions {
|
||||
display_name = "Uptime Health Check on api-https"
|
||||
|
||||
condition_threshold {
|
||||
filter = "resource.type = \"uptime_url\" AND metric.type = \"monitoring.googleapis.com/uptime_check/check_passed\" AND metric.labels.check_id = \"${reverse(split("/", google_monitoring_uptime_check_config.api-https.id))[0]}\""
|
||||
comparison = "COMPARISON_GT"
|
||||
|
||||
threshold_value = 1
|
||||
duration = "0s"
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
|
||||
aggregations {
|
||||
alignment_period = "60s"
|
||||
cross_series_reducer = "REDUCE_COUNT_FALSE"
|
||||
per_series_aligner = "ALIGN_NEXT_OLDER"
|
||||
|
||||
group_by_fields = [
|
||||
"resource.label.project_id",
|
||||
"resource.label.host"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "28800s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "web-downtime" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "Portal service is DOWN!"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
conditions {
|
||||
display_name = "Uptime Health Check on web-https"
|
||||
|
||||
condition_threshold {
|
||||
filter = "resource.type = \"uptime_url\" AND metric.type = \"monitoring.googleapis.com/uptime_check/check_passed\" AND metric.labels.check_id = \"${reverse(split("/", google_monitoring_uptime_check_config.web-https.id))[0]}\""
|
||||
comparison = "COMPARISON_GT"
|
||||
|
||||
threshold_value = 1
|
||||
duration = "0s"
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
|
||||
aggregations {
|
||||
alignment_period = "60s"
|
||||
cross_series_reducer = "REDUCE_COUNT_FALSE"
|
||||
per_series_aligner = "ALIGN_NEXT_OLDER"
|
||||
|
||||
group_by_fields = [
|
||||
"resource.label.project_id",
|
||||
"resource.label.host"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "28800s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "instances_high_cpu_policy" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "High Instance CPU utilization"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
conditions {
|
||||
display_name = "VM Instance - CPU utilization"
|
||||
|
||||
condition_threshold {
|
||||
filter = "resource.type = \"gce_instance\" AND metric.type = \"compute.googleapis.com/instance/cpu/utilization\" AND metadata.user_labels.managed_by = \"terraform\""
|
||||
comparison = "COMPARISON_GT"
|
||||
|
||||
threshold_value = 0.9
|
||||
duration = "60s"
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
|
||||
aggregations {
|
||||
alignment_period = "600s"
|
||||
cross_series_reducer = "REDUCE_NONE"
|
||||
per_series_aligner = "ALIGN_MEAN"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "28800s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "sql_high_cpu_policy" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "High Cloud SQL CPU utilization"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
conditions {
|
||||
display_name = "Cloud SQL Database - CPU utilization"
|
||||
|
||||
condition_threshold {
|
||||
filter = "resource.type = \"cloudsql_database\" AND metric.type = \"cloudsql.googleapis.com/database/cpu/utilization\""
|
||||
comparison = "COMPARISON_GT"
|
||||
|
||||
threshold_value = 0.8
|
||||
duration = "60s"
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
|
||||
aggregations {
|
||||
alignment_period = "60s"
|
||||
cross_series_reducer = "REDUCE_NONE"
|
||||
per_series_aligner = "ALIGN_MEAN"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "28800s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "sql_disk_utiliziation_policy" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "High Cloud SQL Disk utilization"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
conditions {
|
||||
display_name = "Cloud SQL Database - Disk utilization"
|
||||
|
||||
condition_threshold {
|
||||
filter = "resource.type = \"cloudsql_database\" AND metric.type = \"cloudsql.googleapis.com/database/disk/utilization\""
|
||||
comparison = "COMPARISON_GT"
|
||||
|
||||
threshold_value = 0.8
|
||||
duration = "300s"
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
|
||||
aggregations {
|
||||
alignment_period = "300s"
|
||||
cross_series_reducer = "REDUCE_NONE"
|
||||
per_series_aligner = "ALIGN_MEAN"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "28800s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "production_db_access_policy" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "DB Access"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = [
|
||||
google_monitoring_notification_channel.slack.name
|
||||
]
|
||||
|
||||
documentation {
|
||||
content = "Somebody just accessed the production database, this notification incident will be automatically discarded in 1 hour."
|
||||
mime_type = "text/markdown"
|
||||
}
|
||||
|
||||
conditions {
|
||||
display_name = "Log match condition"
|
||||
|
||||
condition_matched_log {
|
||||
filter = <<-EOT
|
||||
protoPayload.methodName="cloudsql.instances.connect"
|
||||
protoPayload.authenticationInfo.principalEmail!="terraform-cloud@terraform-iam-387817.iam.gserviceaccount.com"
|
||||
EOT
|
||||
|
||||
label_extractors = {
|
||||
"Email" = "EXTRACT(protoPayload.authenticationInfo.principalEmail)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "3600s"
|
||||
|
||||
notification_rate_limit {
|
||||
period = "28800s"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "ssl_certs_expiring_policy" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "SSL certificate expiring soon"
|
||||
combiner = "OR"
|
||||
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
user_labels = {
|
||||
version = "1"
|
||||
uptime = "ssl_cert_expiration"
|
||||
}
|
||||
|
||||
conditions {
|
||||
display_name = "SSL certificate expiring soon"
|
||||
|
||||
condition_threshold {
|
||||
comparison = "COMPARISON_LT"
|
||||
filter = "metric.type=\"monitoring.googleapis.com/uptime_check/time_until_ssl_cert_expires\" AND resource.type=\"uptime_url\""
|
||||
|
||||
aggregations {
|
||||
alignment_period = "1200s"
|
||||
cross_series_reducer = "REDUCE_MEAN"
|
||||
group_by_fields = ["resource.label.*"]
|
||||
per_series_aligner = "ALIGN_NEXT_OLDER"
|
||||
}
|
||||
|
||||
duration = "600s"
|
||||
threshold_value = 15
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
alert_strategy {
|
||||
auto_close = "28800s"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_monitoring_alert_policy" "load_balancer_latency_policy" {
|
||||
project = var.project_id
|
||||
|
||||
display_name = "Load balancer latency"
|
||||
combiner = "OR"
|
||||
notification_channels = local.notification_channels
|
||||
|
||||
documentation {
|
||||
content = "This alert is triggered when the load balancer latency is higher than 3000ms."
|
||||
mime_type = "text/markdown"
|
||||
}
|
||||
|
||||
conditions {
|
||||
display_name = "Load balancer latency"
|
||||
|
||||
condition_threshold {
|
||||
# Filter out HTTP 101 responses (switching protocols) to prevent WebSocket connections from triggering the alert
|
||||
filter = "metric.labels.response_code != \"101\" AND resource.type = \"https_lb_rule\" AND metric.type = \"loadbalancing.googleapis.com/https/total_latencies\""
|
||||
comparison = "COMPARISON_GT"
|
||||
threshold_value = 3000
|
||||
duration = "0s"
|
||||
|
||||
trigger {
|
||||
count = 1
|
||||
}
|
||||
|
||||
aggregations {
|
||||
alignment_period = "300s"
|
||||
per_series_aligner = "ALIGN_PERCENTILE_99"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
output "notification_channels" {
|
||||
value = local.notification_channels
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
variable "project_id" {
|
||||
description = "The ID of the project in which the resource belongs."
|
||||
}
|
||||
|
||||
variable "slack_alerts_channel" {
|
||||
type = string
|
||||
description = "Slack channel which will receive monitoring alerts"
|
||||
}
|
||||
|
||||
variable "slack_alerts_auth_token" {
|
||||
type = string
|
||||
description = "Slack auth token for the infra alerts channel"
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "pagerduty_auth_token" {
|
||||
type = string
|
||||
description = "Pagerduty auth token for the infra alerts channel"
|
||||
default = null
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "additional_notification_channels" {
|
||||
type = list(string)
|
||||
default = []
|
||||
description = "List of mobile app notification channels"
|
||||
}
|
||||
|
||||
variable "api_host" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "web_host" {
|
||||
type = string
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
|
||||
resource "google_project" "project" {
|
||||
name = var.name
|
||||
|
||||
org_id = var.organization_id
|
||||
billing_account = var.billing_account_id
|
||||
project_id = var.id != "" ? var.id : replace(lower(var.name), " ", "-")
|
||||
|
||||
auto_create_network = var.auto_create_network
|
||||
}
|
||||
|
||||
resource "google_project_service" "oslogin" {
|
||||
project = google_project.project.project_id
|
||||
service = "oslogin.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "iam" {
|
||||
project = google_project.project.project_id
|
||||
service = "iam.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "iamcredentials" {
|
||||
project = google_project.project.project_id
|
||||
service = "iamcredentials.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "serviceusage" {
|
||||
project = google_project.project.project_id
|
||||
service = "serviceusage.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
output "project" {
|
||||
description = "Project struct which can be used to create resources in this project"
|
||||
value = google_project.project
|
||||
}
|
||||
|
||||
output "name" {
|
||||
description = "The project name"
|
||||
value = google_project.project.name
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
variable "organization_id" {
|
||||
description = "ID of a Google Cloud Organization"
|
||||
}
|
||||
|
||||
variable "billing_account_id" {
|
||||
description = "ID of a Google Cloud Billing Account which will be used to pay for resources"
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name of a Google Cloud Project"
|
||||
}
|
||||
|
||||
variable "id" {
|
||||
description = "ID of a Google Cloud Project. Can be omitted and will be generated automatically"
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "auto_create_network" {
|
||||
description = "Whether to create a default network in the project"
|
||||
default = "true"
|
||||
}
|
||||
@@ -1,216 +0,0 @@
|
||||
# Enable Cloud SQL for the Google Cloud project
|
||||
|
||||
resource "google_project_service" "sqladmin" {
|
||||
project = var.project_id
|
||||
service = "sqladmin.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "sql-component" {
|
||||
project = var.project_id
|
||||
service = "sql-component.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "servicenetworking" {
|
||||
project = var.project_id
|
||||
service = "servicenetworking.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
# Create a reserved for Google Cloud SQL address range and connect it to VPC network
|
||||
resource "google_compute_global_address" "private_ip_pool" {
|
||||
project = var.project_id
|
||||
network = var.network
|
||||
|
||||
name = "google-cloud-sql"
|
||||
purpose = "VPC_PEERING"
|
||||
address_type = "INTERNAL"
|
||||
prefix_length = 16
|
||||
}
|
||||
|
||||
resource "google_service_networking_connection" "connection" {
|
||||
network = var.network
|
||||
|
||||
service = "servicenetworking.googleapis.com"
|
||||
reserved_peering_ranges = [google_compute_global_address.private_ip_pool.name]
|
||||
|
||||
depends_on = [
|
||||
google_project_service.servicenetworking,
|
||||
]
|
||||
}
|
||||
|
||||
# Create the main Cloud SQL instance
|
||||
resource "google_sql_database_instance" "master" {
|
||||
project = var.project_id
|
||||
|
||||
name = var.database_name
|
||||
database_version = var.database_version
|
||||
region = var.compute_region
|
||||
|
||||
settings {
|
||||
tier = "db-custom-${var.compute_instance_cpu_count}-${var.compute_instance_memory_size}"
|
||||
|
||||
disk_type = "PD_SSD"
|
||||
disk_autoresize = true
|
||||
|
||||
activation_policy = "ALWAYS"
|
||||
availability_type = var.database_highly_available ? "REGIONAL" : "ZONAL"
|
||||
|
||||
deletion_protection_enabled = strcontains(var.database_name, "-prod") ? true : false
|
||||
|
||||
location_preference {
|
||||
zone = var.compute_availability_zone
|
||||
}
|
||||
|
||||
backup_configuration {
|
||||
# Backups must be enabled if read replicas are enabled
|
||||
enabled = length(var.database_read_replica_locations) > 0 ? true : var.database_backups_enabled
|
||||
start_time = "10:00"
|
||||
|
||||
# PITR backups must be enabled if read replicas are enabled
|
||||
point_in_time_recovery_enabled = length(var.database_read_replica_locations) > 0 ? true : var.database_backups_enabled
|
||||
|
||||
backup_retention_settings {
|
||||
retained_backups = 30
|
||||
}
|
||||
}
|
||||
|
||||
ip_configuration {
|
||||
ipv4_enabled = true
|
||||
private_network = var.network
|
||||
}
|
||||
|
||||
maintenance_window {
|
||||
day = 7
|
||||
hour = 8
|
||||
update_track = "stable"
|
||||
}
|
||||
|
||||
insights_config {
|
||||
query_insights_enabled = true
|
||||
record_application_tags = true
|
||||
record_client_address = false
|
||||
|
||||
query_plans_per_minute = 20
|
||||
query_string_length = 4500
|
||||
}
|
||||
|
||||
password_validation_policy {
|
||||
enable_password_policy = true
|
||||
|
||||
complexity = "COMPLEXITY_DEFAULT"
|
||||
|
||||
min_length = 16
|
||||
disallow_username_substring = true
|
||||
}
|
||||
|
||||
dynamic "database_flags" {
|
||||
for_each = var.database_flags
|
||||
|
||||
content {
|
||||
name = database_flags.key
|
||||
value = database_flags.value
|
||||
}
|
||||
}
|
||||
|
||||
database_flags {
|
||||
name = "maintenance_work_mem"
|
||||
value = floor(var.compute_instance_memory_size * 1024 / 100 * 5)
|
||||
}
|
||||
|
||||
database_flags {
|
||||
name = "cloudsql.iam_authentication"
|
||||
value = "on"
|
||||
}
|
||||
|
||||
database_flags {
|
||||
name = "cloudsql.enable_pgaudit"
|
||||
value = "on"
|
||||
}
|
||||
|
||||
database_flags {
|
||||
name = "pgaudit.log"
|
||||
value = "all"
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
prevent_destroy = true
|
||||
ignore_changes = []
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
google_project_service.sqladmin,
|
||||
google_project_service.sql-component,
|
||||
google_service_networking_connection.connection,
|
||||
]
|
||||
}
|
||||
|
||||
# Create followers for the main Cloud SQL instance
|
||||
resource "google_sql_database_instance" "read-replica" {
|
||||
for_each = tomap({
|
||||
for location in var.database_read_replica_locations : location.region => location
|
||||
})
|
||||
|
||||
project = var.project_id
|
||||
|
||||
name = "${var.database_name}-read-replica-${each.key}"
|
||||
database_version = var.database_version
|
||||
region = each.value.region
|
||||
|
||||
master_instance_name = var.database_name
|
||||
|
||||
replica_configuration {
|
||||
connect_retry_interval = "30"
|
||||
}
|
||||
|
||||
settings {
|
||||
# We must use the same tier as the master instance,
|
||||
# otherwise it might be lagging behind during the replication and won't be usable
|
||||
tier = "db-custom-${var.compute_instance_cpu_count}-${var.compute_instance_memory_size}"
|
||||
|
||||
disk_type = "PD_SSD"
|
||||
disk_autoresize = true
|
||||
|
||||
activation_policy = "ALWAYS"
|
||||
availability_type = "ZONAL"
|
||||
|
||||
location_preference {
|
||||
zone = var.compute_availability_zone
|
||||
}
|
||||
|
||||
ip_configuration {
|
||||
ipv4_enabled = each.value.ipv4_enabled
|
||||
private_network = each.value.network
|
||||
}
|
||||
|
||||
insights_config {
|
||||
query_insights_enabled = true
|
||||
record_application_tags = true
|
||||
record_client_address = false
|
||||
|
||||
query_plans_per_minute = 20
|
||||
query_string_length = 4500
|
||||
}
|
||||
|
||||
dynamic "database_flags" {
|
||||
for_each = var.database_flags
|
||||
|
||||
content {
|
||||
name = database_flags.key
|
||||
value = database_flags.value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
prevent_destroy = true
|
||||
ignore_changes = []
|
||||
}
|
||||
|
||||
depends_on = [google_sql_database_instance.master]
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
output "master_instance_ip_address" {
|
||||
value = google_sql_database_instance.master.private_ip_address
|
||||
}
|
||||
|
||||
output "master_instance_name" {
|
||||
value = google_sql_database_instance.master.name
|
||||
}
|
||||
|
||||
output "master_instance_address" {
|
||||
value = google_sql_database_instance.master.private_ip_address
|
||||
}
|
||||
|
||||
output "read-replicas" {
|
||||
value = google_sql_database_instance.read-replica
|
||||
}
|
||||
|
||||
output "bi_instance_ip_address" {
|
||||
value = try(google_sql_database_instance.read-replica[var.database_read_replica_locations[0].region].ip_address[0], google_sql_database_instance.master.private_ip_address)
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
variable "project_id" {
|
||||
description = "The ID of the project in which the resource belongs."
|
||||
}
|
||||
|
||||
variable "compute_region" {
|
||||
description = "The region the instance will sit in."
|
||||
}
|
||||
|
||||
variable "compute_availability_zone" {
|
||||
description = "The preferred compute engine zone. See https://cloud.google.com/compute/docs/regions-zones?hl=en"
|
||||
}
|
||||
|
||||
variable "compute_instance_memory_size" {
|
||||
description = "Instance memory size. See https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create"
|
||||
}
|
||||
|
||||
variable "compute_instance_cpu_count" {
|
||||
description = "Count of CPUs. See https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create"
|
||||
}
|
||||
|
||||
variable "network" {
|
||||
description = "Full network identifier which is used to create private VPC connection with Cloud SQL instance"
|
||||
}
|
||||
|
||||
variable "database_name" {
|
||||
description = "Name of the Cloud SQL database"
|
||||
}
|
||||
|
||||
variable "database_version" {
|
||||
description = "Version of the Cloud SQL database"
|
||||
default = "POSTGRES_17"
|
||||
}
|
||||
|
||||
variable "database_highly_available" {
|
||||
description = "Creates a failover copy for the master intancy and makes it availability regional."
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "database_backups_enabled" {
|
||||
description = "Should backups be enabled on this database?"
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "database_read_replica_locations" {
|
||||
description = "List of read-only replicas to create."
|
||||
type = list(object({
|
||||
region = string
|
||||
ipv4_enabled = bool
|
||||
network = string
|
||||
}))
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "database_flags" {
|
||||
description = "List of PostgreSQL database flags. Can be used to install Postgres extensions."
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
resource "google_project_service" "storage-api" {
|
||||
project = var.project_id
|
||||
|
||||
service = "storage-api.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_project_service" "storage-component" {
|
||||
project = var.project_id
|
||||
|
||||
service = "storage-component.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
variable "project_id" {
|
||||
description = "The ID of the project in which the resource belongs."
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
resource "google_project_service" "compute" {
|
||||
project = var.project_id
|
||||
service = "compute.googleapis.com"
|
||||
|
||||
disable_on_destroy = false
|
||||
}
|
||||
|
||||
resource "google_compute_network" "vpc_network" {
|
||||
project = var.project_id
|
||||
name = var.name
|
||||
|
||||
routing_mode = "GLOBAL"
|
||||
|
||||
auto_create_subnetworks = false
|
||||
|
||||
depends_on = [
|
||||
google_project_service.compute
|
||||
]
|
||||
}
|
||||
|
||||
## Router and Cloud NAT are required for instances without external IP address
|
||||
resource "google_compute_router" "default" {
|
||||
project = var.project_id
|
||||
|
||||
name = google_compute_network.vpc_network.name
|
||||
network = google_compute_network.vpc_network.self_link
|
||||
region = var.nat_region
|
||||
}
|
||||
|
||||
resource "google_compute_router_nat" "application" {
|
||||
project = var.project_id
|
||||
|
||||
name = google_compute_network.vpc_network.name
|
||||
region = var.nat_region
|
||||
|
||||
router = google_compute_router.default.name
|
||||
|
||||
nat_ip_allocate_option = "AUTO_ONLY"
|
||||
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
|
||||
|
||||
enable_dynamic_port_allocation = false
|
||||
min_ports_per_vm = 32
|
||||
|
||||
udp_idle_timeout_sec = 30
|
||||
icmp_idle_timeout_sec = 30
|
||||
tcp_established_idle_timeout_sec = 1200
|
||||
tcp_transitory_idle_timeout_sec = 30
|
||||
tcp_time_wait_timeout_sec = 120
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
output "id" {
|
||||
value = google_compute_network.vpc_network.id
|
||||
}
|
||||
|
||||
output "name" {
|
||||
value = google_compute_network.vpc_network.name
|
||||
}
|
||||
|
||||
output "self_link" {
|
||||
value = google_compute_network.vpc_network.self_link
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
variable "project_id" {
|
||||
description = "The ID of the project in which the resource belongs."
|
||||
}
|
||||
|
||||
variable "name" {
|
||||
description = "Name of the resource. Provided by the client when the resource is created."
|
||||
}
|
||||
|
||||
variable "nat_region" {
|
||||
description = "Region where Cloud NAT will be created"
|
||||
}
|
||||
@@ -8,8 +8,8 @@ In this example, we will deploy one or more Firezone Gateways in a single VPC on
|
||||
Google Cloud Platform (GCP) that are configured to egress traffic through a
|
||||
single Cloud NAT that is assigned a single static IP address.
|
||||
|
||||
This example is built on top of our example module for deploying a
|
||||
[Firezone Gateway in Google Cloud](https://github.com/firezone/terraform-google-gateway/tree/main/examples/nat-gateway).
|
||||
This example is built on top of our module for deploying a
|
||||
[Firezone Gateway in Google Cloud](/kb/automate/terraform/gcp).
|
||||
|
||||
## Common use cases
|
||||
|
||||
|
||||
@@ -131,8 +131,8 @@ traffic.
|
||||
|
||||
Scaling Firezone to support your rapidly growing organization is as simple as
|
||||
deploying additional Gateway servers. See our
|
||||
[Terraform Gateway deployment examples](https://github.com/firezone/firezone/tree/main/terraform/examples)
|
||||
for an idea of how to automate this process.
|
||||
[Terraform deployment examples](/kb/automate/terraform) for an idea of how to
|
||||
automate this process.
|
||||
|
||||
#### What protocol does Firezone use to encrypt traffic?
|
||||
|
||||
|
||||
@@ -22,10 +22,8 @@ After completing this guide, your team's traffic will be routed to a Firezone
|
||||
Gateway and then out to the internet using its public IP address.
|
||||
|
||||
<Alert color="info">
|
||||
See our our [Terraform
|
||||
examples](https://www.github.com/firezone/firezone/tree/main/terraform/examples)
|
||||
for a high availability example of this guide using Terraform on Google Cloud
|
||||
Platform.
|
||||
See our our [Terraform examples](/kb/automate/terraform) for a high
|
||||
availability example of this guide using Terraform on Google Cloud Platform.
|
||||
</Alert>
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -31,7 +31,7 @@ balanced across multiple Gateways for high availability.
|
||||
[Deploy a Gateway](/kb/deploy/gateways) if you haven't done so yet.
|
||||
|
||||
<Alert color="info">
|
||||
See our [Terraform examples](/kb/automate) to learn how to automate
|
||||
See our [Terraform examples](/kb/automate/terraform) to learn how to automate
|
||||
deployments to various cloud providers.
|
||||
</Alert>
|
||||
|
||||
|
||||
Reference in New Issue
Block a user