Initial commit

This commit is contained in:
hakobian4
2024-05-24 15:23:19 +04:00
commit eb6ed0b13e
32 changed files with 1318 additions and 0 deletions

View File

@@ -0,0 +1,74 @@
name: "Apply Terraform Composite Action"
description: "A composite action to apply terraform"

inputs:
  workspace:
    description: "Terraform Workspace"
    required: true
  plan_output:
    description: "Terraform Plan Output FileName"
    required: true
  working_directory:
    description: "Terraform Working Directory"
    required: true
  tf_api_token:
    description: "Terraform API Token"
    required: true

runs:
  using: "composite"
  steps:
    - name: Setup Terraform
      uses: hashicorp/setup-terraform@v3
      with:
        cli_config_credentials_token: ${{ inputs.tf_api_token }}
        terraform_version: 1.7.5

    - name: Configure Terraform Cache
      run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
      shell: bash

    # init runs against the bookkeeping "default-ws" workspace; the real
    # workspace is selected (or created) in the next step.
    - name: Initializing Terraform
      run: |
        cd ${{ inputs.working_directory }}
        terraform init
      shell: bash
      env:
        TF_WORKSPACE: "default-ws"

    - name: Selecting Terraform Workspace
      id: workspace
      run: |
        cd ${{ inputs.working_directory }}
        terraform workspace select -or-create ${{ inputs.workspace }}
      shell: bash

    - name: Validating Terraform
      id: validate
      run: |
        cd ${{ inputs.working_directory }}
        terraform validate -no-color
      shell: bash

    - name: Planning Terraform
      id: plan
      run: |
        cd ${{ inputs.working_directory }}
        terraform plan -out=${{ inputs.plan_output }}
      shell: bash

    # Applies the saved plan; on failure, prints a readable hint instead of
    # only the raw terraform error, then fails the step.
    - name: Applying Terraform
      id: apply
      run: |
        cd ${{ inputs.working_directory }}
        set +e
        terraform apply ${{ inputs.plan_output }}
        tf_exitcode=$?
        set -e
        # Fix: the original `echo """ ... """` relied on adjacent quote-pair
        # matching (each `"""` is an empty string plus an opening quote) and
        # would break on any embedded quote; plain echo lines are unambiguous.
        if [ "$tf_exitcode" -ne 0 ]; then
          echo "Error: Unable to create a Cluster."
          echo "Message: This could be caused by a lack of resources in the server. Please review the server resources and try again."
          exit 1
        fi
      shell: bash

150
.github/workflows/create_cluster.yml vendored Normal file
View File

@@ -0,0 +1,150 @@
name: 'Apply Terraform'

on:
  push:
    branches:
      - 'master'
    paths:
      - 'inputs.yaml'

env:
  TF_CLOUD_ORGANIZATION: "infraheads"
  TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
  TF_VAR_proxmox_token_id: "${{ secrets.PROXMOX_TOKEN_ID }}"
  TF_VAR_proxmox_token_secret: "${{ secrets.PROXMOX_TOKEN_SECRET }}"
  TF_VAR_github_token: "${{ secrets.TOKEN_GITHUB }}"
  TF_VAR_netris_controller_host: "${{ vars.NETRIS_CONTROLLER_HOST }}"
  TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}"
  TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}"
  TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}"

jobs:
  # Computes two outputs from inputs.yaml and the Terraform Cloud workspace list:
  #   clusters             — cluster names that must exist (top-level keys of inputs.yaml)
  #   removable_workspaces — workspace base names whose cluster was removed from inputs.yaml
  setup_terraform:
    runs-on: self-hosted
    permissions:
      contents: read
      pull-requests: write
    defaults:
      run:
        working-directory: "terraform/infrastructure"
    outputs:
      clusters: ${{ steps.output_variables.outputs.clusters }}
      removable_workspaces: ${{ steps.output_variables.outputs.removable_workspaces }}
    steps:
      - uses: actions/checkout@v4
      - name: Setup Terraform Environment
        uses: hashicorp/setup-terraform@v3
        with:
          cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
          terraform_version: 1.7.5
      - name: Configure Terraform Cache
        run: |
          echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
      - name: Initializing Terraform
        id: init
        run: terraform init -upgrade
        env:
          TF_WORKSPACE: "default-ws"
      - name: Extract values and set output variables
        id: output_variables
        run: |
          # JSON array of cluster names (the top-level keys of inputs.yaml).
          clusters=$(yq e 'keys | .[]' ${GITHUB_WORKSPACE}/inputs.yaml | jq -R -s -c 'split("\n")[:-1]')
          # JSON array of workspace base names: drop "default-ws", strip the
          # "-infrastructure"/"-cluster" suffixes, dedupe, trim leading spaces.
          workspaces=$(terraform workspace list | grep -v 'default-ws' | sed 's/-infrastructure$//;s/-cluster$//' | uniq | grep -v '^$' | jq -R -s -c 'split("\n")[:-1] | map(ltrimstr(" "))')
          # jq array subtraction: workspaces that no longer appear in inputs.yaml.
          echo "removable_workspaces=$(jq -n -c $workspaces-$clusters)" >> $GITHUB_OUTPUT
          echo "clusters=$clusters" >> $GITHUB_OUTPUT

  # Destroys the infrastructure of clusters removed from inputs.yaml, then
  # deletes their Terraform Cloud workspaces.
  destroy_cluster:
    needs: setup_terraform
    if: ${{ needs.setup_terraform.outputs.removable_workspaces != '[]' }}
    runs-on: self-hosted
    permissions:
      contents: read
      pull-requests: write
    defaults:
      run:
        working-directory: "terraform/infrastructure"
    strategy:
      matrix:
        destroyable_resources: ${{ fromJSON(needs.setup_terraform.outputs.removable_workspaces) }}
    steps:
      - uses: actions/checkout@v4
      - name: Setup Terraform
        uses: hashicorp/setup-terraform@v3
        with:
          cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
          terraform_version: 1.7.5
      - name: Configure Terraform Cache
        run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
      - name: Initializing Terraform
        run: terraform init
        env:
          TF_WORKSPACE: "default-ws"
          TF_VAR_cluster_name: ${{ matrix.destroyable_resources }}
      - name: Selecting Terraform Workspace
        run: terraform workspace select ${{ matrix.destroyable_resources }}-infrastructure
      - name: Destroying Terraform
        run: |
          terraform destroy -auto-approve
      - name: Destroying Terraform Cloud Workspace
        run: |
          terraform workspace select default-ws
          terraform workspace delete -force ${{ matrix.destroyable_resources }}-infrastructure
          terraform workspace delete -force ${{ matrix.destroyable_resources }}-cluster

  # Applies the "-infrastructure" then "-cluster" workspaces for every cluster
  # declared in inputs.yaml, and uploads the generated kubeconfig as an artifact.
  apply_cluster:
    needs: setup_terraform
    if: ${{ needs.setup_terraform.outputs.clusters != '[]' }}
    runs-on: self-hosted
    permissions:
      contents: read
      pull-requests: write
    defaults:
      run:
        working-directory: "terraform"
    strategy:
      matrix:
        cluster: ${{ fromJSON(needs.setup_terraform.outputs.clusters) }}
    steps:
      # Fix: the repository must be checked out before a local composite action
      # (`uses: ./.github/actions/...`) can be resolved.
      - uses: actions/checkout@v4
      - name: Apply Terraform Infrastructure
        uses: ./.github/actions/apply_terraform_composite_action
        with:
          workspace: ${{ matrix.cluster }}-infrastructure
          plan_output: tfplan-${{ matrix.cluster }}-infrastructure
          working_directory: "terraform/infrastructure"
          tf_api_token: ${{ secrets.TF_API_TOKEN }}
        env:
          TF_VAR_cluster_name: ${{ matrix.cluster }}
      - name: Apply Terraform Cluster
        uses: ./.github/actions/apply_terraform_composite_action
        with:
          workspace: ${{ matrix.cluster }}-cluster
          plan_output: tfplan-${{ matrix.cluster }}-cluster
          working_directory: "terraform/cluster"
          tf_api_token: ${{ secrets.TF_API_TOKEN }}
        env:
          TF_VAR_cluster_name: ${{ matrix.cluster }}
      - name: Generating kube-config
        uses: actions/upload-artifact@v4
        with:
          name: kube-config-${{ matrix.cluster }}
          path: /opt/kubeconfig/${{ matrix.cluster }}
          compression-level: 0
      - name: Generate Markdown
        run: |
          echo "### turnk8s" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Here is the [GitHub URL](https://github.com/infraheads/${{ matrix.cluster }}) for this cluster. You can add your ArgoCD application or Kubernetes manifests and enjoy! :star_struck:" >> $GITHUB_STEP_SUMMARY
          # Fix: single quotes — inside double quotes the backticks around
          # `kubeconfig` were command substitution, not literal Markdown.
          echo 'Find `kubeconfig` file(s) to connect to the cluster(s), which is/are attached above! :point_up:' >> $GITHUB_STEP_SUMMARY

37
.gitignore vendored Normal file
View File

@@ -0,0 +1,37 @@
# Local .terraform directories
**/.terraform/*
# .tfstate files
*.tfstate
*.tfstate.*
# Crash log files
crash.log
crash.*.log
# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json
# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json
# Include override files you do wish to add to version control using negated pattern
# !example_override.tf
# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*
# Ignore CLI configuration files
.terraformrc
terraform.rc
# ignore .idea from PyCharm
.idea

67
README.md Normal file
View File

@@ -0,0 +1,67 @@
turnk8s
=================
## Introduction
**turnk8s** is a Kubernetes-as-a-service tool driven by a `yaml` configuration file. Clusters run on [Talos Linux](https://www.talos.dev) on a [Proxmox](https://www.proxmox.com) server.
For each cluster, a GitHub repository is created containing Kubernetes manifests managed by ArgoCD.
## GitHub
It is possible to manage cluster(s) via the `inputs.yaml` configuration file. You can create or destroy cluster(s)
by changing the configuration file.
<details>
<summary>Click here to see the structure of configuration file:</summary>
```yaml
turnk8s-cluster:
versions:
talos: v1.7.1
k8s: v1.30.0
controlplane:
cpu_cores: 2
memory: 4096
disk_size: 20
worker_node:
count: 2
cpu_cores: 2
memory: 4096
disc_size: 20
```
</details>
### Creating a Cluster
For creating a cluster you need to configure `inputs.yaml` configuration file by adding the above structure into inputs.yaml file.
Here is the descriptions of the configuration values:
* **the main key is the cluster name:(Required)** - The cluster name which must be unique
* **versions.talos:(Required)** - Talos Linux version: the possible versions are v1.7.1 and v1.6.7
* **versions.k8s:(Required)** - Kubernetes version: the possible versions are v1.30.0 and v1.29.3
* **controlplane.cpu_cores:(Required)** - CPU cores count of the ControlPlane:(minimum requirement count is 2)
* **controlplane.memory:(Required)** - RAM memory of the ControlPlane:(minimum requirement size is 2048)
* **controlplane.disk_size:(Required)** - Disk size of the ControlPlane:(minimum requirement size is 10)
* **worker_node.count:(Required)** - Count of the Worker Nodes
* **worker_node.cpu_cores:(Required)** - CPU cores count of the Worker Node:(minimum requirement count is 1)
* **worker_node.memory:(Required)** - RAM memory of the Worker Node:(minimum requirement size is 2048)
* **worker_node.disc_size:(Required)** - Disk size of the Worker Node:(minimum requirement size is 10)
Now all you need to do is push the changes to GitHub. The workflow then automatically applies the changes described in the configuration file.
This completes within about 5 minutes.
After workflow is completed, click on the `artifact` to download `kubeconfig` configuration file. You can go to a given GitHub url, add your Kubernetes manifests and enjoy the result.
### Destroying a Cluster
For destroying a cluster, remove the cluster's configuration from the `inputs.yaml` file and push the change to GitHub.
Then it will be eliminated during 1-2 minutes.
## Kubectl
Now `unzip` the downloaded kubeconfig and export it as a `KUBECONFIG` variable.
<br>
Try `kubectl get node` to check Kubernetes is running.
#### Congratulations!!! Kubernetes cluster ready to deploy and manage your containerized applications.

11
inputs.yaml Normal file
View File

@@ -0,0 +1,11 @@
# Cluster definitions consumed by the create_cluster workflow: each top-level
# key is a cluster name, and its sub-map sizes the control plane and workers.
# NOTE(review): the README documents a required `versions:` section (talos/k8s)
# that is absent here — confirm whether it is actually optional.
turnk8s-cluster:
  controlplane:
    cpu_cores: 2   # README-documented minimum: 2
    memory: 4096   # README-documented minimum: 2048
    disk_size: 10  # README-documented minimum: 10
  worker_node:
    count: 1
    cpu_cores: 2
    memory: 2048
    # NOTE(review): spelled `disc_size` here vs `disk_size` for the control
    # plane — consumers must use the same spelling.
    disc_size: 10

View File

@@ -0,0 +1,17 @@
# Values for the argocd-apps Helm chart: a single root "app-of-apps"
# Application that syncs everything under argocd_applications/.
# NOTE(review): repoURL is a placeholder; the caller is expected to override
# applications[0].source.repoURL at install time.
applications:
  - name: app-of-apps
    namespace: argocd
    finalizers:
      - resources-finalizer.argocd.argoproj.io
    project: default
    source:
      repoURL: git@github.com:example/project.git
      targetRevision: HEAD
      path: argocd_applications
    destination:
      server: https://kubernetes.default.svc
      namespace: argocd
    syncPolicy:
      automated:
        prune: true     # delete resources removed from git
        selfHeal: true  # revert manual changes made on the cluster

View File

@@ -0,0 +1,6 @@
# ArgoCD Helm chart overrides: serve the API/UI without TLS inside the
# cluster and expose it through a LoadBalancer service.
configs:
  params:
    server.insecure: true
server:
  service:
    type: "LoadBalancer"

49
modules/argocd/main.tf Normal file
View File

@@ -0,0 +1,49 @@
# Installs ArgoCD from the argo-cd Helm chart, with all images pulled through
# the configured registry mirror.
resource "helm_release" "argocd" {
  name             = "argocd"
  namespace        = "argocd"
  chart            = var.argocd_chart_name
  version          = var.argocd_chart_version
  repository       = var.argocd_chart_repository
  create_namespace = true
  recreate_pods    = true
  force_update     = true

  # Base chart values shipped with this module; per-install overrides follow.
  values = [file("${path.module}/argocd.yaml")]

  # Pre-encrypted admin password supplied by the caller.
  set {
    name  = "configs.secret.argocdServerAdminPassword"
    value = var.argocd_admin_password
  }
  # Route the ArgoCD, dex and redis images through var.registry.
  set {
    name  = "global.image.repository"
    value = "${var.registry}/argoproj/argocd"
  }
  set {
    name  = "dex.image.repository"
    value = "${var.registry}/dexidp/dex"
  }
  set {
    name  = "redis.image.repository"
    value = "${var.registry}/docker/library/redis"
  }
}

# Bootstraps the app-of-apps pattern once ArgoCD itself is installed, pointing
# the root Application at the caller-supplied git repository.
resource "helm_release" "argocd-apps" {
  depends_on = [helm_release.argocd]
  name       = "argocd-apps"
  namespace  = helm_release.argocd.namespace
  chart      = var.app_of_apps_chart_name
  version    = var.app_of_apps_chart_version
  repository = var.app_of_apps_chart_repository

  values = [file("${path.module}/app-of-apps.yaml")]

  # Overrides the placeholder repoURL baked into app-of-apps.yaml.
  set {
    name  = "applications[0].source.repoURL"
    value = var.git_repository_ssh_url
  }
}

View File

@@ -0,0 +1,47 @@
variable "git_repository_ssh_url" {
description = "Git repository ssh url contains for workload"
type = string
default = "git@github.com:example/project.git"
}
variable "registry" {
description = "The registry from which images will be downloaded"
type = string
}
# ArgoCD variables
variable "argocd_chart_name" {
type = string
default = "argo-cd"
}
variable "argocd_chart_version" {
type = string
default = "6.7.18"
}
variable "argocd_chart_repository" {
type = string
default = "https://argoproj.github.io/argo-helm"
}
variable "argocd_admin_password" {
description = "Encrypted password for admin user"
type = string
}
# ArgoCD AppOfApps variables
variable "app_of_apps_chart_name" {
type = string
default = "argocd-apps"
}
variable "app_of_apps_chart_version" {
type = string
default = "1.6.2"
}
variable "app_of_apps_chart_repository" {
type = string
default = "https://argoproj.github.io/argo-helm"
}

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Writes a kubeconfig to /opt/kubeconfig/<cluster-name>.
#   $1 — kubeconfig content as produced by Terraform's yamlencode() (the
#        first and last lines are yamlencode's heredoc-style markers and
#        are stripped here).
#   $2 — cluster name; used as the output file name.
set -euo pipefail

# Ensure the target directory exists (robustness fix: the original assumed it).
mkdir -p /opt/kubeconfig

# NOTE(review): `head -n -1` is GNU-specific and fails on BSD/macOS head;
# acceptable as long as this only runs on the Linux self-hosted runner.
echo "$1" | tail -n +2 | head -n -1 > "/opt/kubeconfig/$2"

9
scripts/talos_cli.sh Normal file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Manual Talos cluster bootstrap helper.
# Requires CONTROLPLANE_IP and WORKER_NODE_IP in the environment.
set -u

: "${CONTROLPLANE_IP:?CONTROLPLANE_IP must be set}"
: "${WORKER_NODE_IP:?WORKER_NODE_IP must be set}"

# Fix: cluster-name typo corrected ("talos-proxmrox" -> "talos-proxmox");
# variables quoted so IPv6/unsual values cannot word-split.
talosctl gen config talos-proxmox "https://${CONTROLPLANE_IP}:6443" -o _out --force
talosctl apply-config -n "$CONTROLPLANE_IP" --insecure -f _out/controlplane.yaml
talosctl apply-config -n "$WORKER_NODE_IP" --insecure -f _out/worker.yaml

# Run after the VMs have booted:
talosctl bootstrap -e "$CONTROLPLANE_IP" -n "$CONTROLPLANE_IP" --talosconfig _out/talosconfig
talosctl kubeconfig -e "$CONTROLPLANE_IP" -n "$CONTROLPLANE_IP" --talosconfig _out/talosconfig

24
scripts/tf_workflow.sh Normal file
View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Prints Terraform workspaces that no longer have a matching cluster in
# inputs.yaml — i.e. clusters that were removed and whose workspaces can be
# destroyed. Must be run from the repository root with terraform initialized.

# Cluster names are the TOP-LEVEL KEYS of inputs.yaml.
# (Fix: the previous `[.[].cluster_name]` selector matched nothing with the
# actual schema — the workflow's own `yq e 'keys'` usage confirms the keys
# are the cluster names.)
mapfile -t cluster_array < <(yq e 'keys | .[]' inputs.yaml)

# Workspace names, excluding the bookkeeping "default-ws"; `tr` strips the
# "* " marker on the currently selected workspace and all padding.
# (Fix: the previous comma-join/split left a leading space on every element
# after the first, so names never matched the cluster names.)
mapfile -t workspace_array < <(terraform workspace list | tr -d ' *' | grep -v '^$' | grep -v '^default-ws$')

# Index clusters for O(1) membership checks.
declare -A cluster_map
for cluster in "${cluster_array[@]}"; do
  cluster_map["$cluster"]=1
done

# Emit every workspace with no corresponding cluster.
for workspace in "${workspace_array[@]}"; do
  if [[ -z ${cluster_map[$workspace]:-} ]]; then
    echo "$workspace"
  fi
done

88
terraform/cluster/.terraform.lock.hcl generated Normal file
View File

@@ -0,0 +1,88 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/helm" {
version = "2.13.2"
constraints = ">= 2.13.0"
hashes = [
"h1:nlSqCo0PajJzjSlx0lXNUq1YcOr8p9b3ahcUUYN2pEg=",
"zh:06c0663031ef5aa19e238fe50be5d3cbf5fb00548d2b26e779c607dfd2dc69a7",
"zh:1850b8f2e729553ba8b96d69dce035b814ce959c6805c25484f407c4e720c497",
"zh:1ec76814a99461cd79ee4c879ed455ab338a3cb9e63fbe9308f91b5515e72e42",
"zh:78546b2f0b2e9072370c017d8056a2ffda908c2e463d2792244e4be6562ab772",
"zh:9205eef438aa3d5e49505655b7c300f7cecfa30f8fa37ed84679f674420403f2",
"zh:9335c7300675e5088ab4090af3c8150701c0bb8ea67ad23ebd753f6ab3a922a9",
"zh:9722d8b419e9615a04b8fc9acb50e52d6ba988c7565cc517bc16faa0a9e895b3",
"zh:aa93d9fc7db91f261b6e41970453926341eaa4222c1b8d507cdeabd0be0af4eb",
"zh:c59a2af538de99c37e4ffe988f33633a9fb064e5360230adac5f6eb0fd473be8",
"zh:d6323f61f255131a7d9f5a645982eb0f0d12f685270f54beade95c0b51a7a6c9",
"zh:e7f46dd2aac9537d20aaac217806f2ebb3a347aaf6bbd28192c042286103635c",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/integrations/github" {
version = "6.2.1"
constraints = "6.2.1"
hashes = [
"h1:ip7024qn1ewDqlNucxh07DHvuhSLZSqtTGewxNLeYYU=",
"zh:172aa5141c525174f38504a0d2e69d0d16c0a0b941191b7170fe6ae4d7282e30",
"zh:1a098b731fa658c808b591d030cc17cc7dfca1bf001c3c32e596f8c1bf980e9f",
"zh:245d6a1c7e632d8ae4bdd2da2516610c50051e81505cf420a140aa5fa076ea90",
"zh:43c61c230fb4ed26ff1b04b857778e65be3d8f80292759abbe2a9eb3c95f6d97",
"zh:59bb7dd509004921e4322a196be476a2f70471b462802f09d03d6ce96f959860",
"zh:5cb2ab8035d015c0732107c109210243650b6eb115e872091b0f7b98c2763777",
"zh:69d2a6acfcd686f7e859673d1c8a07fc1fc1598a881493f19d0401eb74c0f325",
"zh:77f36d3f46911ace5c50dee892076fddfd64a289999a5099f8d524c0143456d1",
"zh:87df41097dfcde72a1fbe89caca882af257a4763c2e1af669c74dcb8530f9932",
"zh:899dbe621f32d58cb7c6674073a6db8328a9db66eecfb0cc3fc13299fd4e62e7",
"zh:ad2eb7987f02f7dd002076f65a685730705d04435313b5cf44d3a6923629fb29",
"zh:b2145ae7134dba893c7f74ad7dfdc65fdddf6c7b1d0ce7e2f3baa96212322fd8",
"zh:bd6bae3ac5c3f96ad9219d3404aa006ef1480e9041d4c95df1808737e37d911b",
"zh:e89758b20ae59f1b9a6d32c107b17846ddca9634b868cf8f5c927cbb894b1b1f",
]
}
provider "registry.terraform.io/siderolabs/talos" {
version = "0.5.0"
constraints = "0.5.0"
hashes = [
"h1:xogkLLCrJJmd278E+vNMnmQgaMD05Gd1QXN914xgVec=",
"zh:0f71f2624576224c9bc924b136b601b734243efa7a7ad8280dfd8bd583e4afa5",
"zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d",
"zh:33c50dacc5029fa20caed702001fb1439899c94f203b1f37dccb970f504bca45",
"zh:3c97a6e2692b88d3f4631a3f8769146f602c210e881b46fa1b3b82c545e51cd1",
"zh:44077a137613bcfe29eef00315b5aa50d83390c3c727580a4ff0f4b87f22d228",
"zh:5bd02f278aec5567f94dd057d1c758363998ce581ff17b0869515bb682c02186",
"zh:80f40939bc3b55f0005c03b77122ceea86ec4deb82f5557950a97ad96fbb1557",
"zh:94c1b17f25bc30eacde926e46f196f1f135032674730d9f50c986ef6b7a854f0",
"zh:95ad665b2fdeed38180f5c471164833a34d07c1ef0470c1652565fe8cf4e9c4a",
"zh:a50ef6088afcb129c176dd4ba86c345e9be7b14358bb3b21c34f06930d8f39ef",
"zh:aa71da1da00ed66f1dddf1b69c10b829f24ac89e207de07d32c455dd04482096",
"zh:abb7eeb2b089081b4814ed80a295673e1a92f82ce092dde37b5bc92e75efec2c",
"zh:db9b9b54a0db5ae151376d5a73e0d28497c3e06181840e71ef8349213ac03e50",
"zh:e50ed8aa90b736508fce63680e8339240cecb74709ab9563d34d2c2ce7bc8445",
"zh:f3a279723ff31a095d7bfff21857abfcc9a2cfdeeea8521d179630ae6565d581",
]
}
provider "registry.terraform.io/telmate/proxmox" {
version = "2.9.14"
hashes = [
"h1:H/f+LbVyPOLslHLAYnGuMMRqWFZ65K6E3V+MCYgfAyk=",
"zh:0d049d33f705e5b814d30028770c084151218439424e99684ce31d7e26a720b5",
"zh:20b1c64ed56d81de95f3f37b82b45b4654c0de26670c0e87a474c5cce13cd015",
"zh:2946058abd1d8e50e475b9ec39781eb02576b40dbd80f4653fade4493a4514c6",
"zh:29e50a25c456f040ce072f23ac57b5b82ebd3b916ca5ae6688332b5ec62adc4a",
"zh:3612932306ce5f08db94868f526cbb8c56d0d3c6ebe1c11a83f92bbf94354296",
"zh:42d1699b0abebaac82ea5a19f4393541d8bb2741bde204a8ac1028cdc29d1b14",
"zh:5ffd5dc567262eb8aafdf2f6eac63f7f21361da9c5d75a3c36b479638a0001b0",
"zh:6692ef323e3b89de99934ad731f6a1850525bf8142916ae28ea4e4048d73a787",
"zh:a5afc98e9a4038516bb58e788cb77dea67a60dce780dfcd206d7373c5a56b776",
"zh:bf902cded709d84fa27fbf91b589c241f2238a6c4924e4e479eebd74320b93a5",
"zh:cab0e1e72c9cebcf669fc6f35ec28cb8ab2dffb0237afc8860aa40d23bf8a49f",
"zh:e523b99a48beec83d9bc04b2d336266044f9f53514cefb652fe6768611847196",
"zh:f593915e8a24829d322d2eaeedcb153328cf9042f0d84f66040dde1be70ede04",
"zh:fba1aff541133e2129dfda0160369635ab48503d5c44b8407ce5922ecc15d0bd",
]
}

View File

@@ -0,0 +1,16 @@
# Deploys ArgoCD (and its app-of-apps bootstrap) into the new cluster, wired
# to the per-cluster GitHub repository created by the "-infrastructure"
# workspace (read via terraform_remote_state).
module "argocd" {
  source = "../../modules/argocd"

  # NOTE(review): the variable is named git_repository_ssh_url but receives
  # the HTTP clone URL from remote state — confirm which protocol ArgoCD's
  # repo credentials expect.
  git_repository_ssh_url       = data.terraform_remote_state.infrastructure.outputs.github_repo_url[var.cluster_name].http_clone_url
  registry                     = var.image_registry
  argocd_chart_name            = var.argocd_chart_name
  argocd_chart_version         = var.argocd_chart_version
  argocd_chart_repository      = var.argocd_chart_repository
  argocd_admin_password        = var.argocd_admin_password
  app_of_apps_chart_name       = var.argocd_app_of_apps_chart_name
  app_of_apps_chart_version    = var.argocd_app_of_apps_chart_version
  app_of_apps_chart_repository = var.argocd_app_of_apps_chart_repository
}

19
terraform/cluster/main.tf Normal file
View File

@@ -0,0 +1,19 @@
# Helm provider authenticated directly against the new cluster using the
# client certificates from the kubeconfig exported by this cluster's
# "-infrastructure" workspace.
provider "helm" {
  kubernetes {
    host                   = data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.host
    client_certificate     = base64decode(data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.client_certificate)
    client_key             = base64decode(data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.client_key)
    cluster_ca_certificate = base64decode(data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.ca_certificate)
  }
}

# State of the matching "-infrastructure" workspace in Terraform Cloud; the
# source of the kubeconfig and GitHub repository outputs used above.
data "terraform_remote_state" "infrastructure" {
  backend = "remote"
  config = {
    organization = "infraheads"
    workspaces = {
      name = "turnk8s-${var.cluster_name}-infrastructure"
    }
  }
}

View File

@@ -0,0 +1,41 @@
# Installs the Netris operator, which connects the cluster to the Netris
# controller (credentials supplied via sensitive variables).
resource "helm_release" "netris-operator" {
  name             = "netris-operator"
  namespace        = "netris-operator"
  chart            = "netris-operator"
  version          = "2.0.0"
  repository       = "https://netrisai.github.io/charts"
  create_namespace = true
  recreate_pods    = true
  force_update     = true

  set {
    name  = "controller.host"
    value = var.netris_controller_host
  }
  set {
    name  = "controller.login"
    value = var.netris_controller_login
  }
  set {
    name  = "controller.password"
    value = var.netris_controller_password
  }
  # NOTE(review): insecure=false means the controller's TLS cert is verified —
  # confirm the controller host presents a valid certificate.
  set {
    name  = "controller.insecure"
    value = false
  }
  # Pull the operator image through the local registry mirror.
  set {
    name  = "image.repository"
    value = "${var.image_registry}/netris-operator"
  }
  set {
    name  = "image.tag"
    value = "v3.0.0"
  }
}

View File

@@ -0,0 +1,18 @@
terraform {
  required_version = ">= 1.7"

  # Terraform Cloud remote backend. With `prefix`, the concrete workspace is
  # chosen at runtime (e.g. via `terraform workspace select <cluster>-cluster`),
  # producing full names like "turnk8s-<cluster>-cluster".
  backend "remote" {
    hostname     = "app.terraform.io"
    organization = "infraheads"
    workspaces {
      prefix = "turnk8s-"
    }
  }

  required_providers {
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.13"
    }
  }
}

View File

@@ -0,0 +1,72 @@
variable "cluster_name" {
description = "The name of the cluster."
type = string
default = "turnk8s-cluster"
}
variable "talos_version" {
description = "Talos version to be used"
type = string
default = "v1.7.1"
}
variable "image_registry" {
description = "The registry from which images should be downloaded for cluster"
type = string
default = "192.168.2.4:6000"
}
# ArgoCD variables
variable "argocd_chart_name" {
type = string
default = "argo-cd"
}
variable "argocd_chart_version" {
type = string
default = "6.7.18"
}
variable "argocd_chart_repository" {
type = string
default = "https://argoproj.github.io/argo-helm"
}
variable "argocd_admin_password" {
description = "Encrypted password for Argocd admin"
type = string
}
# ArgoCD Apps variables
variable "argocd_app_of_apps_chart_name" {
type = string
default = "argocd-apps"
}
variable "argocd_app_of_apps_chart_version" {
type = string
default = "1.6.2"
}
variable "argocd_app_of_apps_chart_repository" {
type = string
default = "https://argoproj.github.io/argo-helm"
}
# Netris Configuration
variable "netris_controller_host" {
description = "Netris controller host."
type = string
}
variable "netris_controller_login" {
description = "Netris controller login"
type = string
sensitive = true
}
variable "netris_controller_password" {
description = "Netris controller password"
type = string
sensitive = true
}

View File

@@ -0,0 +1,69 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/integrations/github" {
version = "6.2.1"
constraints = "6.2.1"
hashes = [
"h1:ip7024qn1ewDqlNucxh07DHvuhSLZSqtTGewxNLeYYU=",
"zh:172aa5141c525174f38504a0d2e69d0d16c0a0b941191b7170fe6ae4d7282e30",
"zh:1a098b731fa658c808b591d030cc17cc7dfca1bf001c3c32e596f8c1bf980e9f",
"zh:245d6a1c7e632d8ae4bdd2da2516610c50051e81505cf420a140aa5fa076ea90",
"zh:43c61c230fb4ed26ff1b04b857778e65be3d8f80292759abbe2a9eb3c95f6d97",
"zh:59bb7dd509004921e4322a196be476a2f70471b462802f09d03d6ce96f959860",
"zh:5cb2ab8035d015c0732107c109210243650b6eb115e872091b0f7b98c2763777",
"zh:69d2a6acfcd686f7e859673d1c8a07fc1fc1598a881493f19d0401eb74c0f325",
"zh:77f36d3f46911ace5c50dee892076fddfd64a289999a5099f8d524c0143456d1",
"zh:87df41097dfcde72a1fbe89caca882af257a4763c2e1af669c74dcb8530f9932",
"zh:899dbe621f32d58cb7c6674073a6db8328a9db66eecfb0cc3fc13299fd4e62e7",
"zh:ad2eb7987f02f7dd002076f65a685730705d04435313b5cf44d3a6923629fb29",
"zh:b2145ae7134dba893c7f74ad7dfdc65fdddf6c7b1d0ce7e2f3baa96212322fd8",
"zh:bd6bae3ac5c3f96ad9219d3404aa006ef1480e9041d4c95df1808737e37d911b",
"zh:e89758b20ae59f1b9a6d32c107b17846ddca9634b868cf8f5c927cbb894b1b1f",
]
}
provider "registry.terraform.io/siderolabs/talos" {
version = "0.5.0"
constraints = "0.5.0"
hashes = [
"h1:xogkLLCrJJmd278E+vNMnmQgaMD05Gd1QXN914xgVec=",
"zh:0f71f2624576224c9bc924b136b601b734243efa7a7ad8280dfd8bd583e4afa5",
"zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d",
"zh:33c50dacc5029fa20caed702001fb1439899c94f203b1f37dccb970f504bca45",
"zh:3c97a6e2692b88d3f4631a3f8769146f602c210e881b46fa1b3b82c545e51cd1",
"zh:44077a137613bcfe29eef00315b5aa50d83390c3c727580a4ff0f4b87f22d228",
"zh:5bd02f278aec5567f94dd057d1c758363998ce581ff17b0869515bb682c02186",
"zh:80f40939bc3b55f0005c03b77122ceea86ec4deb82f5557950a97ad96fbb1557",
"zh:94c1b17f25bc30eacde926e46f196f1f135032674730d9f50c986ef6b7a854f0",
"zh:95ad665b2fdeed38180f5c471164833a34d07c1ef0470c1652565fe8cf4e9c4a",
"zh:a50ef6088afcb129c176dd4ba86c345e9be7b14358bb3b21c34f06930d8f39ef",
"zh:aa71da1da00ed66f1dddf1b69c10b829f24ac89e207de07d32c455dd04482096",
"zh:abb7eeb2b089081b4814ed80a295673e1a92f82ce092dde37b5bc92e75efec2c",
"zh:db9b9b54a0db5ae151376d5a73e0d28497c3e06181840e71ef8349213ac03e50",
"zh:e50ed8aa90b736508fce63680e8339240cecb74709ab9563d34d2c2ce7bc8445",
"zh:f3a279723ff31a095d7bfff21857abfcc9a2cfdeeea8521d179630ae6565d581",
]
}
provider "registry.terraform.io/telmate/proxmox" {
version = "3.0.1-rc1"
constraints = "3.0.1-rc1"
hashes = [
"h1:4xZeGV+uRpYX6Boe0kWI3Dw3B+x8P4tT4JTnUpE1FJU=",
"zh:4c4a5739ed8b0fdec644632de9cc3219a31022b03aaaf6b77d49efe58541d5c1",
"zh:5c97c58a1d15d3b77bade630c70c75f24cf884560625afa78f408f682c09cc05",
"zh:6b3b8a410cdf39a1cd9dffc2e62806ff91e23a77ccc310fd1ea130560a2f6634",
"zh:73fb750e3363cb1eefacd5fc714a93b9cd65f55459981eb27dd7f4ab7ae5aed7",
"zh:7b4bd5db2188cd21df1d7a49cbf893a18aaa915add400242b20f82cba41d3606",
"zh:8427be708a485325bb0cf70ff9470256e388353b80a554772f19103edf208107",
"zh:9bd7ffdcf8e19efcc90bdef55871d9e6c6d8bcaf46d6873d7faa01709154129c",
"zh:9f7dfe0f4c59fb593f936c67901e851fdfa279fa2aa5ae8f5fff29e763487861",
"zh:a61fd2386c116dd4ed1e202c10b4a3378290d29411f47f0460ba7b8d13e14c53",
"zh:cbe1be50efe3608d014c05503d65d8a3a98cec87962a8a0fdd95065b85db6d4f",
"zh:cdb175a0cb863a11090edbd50500b7d55137dbbb0d31fd119d727e12cadc6b4a",
"zh:cee3e0ed0ecaec22e58e5cec4fb202fb4f2a6779a87ef57c464c7b9c825f4c37",
"zh:e5d4e4fc9619bcfaaed159f9404457f29385dcb2f8fc42cf1b2dab4de7bdbf21",
"zh:f39ec72a5e8624949592d48dec11ef6abb811d0ae5a80b6fe356a47392a38a6a",
]
}

View File

@@ -0,0 +1,35 @@
# Control-plane VM for the cluster. local.clusters holds at most the single
# entry for var.cluster_name, so for_each yields 0 or 1 instances (0 plans a
# destroy when the cluster was removed from inputs.yaml).
resource "proxmox_vm_qemu" "controlplane" {
  for_each    = local.clusters
  name        = "${var.cluster_name}-cp"
  target_node = local.proxmox_target_node
  iso         = local.talos_iso  # Talos installer ISO matching var.talos_version

  cores   = each.value.controlplane.cpu_cores
  sockets = var.controlplane_sockets
  cpu     = var.controlplane_cpu
  qemu_os = var.controlplane_qemu_os
  scsihw  = var.controlplane_scsihw
  memory  = each.value.controlplane.memory
  # Guest agent enabled — the Talos resources below read
  # default_ipv4_address from this VM.
  agent = 1

  disks {
    scsi {
      scsi0 {
        disk {
          storage  = var.controlplane_disk_storage
          size     = each.value.controlplane.disk_size
          iothread = true
          asyncio  = "native"
        }
      }
    }
  }

  network {
    bridge   = var.controlplane_network_bridge
    model    = var.controlplane_network_model
    firewall = var.controlplane_network_firewall
  }
}

View File

@@ -0,0 +1,26 @@
# Per-cluster GitHub repository for ArgoCD application manifests, created
# from the turnk8s template repo only after the cluster VMs exist.
resource "github_repository" "argocd_applications" {
  depends_on = [proxmox_vm_qemu.controlplane, proxmox_vm_qemu.worker]
  for_each   = local.clusters

  name        = var.cluster_name
  description = "This repo is for the ArgoCD Applications."

  template {
    owner                = "infraheads"
    repository           = "turnk8s_template_repo"
    include_all_branches = true
  }
}

# Seeds the repo with the root ArgoCD Application manifest, rendered to point
# back at the repo's own clone URL.
resource "github_repository_file" "argocd_application" {
  for_each   = local.clusters
  repository = github_repository.argocd_applications[each.key].name
  branch     = "main"
  file       = "argocd_applications/infraheads.yaml"
  content = templatefile("${path.module}/templates/argocd_application.yaml.tpl",
    {
      sourceRepoURL = github_repository.argocd_applications[each.key].http_clone_url
    }
  )
}

View File

@@ -0,0 +1,8 @@
# Writes the retrieved kubeconfig to /opt/kubeconfig/<cluster_name> via the
# helper script, so the CI workflow can upload it as an artifact. The
# yamlencode() wrapper is stripped again inside the script.
resource "terraform_data" "kubeconfig" {
  depends_on = [data.talos_cluster_kubeconfig.cp_ck]
  for_each   = local.clusters
  provisioner "local-exec" {
    command = "sh ../../scripts/create_kubeconfig.sh \"${yamlencode(data.talos_cluster_kubeconfig.cp_ck[each.key].kubeconfig_raw)}\" ${var.cluster_name}"
  }
}

View File

@@ -0,0 +1,7 @@
locals {
  proxmox_api_url = "https://${var.proxmox_ip}:8006/api2/json"
  # Two-node Proxmox setup: the configured IP decides which node hosts the VMs.
  proxmox_target_node = var.proxmox_ip == "192.168.1.5" ? "pve01" : "pve02"
  # Single-entry map {cluster_name => cluster config} read from inputs.yaml.
  # When the cluster has been removed from inputs.yaml the lookup fails and
  # try() yields {}, so every for_each resource plans to zero instances —
  # i.e. a destroy of the whole cluster.
  clusters  = try({ tostring(var.cluster_name) = yamldecode(file("../../inputs.yaml"))[var.cluster_name] }, {})
  talos_iso = "local:iso/metal-amd64-qemu-${var.talos_version}.iso"
}

View File

@@ -0,0 +1,11 @@
# Proxmox API access via token credentials; TLS verification is disabled
# because the server uses a self-signed certificate (NOTE(review): confirm).
provider "proxmox" {
  pm_api_url          = local.proxmox_api_url
  pm_api_token_id     = var.proxmox_token_id
  pm_api_token_secret = var.proxmox_token_secret
  pm_tls_insecure     = true
}

# GitHub provider scoped to the infraheads organization; used to create the
# per-cluster ArgoCD application repositories.
provider "github" {
  token = var.github_token
  owner = "infraheads"
}

View File

@@ -0,0 +1,8 @@
# Kubeconfig objects per cluster; consumed by the "-cluster" workspace via
# terraform_remote_state to configure its helm provider.
output "cluster_kubeconfig" {
  value     = data.talos_cluster_kubeconfig.cp_ck
  sensitive = true
}

# Created ArgoCD application repositories (including clone URLs); also
# consumed by the "-cluster" workspace.
output "github_repo_url" {
  value = github_repository.argocd_applications
}

View File

@@ -0,0 +1,107 @@
# Generates machine secrets for Talos cluster
resource "talos_machine_secrets" "talos_secrets" {
talos_version = var.talos_version
}
# Generates client configuration for a Talos cluster (talosconfig)
data "talos_client_configuration" "cp_cc" {
for_each = local.clusters
cluster_name = var.cluster_name
client_configuration = talos_machine_secrets.talos_secrets.client_configuration
nodes = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
endpoints = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
}
# Generates a machine configuration for the control plane (controlplane.yaml)
data "talos_machine_configuration" "cp_mc" {
for_each = local.clusters
cluster_name = data.talos_client_configuration.cp_cc[each.key].cluster_name
machine_type = "controlplane"
cluster_endpoint = "https://${proxmox_vm_qemu.controlplane[each.key].default_ipv4_address}:6443"
machine_secrets = talos_machine_secrets.talos_secrets.machine_secrets
kubernetes_version = var.k8s_version
talos_version = var.talos_version
config_patches = [
templatefile("${path.module}/templates/controlplane.yaml.tpl",
{
talos-version = var.talos_version,
kubernetes-version = var.k8s_version,
registry = var.image_registry
}
)
]
}
# Applies machine configuration to the control plane
resource "talos_machine_configuration_apply" "cp_mca" {
# depends_on = [data.talos_machine_configuration.cp_mc]
for_each = local.clusters
client_configuration = talos_machine_secrets.talos_secrets.client_configuration
machine_configuration_input = data.talos_machine_configuration.cp_mc[each.key].machine_configuration
node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address
}
# Bootstraps the etcd cluster on the control plane
resource "talos_machine_bootstrap" "cp_mb" {
depends_on = [talos_machine_configuration_apply.cp_mca]
for_each = local.clusters
node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address
client_configuration = talos_machine_secrets.talos_secrets.client_configuration
}
# Retrieves the kubeconfig for a Talos cluster
data "talos_cluster_kubeconfig" "cp_ck" {
depends_on = [talos_machine_bootstrap.cp_mb]
for_each = local.clusters
client_configuration = talos_machine_secrets.talos_secrets.client_configuration
node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address
}
# Generates a machine configuration for the worker (worker.yaml) of each
# cluster. The worker.yaml.tpl patch pins the kubelet/installer images to the
# requested Talos/Kubernetes versions and routes image pulls through the
# configured registry mirror. Cluster name and endpoint are taken from the
# control-plane data sources so worker and control plane always agree.
data "talos_machine_configuration" "worker_mc" {
  for_each           = local.clusters
  cluster_name       = data.talos_client_configuration.cp_cc[each.key].cluster_name
  machine_type       = "worker"
  cluster_endpoint   = data.talos_machine_configuration.cp_mc[each.key].cluster_endpoint
  machine_secrets    = talos_machine_secrets.talos_secrets.machine_secrets
  kubernetes_version = var.k8s_version
  talos_version      = var.talos_version
  config_patches = [
    templatefile("${path.module}/templates/worker.yaml.tpl",
      {
        talos-version      = var.talos_version,
        kubernetes-version = var.k8s_version,
        registry           = var.image_registry
      }
    )
  ]
}
# Applies the worker machine configuration to the worker node.
# NOTE(review): this only configures a single worker per cluster
# (proxmox_vm_qemu.worker[each.key]); if more than one worker per cluster is
# ever provisioned (cf. the worker_nodes_count variable and the commented-out
# count below), this resource must iterate over the worker instances — confirm.
resource "talos_machine_configuration_apply" "worker_mca" {
  # count = local.input_vars.worker_node.count
  for_each                    = local.clusters
  client_configuration        = talos_machine_secrets.talos_secrets.client_configuration
  machine_configuration_input = data.talos_machine_configuration.worker_mc[each.key].machine_configuration
  node                        = proxmox_vm_qemu.worker[each.key].default_ipv4_address
}
# Blocks until each cluster reports healthy (control plane + worker), with a
# read timeout of one hour. Depends on the kubeconfig data source so the
# health check only starts after bootstrap has completed.
data "talos_cluster_health" "cluster_health" {
  depends_on           = [data.talos_cluster_kubeconfig.cp_ck]
  for_each             = local.clusters
  client_configuration = talos_machine_secrets.talos_secrets.client_configuration
  control_plane_nodes  = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
  worker_nodes         = [proxmox_vm_qemu.worker[each.key].default_ipv4_address]
  # The control plane also serves as the Talos API endpoint
  endpoints            = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address]
  timeouts = {
    read = "1h"
  }
}

View File

@@ -0,0 +1,22 @@
# ArgoCD Application (Terraform templatefile() template — ${sourceRepoURL} is
# substituted at plan time) that continuously syncs the repository's
# "kubernetes" directory into the in-cluster API server.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: infraheads
  namespace: argocd
  finalizers:
    # cascade-delete the app's managed resources when the Application is deleted
    - resources-finalizer.argocd.argoproj.io
spec:
  project: default # each application belongs to a single project; if unspecified, an application belongs to the "default" project
  source:
    repoURL: ${sourceRepoURL}
    targetRevision: main
    path: kubernetes
  destination:
    server: https://kubernetes.default.svc # endpoint of Kubernetes API Server
    namespace: default
  syncPolicy:
    automated:
      selfHeal: true # by default, changes made to the live cluster will not trigger automated sync (override manual changes on cluster)
      prune: true # by default, automatic sync will not delete resources

View File

@@ -0,0 +1,19 @@
# Talos control-plane config patch (Terraform templatefile() template).
# Pins the kubelet, installer and Kubernetes control-plane component images to
# the requested versions and mirrors all image registries through ${registry}.
machine:
  kubelet:
    image: ghcr.io/siderolabs/kubelet:${kubernetes-version}
  install:
    image: ghcr.io/siderolabs/installer-qemu:${talos-version}
  registries:
    mirrors:
      # '*' routes pulls from every upstream registry through the mirror
      '*':
        endpoints:
          # NOTE(review): plain-HTTP endpoint — assumes the mirror registry is
          # on a trusted private network; confirm before exposing more widely
          - http://${registry}
cluster:
  apiServer:
    image: registry.k8s.io/kube-apiserver:${kubernetes-version}
  controllerManager:
    image: registry.k8s.io/kube-controller-manager:${kubernetes-version}
  proxy:
    image: registry.k8s.io/kube-proxy:${kubernetes-version}
  scheduler:
    image: registry.k8s.io/kube-scheduler:${kubernetes-version}

View File

@@ -0,0 +1,10 @@
# Talos worker config patch (Terraform templatefile() template).
# Pins the kubelet/installer images to the requested versions and mirrors all
# image registries through ${registry}.
machine:
  kubelet:
    image: ghcr.io/siderolabs/kubelet:${kubernetes-version}
  install:
    image: ghcr.io/siderolabs/installer-qemu:${talos-version}
  registries:
    mirrors:
      # '*' routes pulls from every upstream registry through the mirror
      '*':
        endpoints:
          # NOTE(review): plain-HTTP endpoint — assumes the mirror registry is
          # on a trusted private network; confirm
          - http://${registry}

View File

@@ -0,0 +1,26 @@
# Terraform settings: state and runs live on Terraform Cloud, with one
# workspace per cluster selected via the "turnk8s-" prefix (the concrete
# workspace is chosen through TF_WORKSPACE / `terraform workspace select`
# in the CI pipeline).
terraform {
  required_version = ">= 1.7"
  backend "remote" {
    hostname     = "app.terraform.io"
    organization = "infraheads"
    workspaces {
      prefix = "turnk8s-"
    }
  }
  required_providers {
    proxmox = {
      source = "telmate/proxmox"
      # NOTE(review): pinned to a release candidate; move to a stable
      # release once one is available
      version = "3.0.1-rc1"
    }
    talos = {
      source = "siderolabs/talos"
      version = "0.5.0"
    }
    github = {
      source = "integrations/github"
      version = "6.2.1"
    }
  }
}

View File

@@ -0,0 +1,185 @@
# Input variables for the turnk8s Proxmox/Talos cluster module.

# --- Proxmox connection ---

variable "proxmox_ip" {
  description = "IP of the Proxmox server"
  type        = string
  default     = "192.168.1.5"
}

variable "proxmox_token_id" {
  description = "This is an API token you have previously created for a specific user."
  type        = string
  sensitive   = true
}

variable "proxmox_token_secret" {
  description = "This uuid is only available when the token was initially created."
  type        = string
  sensitive   = true
}

# --- Cluster-wide settings ---

variable "talos_version" {
  description = "Talos version to be used"
  type        = string
  default     = "v1.7.1"
}

variable "k8s_version" {
  description = "K8s version to be used"
  type        = string
  default     = "v1.30.0"
}

variable "image_registry" {
  description = "The registry from which images should be downloaded for cluster"
  type        = string
  default     = "192.168.2.4:6000"
}

variable "github_token" {
  description = "Git repository token"
  type        = string
  # Credential: marked sensitive so it is redacted from plan/apply output,
  # matching the treatment of the proxmox token variables above.
  sensitive   = true
}

variable "cluster_name" {
  description = "The name of the cluster."
  type        = string
  default     = "turnk8s-cluster"
}

# --- Control-plane VM sizing ---

variable "controlplane_cores" {
  description = "The number of CPU cores per CPU socket to allocate to the VM."
  type        = number
  default     = 2
}

variable "controlplane_sockets" {
  description = "The number of CPU sockets to allocate to the VM."
  type        = number
  default     = 1
}

variable "controlplane_cpu" {
  description = "The type of CPU to emulate in the Guest."
  type        = string
  default     = "x86-64-v2-AES"
}

variable "controlplane_qemu_os" {
  description = "The type of OS in the guest."
  type        = string
  default     = "l26"
}

variable "controlplane_scsihw" {
  description = "The SCSI controller to emulate."
  type        = string
  default     = "virtio-scsi-single"
}

variable "controlplane_memory" {
  description = "The amount of memory to allocate to the VM in Megabytes."
  type        = number
  default     = 4096
}

variable "controlplane_network_bridge" {
  description = "Bridge to which the network device should be attached."
  type        = string
  default     = "vmbr1"
}

variable "controlplane_network_model" {
  description = "Network Card Model"
  type        = string
  default     = "virtio"
}

variable "controlplane_network_firewall" {
  description = "Whether to enable the Proxmox firewall on this network device."
  type        = bool
  default     = false
}

variable "controlplane_disk_storage" {
  description = "The name of the storage pool on which to store the disk."
  type        = string
  default     = "local-lvm"
}

variable "controlplane_disk_size" {
  description = "The size of the created disk in Gigabytes."
  type        = number
  default     = 32
}

# --- Worker VM sizing ---

variable "worker_nodes_count" {
  description = "Count of the Worker Nodes."
  type        = number
  default     = 1
}

variable "worker_cores" {
  description = "The number of CPU cores per CPU socket to allocate to the VM."
  type        = number
  default     = 2
}

variable "worker_sockets" {
  description = "The number of CPU sockets to allocate to the VM."
  type        = number
  default     = 1
}

variable "worker_cpu" {
  description = "The type of CPU to emulate in the Guest."
  type        = string
  default     = "x86-64-v2-AES"
}

variable "worker_qemu_os" {
  description = "The type of OS in the guest."
  type        = string
  default     = "l26"
}

variable "worker_scsihw" {
  description = "The SCSI controller to emulate."
  type        = string
  default     = "virtio-scsi-single"
}

variable "worker_memory" {
  description = "The amount of memory to allocate to the VM in Megabytes."
  type        = number
  default     = 4096
}

variable "worker_network_bridge" {
  description = "Bridge to which the network device should be attached."
  type        = string
  default     = "vmbr1"
}

variable "worker_network_model" {
  description = "Network Card Model"
  type        = string
  default     = "virtio"
}

variable "worker_network_firewall" {
  description = "Whether to enable the Proxmox firewall on this network device."
  type        = bool
  default     = false
}

variable "worker_disk_storage" {
  description = "The name of the storage pool on which to store the disk."
  type        = string
  default     = "local-lvm"
}

variable "worker_disk_size" {
  description = "The size of the created disk in Gigabytes."
  type        = number
  default     = 32
}

View File

@@ -0,0 +1,35 @@
# Worker VM for each cluster defined in local.clusters, sized from the
# per-cluster worker_node inputs with fallbacks from the worker_* variables.
resource "proxmox_vm_qemu" "worker" {
  for_each = local.clusters
  # Derive the name from the cluster key so worker VM names are unique per
  # cluster. The previous value "${var.cluster_name}-worker-index" contained
  # the literal text "index" (a leftover placeholder from a count-based
  # version) and produced the same name for every cluster in for_each.
  name        = "${each.key}-worker"
  target_node = local.proxmox_target_node
  iso         = local.talos_iso
  cores       = each.value.worker_node.cpu_cores
  sockets     = var.worker_sockets
  cpu         = var.worker_cpu
  qemu_os     = var.worker_qemu_os
  scsihw      = var.worker_scsihw
  memory      = each.value.worker_node.memory
  # Enable the QEMU guest agent so the provider can read default_ipv4_address
  agent       = 1

  disks {
    scsi {
      scsi0 {
        disk {
          storage  = var.worker_disk_storage
          size     = each.value.worker_node.disc_size
          iothread = true
          asyncio  = "native"
        }
      }
    }
  }

  network {
    bridge   = var.worker_network_bridge
    model    = var.worker_network_model
    firewall = var.worker_network_firewall
  }
}