Separated workflow for main (#18)

Separated workflow for main and other branches

1. From now on, the workflow defined in "pr_cluster_workflow.yaml" is triggered on config.yaml changes. It can be used for PR testing.
2. Draft PRs are used when the source branch contains logic modifications; normal PRs are used for cluster management.
3. In the case of draft PRs, which are for logic modifications, if a cluster is required, its name must adhere to the following convention: "turnk8s-<PR_NUMBER>".
4. In the case of normal PRs, which are for cluster management, no cluster in the config.yaml file may match the cluster-name regex turnk8s-.* (see the sketch below).
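
A minimal sketch of these naming rules, using hypothetical cluster names and the same yq/jq tools the workflows rely on:

```bash
# List the cluster names (top-level keys) defined in config.yaml
yq e 'keys | .[]' config.yaml

# Draft PR #42 (logic modification): the only acceptable key is
#   turnk8s-42
# Normal PR (cluster management): no key may match turnk8s-.*, e.g.
#   staging-cluster
#   prod-cluster
```
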
Armen Hakobian
2024-07-16 18:12:43 +04:00
committed by GitHub
parent 36c3360758
commit 8ecd7d3867
14 changed files with 398 additions and 257 deletions


@@ -16,23 +16,11 @@ inputs:
runs:
using: "composite"
steps:
- name: Setup Terraform
uses: hashicorp/setup-terraform@v3
- name: Initialize Terraform
uses: ./.github/actions/initialize_terraform_composite_action
with:
cli_config_credentials_token: ${{ inputs.tf_api_token }}
terraform_version: 1.7.5
- name: Configure Terraform Cache
run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
shell: bash
- name: Initializing Terraform
run: |
cd ${{ inputs.working_directory }}
terraform init
shell: bash
env:
TF_WORKSPACE: "default-ws"
working_directory: ${{ inputs.working_directory }}
tf_api_token: ${{ inputs.tf_api_token }}
- name: Selecting Terraform Workspace
id: workspace
@@ -59,16 +47,5 @@ runs:
id: apply
run: |
cd ${{ inputs.working_directory }}
set +e
terraform apply ${{ inputs.plan_output }}
tf_exitcode=$?
set -e
if [ $tf_exitcode -ne 0 ];
then
echo """
Error: Unable to create a Cluster.
Message: This could be caused by a lack of resources in the server. Please review the server resources and try again.
"""
exit 1
fi
shell: bash


@@ -0,0 +1,29 @@
name: "Initialize Terraform Composite Action"
description: "A composite action to initialize terraform"
inputs:
working_directory:
description: "Terraform Working Directory"
required: true
tf_api_token:
description: "Terraform API Token"
required: true
runs:
using: "composite"
steps:
- name: Setup Terraform
uses: hashicorp/setup-terraform@v3
with:
cli_config_credentials_token: ${{ inputs.tf_api_token }}
terraform_version: 1.7.5
- name: Configure Terraform Cache
run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
shell: bash
- name: Initializing Terraform
run: |
cd ${{ inputs.working_directory }}
terraform init
shell: bash
env:
TF_WORKSPACE: "default-ws"


@@ -1,150 +0,0 @@
name: 'Apply Terraform'
on:
push:
branches:
- 'main'
paths:
- 'config.yaml'
env:
TF_CLOUD_ORGANIZATION: "infraheads"
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
TF_VAR_proxmox_token_id: "${{ secrets.PROXMOX_TOKEN_ID }}"
TF_VAR_proxmox_token_secret: "${{ secrets.PROXMOX_TOKEN_SECRET }}"
TF_VAR_github_token: "${{ secrets.TOKEN_GITHUB }}"
TF_VAR_netris_controller_host: "${{ vars.NETRIS_CONTROLLER_HOST }}"
TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}"
TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}"
TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}"
jobs:
setup_terraform:
runs-on: self-hosted
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform/infrastructure"
outputs:
clusters: ${{ steps.output_variables.outputs.clusters }}
removable_workspaces: ${{ steps.output_variables.outputs.removable_workspaces }}
steps:
- uses: actions/checkout@v4
- name: Setup Terraform Environment
uses: hashicorp/setup-terraform@v3
with:
cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
terraform_version: 1.7.5
- name: Configure Terraform Cache
run: |
echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
- name: Initializing Terraform
id: init
run: terraform init -upgrade
env:
TF_WORKSPACE: "default-ws"
- name: Extract values and set output variables
id: output_variables
run: |
clusters=$(yq e 'keys | .[]' ${GITHUB_WORKSPACE}/config.yaml | jq -R -s -c 'split("\n")[:-1]')
workspaces=$(terraform workspace list | grep '\(-infrastructure\|-cluster\)$' | sed 's/-infrastructure$//;s/-cluster$//' | uniq -d | grep -v '^$' | jq -R -s -c 'split("\n")[:-1] | map(ltrimstr(" "))')
echo "removable_workspaces=$(jq -n -c $workspaces-$clusters)" >> $GITHUB_OUTPUT
echo "clusters=$clusters" >> $GITHUB_OUTPUT
destroy_cluster:
needs: setup_terraform
if: ${{ needs.setup_terraform.outputs.removable_workspaces != '[]' }}
runs-on: self-hosted
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform/infrastructure"
strategy:
matrix:
destroyable_resources: ${{ fromJSON(needs.setup_terraform.outputs.removable_workspaces) }}
steps:
- uses: actions/checkout@v4
- name: Setup Terraform
uses: hashicorp/setup-terraform@v3
with:
cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
terraform_version: 1.7.5
- name: Configure Terraform Cache
run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
- name: Initializing Terraform
run: terraform init
env:
TF_WORKSPACE: "default-ws"
TF_VAR_cluster_name: ${{ matrix.destroyable_resources }}
- name: Selecting Terraform Workspace
run: terraform workspace select ${{ matrix.destroyable_resources }}-infrastructure
- name: Destroying Terraform
run: |
terraform destroy -auto-approve
- name: Destroying Terraform Cloud Workspace
run: |
terraform workspace select default-ws
terraform workspace delete -force ${{ matrix.destroyable_resources }}-infrastructure
terraform workspace delete -force ${{ matrix.destroyable_resources }}-cluster
apply_cluster:
needs: setup_terraform
if: ${{ needs.setup_terraform.outputs.clusters != '[]' }}
runs-on: self-hosted
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform"
strategy:
matrix:
cluster: ${{ fromJSON(needs.setup_terraform.outputs.clusters) }}
steps:
- name: Infrastructure updates
uses: ./.github/actions/apply_terraform_composite_action
with:
workspace: ${{ matrix.cluster }}-infrastructure
plan_output: tfplan-${{ matrix.cluster }}-infrastructure
working_directory: "terraform/infrastructure"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
env:
TF_VAR_cluster_name: ${{ matrix.cluster }}
- name: Cluster updates
uses: ./.github/actions/apply_terraform_composite_action
with:
workspace: ${{ matrix.cluster }}-cluster
plan_output: tfplan-${{ matrix.cluster }}-cluster
working_directory: "terraform/cluster"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
env:
TF_VAR_cluster_name: ${{ matrix.cluster }}
- name: Generating kube-config
uses: actions/upload-artifact@v4
with:
name: kube-config-${{ matrix.cluster }}
path: /opt/kubeconfig/${{ matrix.cluster }}
compression-level: 0
- name: Generateing Markdown
run: |
echo "### turnk8s" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Push your Kubernetes service manifests to [GitHub URL](https://github.com/infraheads/${{ matrix.cluster }}) to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY
echo "Use `kubeconfig` file(s) to connect to the cluster, see the link above :point_up:" >> $GITHUB_STEP_SUMMARY

.github/workflows/main_workflow.yml (new file)

@@ -0,0 +1,151 @@
# This workflow creates clusters from the "config.yaml" file to implement a GitOps solution with "turnk8s".
# It runs when PRs are merged into the 'main' branch.
# The workflow contains 3 jobs:
# 1) setup_terraform: sets up Terraform and detects pending changes (cluster creation, update and deletion).
# 2) destroy_cluster: destroys the infrastructure of any cluster whose entry has been removed from the config.yaml file.
# 3) apply_cluster: creates or updates clusters based on the config.yaml file.
name: 'Automated Terraform Cluster Setup and Cleanup'
on:
push:
branches:
- 'main'
paths:
- 'config.yaml'
env:
TF_CLOUD_ORGANIZATION: "infraheads"
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
TF_VAR_proxmox_token_id: "${{ secrets.PROXMOX_TOKEN_ID }}"
TF_VAR_proxmox_token_secret: "${{ secrets.PROXMOX_TOKEN_SECRET }}"
TF_VAR_github_token: "${{ secrets.TOKEN_GITHUB }}"
TF_VAR_netris_controller_host: "${{ vars.NETRIS_CONTROLLER_HOST }}"
TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}"
TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}"
TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}"
jobs:
setup_terraform:
runs-on: self-hosted
container:
image: ${{ vars.RUNNER_IMAGE }}
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform"
outputs:
desired_clusters: ${{ steps.filter_clusters.outputs.desired_clusters }}
removable_clusters: ${{ steps.filter_clusters.outputs.removable_clusters }}
steps:
- uses: actions/checkout@v4
# Using composite actions for initializing Terraform
- name: Initialize Terraform
uses: ./.github/actions/initialize_terraform_composite_action
with:
working_directory: "terraform/infrastructure"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
- name: Filter existing, desired and removable clusters
id: filter_clusters
run: |
# Get clusters from config.yaml file
desired_clusters=$(yq e 'keys | .[]' ${GITHUB_WORKSPACE}/config.yaml | jq -R -s -c 'split("\n")[:-1]')
# Get existing clusters
existing_clusters=$(terraform workspace list | grep '\(-infrastructure\|-cluster\)$' | sed 's/-infrastructure$//;s/-cluster$//' | uniq | grep -v '^$' | jq -R -s -c 'split("\n")[:-1] | map(ltrimstr(" "))')
# Compute the clusters that must be removed (existing minus desired)
echo "removable_clusters=$(jq -n -c $existing_clusters-$desired_clusters)" >> $GITHUB_OUTPUT
echo "desired_clusters=$desired_clusters" >> $GITHUB_OUTPUT
destroy_cluster:
needs: setup_terraform
if: ${{ needs.setup_terraform.outputs.removable_clusters != '[]' }}
runs-on: self-hosted
container:
image: ${{ vars.RUNNER_IMAGE }}
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform/infrastructure"
strategy:
matrix:
cluster: ${{ fromJSON(needs.setup_terraform.outputs.removable_clusters) }}
steps:
# Using composite actions for initializing Terraform
- name: Initialize Terraform
uses: ./.github/actions/initialize_terraform_composite_action
with:
working_directory: "terraform/infrastructure"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
env:
TF_VAR_cluster_name: ${{ matrix.cluster }}
- name: Selecting Terraform Workspace
run: terraform workspace select ${{ matrix.cluster }}-infrastructure
- name: Destroying Terraform
run: |
terraform destroy -auto-approve
- name: Destroying Terraform Cloud Workspace
run: |
terraform workspace select default-ws
terraform workspace delete -force ${{ matrix.cluster }}-infrastructure
terraform workspace delete -force ${{ matrix.cluster }}-cluster
apply_cluster:
needs: setup_terraform
if: ${{ needs.setup_terraform.outputs.desired_clusters != '[]' }}
runs-on: self-hosted
container:
image: ${{ vars.RUNNER_IMAGE }}
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform"
strategy:
matrix:
cluster: ${{ fromJSON(needs.setup_terraform.outputs.desired_clusters) }}
steps:
# Using composite actions for applying cluster's infrastructure changes
- name: Infrastructure updates
uses: ./.github/actions/apply_terraform_composite_action
with:
workspace: ${{ matrix.cluster }}-infrastructure
plan_output: tfplan-${{ matrix.cluster }}-infrastructure
working_directory: "terraform/infrastructure"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
env:
TF_VAR_cluster_name: ${{ matrix.cluster }}
# Using composite actions for applying cluster's applications and tools changes
- name: Cluster updates
uses: ./.github/actions/apply_terraform_composite_action
with:
workspace: ${{ matrix.cluster }}-cluster
plan_output: tfplan-${{ matrix.cluster }}-cluster
working_directory: "terraform/cluster"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
env:
TF_VAR_cluster_name: ${{ matrix.cluster }}
- name: Generating kube-config
uses: actions/upload-artifact@v4
with:
name: kube-config-${{ matrix.cluster }}
path: /opt/kubeconfig/${{ matrix.cluster }}
compression-level: 0
- name: Generating Markdown
run: |
echo "### turnk8s" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Push your Kubernetes service manifests to [GitHub URL](https://github.com/infraheads/${{ matrix.cluster }}) to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY
echo "Use the 'kubeconfig' file(s) to connect to the cluster, which is(are) attached in 'Artifacts' section." >> $GITHUB_STEP_SUMMARY


@@ -0,0 +1,114 @@
# This workflow creates clusters from the "config.yaml" file to implement a GitOps solution with "turnk8s". It contains 2 jobs.
# Clusters can be created or deleted by opening PRs that update the "config.yaml" file; in simple terms, the file describes the clusters that exist on the server.
# The "config.yaml" file can be updated in the following ways:
# 1) For testing new features, the config.yaml file should contain only one cluster description (referred to as the "test-cluster"), whose name must be in the "turnk8s-<PR_NUMBER>" format. This is done by creating a draft PR.
# 2) For modifications (creating, updating, or deleting clusters), simply create a normal PR and describe the cluster(s) within the "config.yaml" file. The cluster names must not start with the "turnk8s-" prefix.
# *Note: Modifications take effect upon merging normal PRs into the main branch.
# 3) All clusters are destroyed if the "config.yaml" file is empty.
# The cases described above are checked by the first job, "checking_yaml_correctness".
# The second job, "apply_cluster", runs only if the PR is a draft. As a result of the workflow, the cluster's kube-config file is attached in the "Artifacts" section.
# *Note: The "test-cluster" is destroyed after the draft PR is merged into the main branch.
name: 'Automated Cluster Deployment for Pull Requests'
on:
pull_request:
branches:
- '*'
paths:
- 'config.yaml'
env:
TF_CLOUD_ORGANIZATION: "infraheads"
TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}"
TF_VAR_proxmox_token_id: "${{ secrets.PROXMOX_TOKEN_ID }}"
TF_VAR_proxmox_token_secret: "${{ secrets.PROXMOX_TOKEN_SECRET }}"
TF_VAR_github_token: "${{ secrets.TOKEN_GITHUB }}"
TF_VAR_netris_controller_host: "${{ vars.NETRIS_CONTROLLER_HOST }}"
TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}"
TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}"
TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}"
TF_VAR_cluster_name: "turnk8s-${{ github.event.number }}"
jobs:
checking_yaml_correctness:
runs-on: self-hosted
permissions:
contents: read
pull-requests: write
container:
image: ${{ vars.RUNNER_IMAGE }}
steps:
- uses: actions/checkout@v4
# * An empty config.yaml destroys all clusters.
# * For changes to the "turnk8s" logic itself, we conventionally create a draft PR; the pipeline relies on the PR type, and the cluster name must be in the "turnk8s-<PR_NUMBER>" format.
# * For cluster management we use normal PRs. In such PRs, the added cluster names must not have the "turnk8s-" prefix.
- name: Ensure validity of the config.yaml file
shell: bash
run: |
set -e
bash "${GITHUB_WORKSPACE}/scripts/validate_config_file.sh" "${GITHUB_WORKSPACE}/config.yaml"
clusters=$(yq e 'keys | .[]' ${GITHUB_WORKSPACE}/config.yaml | jq -R -s -c 'split("\n")[:-1]')
if [[ ${{ github.event.pull_request.draft }} == true ]] && [[ $(echo "$clusters" | jq -r '. | length') == 1 ]] && [[ $(echo "$clusters" | jq -r '.[0]') != "${{ env.TF_VAR_cluster_name }}" ]];
then
echo """
For draft PRs, the cluster name must be in the format \"turnk8s-<PR_NUMBER>\",
as it is assumed to be a test cluster for modifying logic and testing validations and features.
"""
exit 1
elif [[ ${{ github.event.pull_request.draft }} == false ]] && [[ $(echo "$clusters" | jq -r '. | map(startswith("turnk8s-")) | any') == true ]];
then
echo """
For non-draft PRs, the cluster names must not start with the "turnk8s-" prefix,
as these clusters are not for testing and should follow a different naming convention.
"""
exit 1
fi
apply_cluster:
needs: checking_yaml_correctness
if: ${{ github.event.pull_request.draft == true }}
runs-on: self-hosted
permissions:
contents: read
pull-requests: write
defaults:
run:
working-directory: "terraform"
container:
image: ${{ vars.RUNNER_IMAGE }}
steps:
- uses: actions/checkout@v4
# Using composite actions for applying cluster's infrastructure changes
- name: Infrastructure updates
uses: ./.github/actions/apply_terraform_composite_action
with:
workspace: ${{ env.TF_VAR_cluster_name }}-infrastructure
plan_output: tfplan-${{ env.TF_VAR_cluster_name }}-infrastructure
working_directory: "terraform/infrastructure"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
# Using composite actions for applying cluster's applications and tools changes
- name: Cluster updates
uses: ./.github/actions/apply_terraform_composite_action
with:
workspace: ${{ env.TF_VAR_cluster_name }}-cluster
plan_output: tfplan-${{ env.TF_VAR_cluster_name }}-cluster
working_directory: "terraform/cluster"
tf_api_token: ${{ secrets.TF_API_TOKEN }}
- name: Generating kube-config
uses: actions/upload-artifact@v4
with:
name: kube-config-${{ env.TF_VAR_cluster_name }}
path: /opt/kubeconfig/${{ env.TF_VAR_cluster_name }}
compression-level: 0
- name: Generating Markdown
run: |
echo "### turnk8s" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Push your Kubernetes service manifests to [GitHub URL](https://github.com/infraheads/${{ env.TF_VAR_cluster_name }}) to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY
echo "Use the 'kubeconfig' file to connect to the cluster, which is attached in 'Artifacts' section." >> $GITHUB_STEP_SUMMARY


@@ -32,10 +32,10 @@ turnk8s-cluster:
cpu_cores: 2
memory: 4096
disk_size: 20
worker_node:
worker_nodes:
cpu_cores: 2
memory: 4096
disc_size: 20
disk_size: 20
```
</details>
@@ -46,14 +46,14 @@ Please note that you need Proxmox hosts deployed and available for your GitHub r
Configuration parameters:
* **the main key is the cluster name:(Required)** - A unique cluster name
* **versions.talos:(Required)** - Talos Linux version: Supported versions are v1.7.1, v1.7.1, v1.6.7
* **versions.talos:(Required)** - Talos Linux version: Supported versions are v1.7.1, v1.7.0, v1.6.7
* **versions.k8s:(Required)** - Kubernetes version: Supported versions are v1.30.0, v1.29.3
* **controlplane.cpu_cores:(Required)** - controlplane node cores :(min 2)
* **controlplane.memory:(Required)** - controlplane node RAM (min 2048)
* **controlplane.disk_size:(Required)** - controlplane node disk size:(min 10)
* **worker_node.cpu_cores:(Required)** - worker node cores:(min 1)
* **worker_node.memory:(Required)** - worker node RAM:(min 2048)
* **worker_node.disc_size:(Required)** - worker node disk size:(min 10)
* **worker_nodes.cpu_cores:(Required)** - worker node cores:(min 1)
* **worker_nodes.memory:(Required)** - worker node RAM:(min 2048)
* **worker_nodes.disk_size:(Required)** - worker node disk size:(min 10)
Pushing config.yaml triggers a GitHub Actions workflow. The Kubernetes configuration files and the Kubernetes services repo URL are shown on the summary page when the workflow is complete.
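
Putting the parameters together, a hypothetical single-cluster config.yaml could look like the following (the cluster name and values are illustrative, and the versions nesting is assumed from the parameter names above):

```bash
# Write an example config.yaml satisfying the stated minimums
cat > config.yaml <<'EOF'
demo-cluster:
  versions:
    talos: v1.7.1
    k8s: v1.30.0
  controlplane:
    cpu_cores: 2
    memory: 4096
    disk_size: 20
  worker_nodes:
    count: 1
    cpu_cores: 2
    memory: 2048
    disk_size: 10
EOF
```
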


@@ -1,4 +1,4 @@
internal:
turnk8s-18:
controlplane:
cpu_cores: 2
memory: 4096
@@ -7,4 +7,4 @@ internal:
count: 1
cpu_cores: 2
memory: 2048
disc_size: 10
disk_size: 20

scripts/validate_config_file.sh (new executable file)

@@ -0,0 +1,80 @@
#!/bin/bash
# Exit immediately if a command exits with a non-zero status
set -e
# Path to the YAML file
FILE=$1
# Function to validate cluster
validate_cluster() {
local cluster=$1
controlplane_cpu=$(yq e ".$cluster.controlplane.cpu_cores" "$FILE")
controlplane_memory=$(yq e ".$cluster.controlplane.memory" "$FILE")
controlplane_disk=$(yq e ".$cluster.controlplane.disk_size" "$FILE")
worker_node_count=$(yq e ".$cluster.worker_nodes.count" "$FILE")
worker_node_cpu=$(yq e ".$cluster.worker_nodes.cpu_cores" "$FILE")
worker_node_memory=$(yq e ".$cluster.worker_nodes.memory" "$FILE")
worker_node_disk=$(yq e ".$cluster.worker_nodes.disk_size" "$FILE")
# Validate CPU cores of the Control Plane
if ! [[ "$controlplane_cpu" =~ ^(2|4|6|8)$ ]];
then
echo "Control Plane CPU cores must be one of the following values 2, 4, 6 or 8."
exit 1
fi
# Validate RAM Memory of the Control Plane
if ! [[ "$controlplane_memory" =~ ^(4096|6144|8192)$ ]];
then
echo "Control Plane Memory must be one of the following values 4096, 6144 or 8192."
exit 1
fi
# Validate Disk size of the Control Plane
if ! [[ "$controlplane_disk" =~ ^(10|20|40|60)$ ]];
then
echo "Control Plane Disk size must be one of the following values 10, 20, 40 or 60."
exit 1
fi
# Validate the Worker Nodes count
if ! [[ "$worker_node_count" =~ ^[1-5]$ ]];
then
echo "Worker Node count must be from 1 to 5 range."
exit 1
fi
# Validate CPU cores of the Worker Node
if ! [[ "$worker_node_cpu" =~ ^(2|4|6|8)$ ]];
then
echo "Worker Node CPU cores must be one of the following values 2, 4, 6 or 8."
exit 1
fi
# Validate RAM Memory of the Worker Node
if ! [[ "$worker_node_memory" =~ ^(2048|4096|6144)$ ]];
then
echo "Worker Node Memory must be one of the following values 2048, 4096 or 6144."
exit 1
fi
# Validate Disk size of the Worker Node
if ! [[ "$worker_node_disk" =~ ^(10|20|40|60)$ ]];
then
echo "Worker Node Disk size must be one of the following values 10, 20, 40 or 60."
exit 1
fi
}
# Checks if the YAML file is empty
if [[ $(yq e '.' "$FILE") ]];
then
# Extract all clusters
clusters=$(yq e 'keys | .[]' "$FILE")
# Validate each cluster
for cluster in $clusters; do
validate_cluster "$cluster"
done
echo "The $FILE is valid."
fi
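
A hypothetical local invocation of the script, mirroring the workflow step that calls it:

```bash
# Validate the repository's config.yaml before opening a PR
bash scripts/validate_config_file.sh config.yaml
# On success it prints "The config.yaml is valid."; on the first
# offending field it prints an explanatory message and exits 1.
```
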


@@ -20,69 +20,3 @@ provider "registry.terraform.io/hashicorp/helm" {
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/integrations/github" {
version = "6.2.1"
constraints = "6.2.1"
hashes = [
"h1:ip7024qn1ewDqlNucxh07DHvuhSLZSqtTGewxNLeYYU=",
"zh:172aa5141c525174f38504a0d2e69d0d16c0a0b941191b7170fe6ae4d7282e30",
"zh:1a098b731fa658c808b591d030cc17cc7dfca1bf001c3c32e596f8c1bf980e9f",
"zh:245d6a1c7e632d8ae4bdd2da2516610c50051e81505cf420a140aa5fa076ea90",
"zh:43c61c230fb4ed26ff1b04b857778e65be3d8f80292759abbe2a9eb3c95f6d97",
"zh:59bb7dd509004921e4322a196be476a2f70471b462802f09d03d6ce96f959860",
"zh:5cb2ab8035d015c0732107c109210243650b6eb115e872091b0f7b98c2763777",
"zh:69d2a6acfcd686f7e859673d1c8a07fc1fc1598a881493f19d0401eb74c0f325",
"zh:77f36d3f46911ace5c50dee892076fddfd64a289999a5099f8d524c0143456d1",
"zh:87df41097dfcde72a1fbe89caca882af257a4763c2e1af669c74dcb8530f9932",
"zh:899dbe621f32d58cb7c6674073a6db8328a9db66eecfb0cc3fc13299fd4e62e7",
"zh:ad2eb7987f02f7dd002076f65a685730705d04435313b5cf44d3a6923629fb29",
"zh:b2145ae7134dba893c7f74ad7dfdc65fdddf6c7b1d0ce7e2f3baa96212322fd8",
"zh:bd6bae3ac5c3f96ad9219d3404aa006ef1480e9041d4c95df1808737e37d911b",
"zh:e89758b20ae59f1b9a6d32c107b17846ddca9634b868cf8f5c927cbb894b1b1f",
]
}
provider "registry.terraform.io/siderolabs/talos" {
version = "0.5.0"
constraints = "0.5.0"
hashes = [
"h1:xogkLLCrJJmd278E+vNMnmQgaMD05Gd1QXN914xgVec=",
"zh:0f71f2624576224c9bc924b136b601b734243efa7a7ad8280dfd8bd583e4afa5",
"zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d",
"zh:33c50dacc5029fa20caed702001fb1439899c94f203b1f37dccb970f504bca45",
"zh:3c97a6e2692b88d3f4631a3f8769146f602c210e881b46fa1b3b82c545e51cd1",
"zh:44077a137613bcfe29eef00315b5aa50d83390c3c727580a4ff0f4b87f22d228",
"zh:5bd02f278aec5567f94dd057d1c758363998ce581ff17b0869515bb682c02186",
"zh:80f40939bc3b55f0005c03b77122ceea86ec4deb82f5557950a97ad96fbb1557",
"zh:94c1b17f25bc30eacde926e46f196f1f135032674730d9f50c986ef6b7a854f0",
"zh:95ad665b2fdeed38180f5c471164833a34d07c1ef0470c1652565fe8cf4e9c4a",
"zh:a50ef6088afcb129c176dd4ba86c345e9be7b14358bb3b21c34f06930d8f39ef",
"zh:aa71da1da00ed66f1dddf1b69c10b829f24ac89e207de07d32c455dd04482096",
"zh:abb7eeb2b089081b4814ed80a295673e1a92f82ce092dde37b5bc92e75efec2c",
"zh:db9b9b54a0db5ae151376d5a73e0d28497c3e06181840e71ef8349213ac03e50",
"zh:e50ed8aa90b736508fce63680e8339240cecb74709ab9563d34d2c2ce7bc8445",
"zh:f3a279723ff31a095d7bfff21857abfcc9a2cfdeeea8521d179630ae6565d581",
]
}
provider "registry.terraform.io/telmate/proxmox" {
version = "2.9.14"
hashes = [
"h1:H/f+LbVyPOLslHLAYnGuMMRqWFZ65K6E3V+MCYgfAyk=",
"zh:0d049d33f705e5b814d30028770c084151218439424e99684ce31d7e26a720b5",
"zh:20b1c64ed56d81de95f3f37b82b45b4654c0de26670c0e87a474c5cce13cd015",
"zh:2946058abd1d8e50e475b9ec39781eb02576b40dbd80f4653fade4493a4514c6",
"zh:29e50a25c456f040ce072f23ac57b5b82ebd3b916ca5ae6688332b5ec62adc4a",
"zh:3612932306ce5f08db94868f526cbb8c56d0d3c6ebe1c11a83f92bbf94354296",
"zh:42d1699b0abebaac82ea5a19f4393541d8bb2741bde204a8ac1028cdc29d1b14",
"zh:5ffd5dc567262eb8aafdf2f6eac63f7f21361da9c5d75a3c36b479638a0001b0",
"zh:6692ef323e3b89de99934ad731f6a1850525bf8142916ae28ea4e4048d73a787",
"zh:a5afc98e9a4038516bb58e788cb77dea67a60dce780dfcd206d7373c5a56b776",
"zh:bf902cded709d84fa27fbf91b589c241f2238a6c4924e4e479eebd74320b93a5",
"zh:cab0e1e72c9cebcf669fc6f35ec28cb8ab2dffb0237afc8860aa40d23bf8a49f",
"zh:e523b99a48beec83d9bc04b2d336266044f9f53514cefb652fe6768611847196",
"zh:f593915e8a24829d322d2eaeedcb153328cf9042f0d84f66040dde1be70ede04",
"zh:fba1aff541133e2129dfda0160369635ab48503d5c44b8407ce5922ecc15d0bd",
]
}


@@ -13,7 +13,7 @@ data "terraform_remote_state" "infrastructure" {
config = {
organization = "infraheads"
workspaces = {
name = "turnk8s-${var.cluster_name}-infrastructure"
name = "turnk8s-${ startswith(var.cluster_name, "turnk8s-") ? substr(var.cluster_name, 8, -1) : var.cluster_name }-infrastructure"
}
}
}
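
The conditional above strips a duplicate "turnk8s-" prefix before composing the workspace name: "turnk8s-" is 8 characters, so substr(var.cluster_name, 8, -1) drops it. A quick check in terraform console (assuming Terraform is installed):

```bash
echo 'startswith("turnk8s-18", "turnk8s-") ? substr("turnk8s-18", 8, -1) : "turnk8s-18"' | terraform console
# -> "18", so the workspace name becomes "turnk8s-18-infrastructure"
#    rather than "turnk8s-turnk8s-18-infrastructure"
```
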


@@ -1,5 +1,5 @@
variable "cluster_name" {
description = "The name of the cluster."
description = "The cluster name exists in config file."
type = string
default = "turnk8s-cluster"
}


@@ -2,7 +2,7 @@ locals {
proxmox_api_url = "https://${var.proxmox_ip}:8006/api2/json"
proxmox_target_node = var.proxmox_ip == "192.168.1.5" ? "pve01" : "pve02"
clusters = try({ tostring(var.cluster_name) = yamldecode(file("../../config.yaml"))[var.cluster_name] }, {})
clusters = try({ tostring(var.cluster_name) = yamldecode(file(var.config_file_path))[var.cluster_name] }, {})
talos_iso = "local:iso/metal-amd64-qemu-${var.talos_version}.iso"
worker = flatten([
@@ -10,7 +10,7 @@ locals {
for i in range(cluster.worker_nodes.count):
{
cpu_cores = cluster.worker_nodes.cpu_cores
disc_size = cluster.worker_nodes.disc_size
disk_size = cluster.worker_nodes.disk_size
memory = cluster.worker_nodes.memory
}
]


@@ -40,11 +40,17 @@ variable "github_token" {
}
variable "cluster_name" {
description = "The name of the cluster."
description = "The cluster name exists in config file."
type = string
default = "turnk8s-cluster"
}
variable "config_file_path" {
description = "The config.yaml file, where clusters are described."
type = string
default = "../../config.yaml"
}
variable "controlplane_cores" {
description = "The number of CPU cores per CPU socket to allocate to the VM."
type = number


@@ -19,7 +19,7 @@ resource "proxmox_vm_qemu" "worker" {
scsi0 {
disk {
storage = var.worker_disk_storage
size = each.value.disc_size
size = each.value.disk_size
iothread = true
asyncio = "native"
}