Mirror of https://github.com/optim-enterprises-bv/turnk8s.git
Synced 2025-10-29 09:23:03 +00:00
.github/actions/apply_terraform_composite_action/action.yml (deleted)
@@ -1,51 +0,0 @@
-name: "Apply Terraform Composite Action"
-description: "A composite action to apply terraform"
-
-inputs:
-  workspace:
-    description: "Terraform Workspace"
-    required: true
-  plan_output:
-    description: "Terraform Plan Output FileName"
-    required: true
-  working_directory:
-    description: "Terraform Working Directory"
-    required: true
-  tf_api_token:
-    description: "Terraform API Token"
-    required: true
-
-runs:
-  using: "composite"
-  steps:
-    - name: Initialize Terraform
-      uses: ./.github/actions/initialize_terraform_composite_action
-      with:
-        working_directory: ${{ inputs.working_directory }}
-        tf_api_token: ${{ inputs.tf_api_token }}
-
-    - name: Selecting Terraform Workspace
-      id: workspace
-      run: |
-        cd ${{ inputs.working_directory }}
-        terraform workspace select -or-create ${{ inputs.workspace }}
-      shell: bash
-
-    - name: Validating Terraform
-      id: validate
-      run: |
-        cd ${{ inputs.working_directory }}
-        terraform validate -no-color
-      shell: bash
-
-    - name: Planning Terraform
-      id: plan
-      run: |
-        cd ${{ inputs.working_directory }}
-        terraform plan -out=${{ inputs.plan_output }}
-      shell: bash
-
-    - name: Applying Terraform
-      id: apply
-      run: |
-        cd ${{ inputs.working_directory }}
-        terraform apply ${{ inputs.plan_output }}
-      shell: bash
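The deleted action wrapped a fixed select/validate/plan/apply sequence. A minimal sketch of the equivalent shell session, assuming a hypothetical workspace name "demo-infrastructure" and plan file "tfplan-demo-infrastructure":

    cd terraform/infrastructure
    terraform workspace select -or-create demo-infrastructure   # create the workspace on first use
    terraform validate -no-color                                # fail fast on configuration errors
    terraform plan -out=tfplan-demo-infrastructure              # save the plan to a file
    terraform apply tfplan-demo-infrastructure                  # apply exactly the saved plan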
.github/actions/initialize_terraform_composite_action/action.yml (deleted)
@@ -1,29 +0,0 @@
-name: "Initialize Terraform Composite Action"
-description: "A composite action to initialize terraform"
-
-inputs:
-  working_directory:
-    description: "Terraform Working Directory"
-    required: true
-  tf_api_token:
-    description: "Terraform API Token"
-    required: true
-
-runs:
-  using: "composite"
-  steps:
-    - name: Setup Terraform
-      uses: hashicorp/setup-terraform@v3
-      with:
-        cli_config_credentials_token: ${{ inputs.tf_api_token }}
-        terraform_version: 1.7.5
-
-    - name: Configure Terraform Cache
-      run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
-      shell: bash
-
-    - name: Initializing Terraform
-      run: |
-        cd ${{ inputs.working_directory }}
-        terraform init
-      shell: bash
-      env:
-        TF_WORKSPACE: "default-ws"
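The cache step only exports TF_PLUGIN_CACHE_DIR; Terraform uses the plugin cache only if the directory already exists, so the runner image is presumably expected to provide it. A minimal sketch of the full setup, with the mkdir added here as an assumption:

    mkdir -p "$HOME/.terraform.d/plugin-cache"                    # Terraform does not create the cache dir itself
    export TF_PLUGIN_CACHE_DIR="$HOME/.terraform.d/plugin-cache"
    terraform init                                                # provider binaries are now linked from the cache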
.github/workflows/main_workflow.yml
@@ -1,9 +1,6 @@
 # This workflow creates clusters from the "config.yaml" file to implement a GitOps solution with the help of "turnk8s".
 # It starts running when PRs are merged into the 'main' branch.
-# The workflow contains 3 jobs:
-# 1) setup_terraform: sets up Terraform and checks for available changes (cluster creation, update and deletion).
-# 2) destroy_cluster: destroys the Terraform resources of clusters whose entries were removed from the config.yaml file.
-# 3) apply_cluster: creates or updates clusters based on the config.yaml file.


 name: 'Automated Terraform Cluster Setup and Cleanup'
@@ -26,43 +23,7 @@ env:
   TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}"

 jobs:
-  setup_terraform:
-    runs-on: self-hosted
-    container:
-      image: ${{ vars.RUNNER_IMAGE }}
-    permissions:
-      contents: read
-      pull-requests: write
-    defaults:
-      run:
-        working-directory: "terraform"
-    outputs:
-      desired_clusters: ${{ steps.filter_clusters.outputs.desired_clusters }}
-      removable_clusters: ${{ steps.filter_clusters.outputs.removable_clusters }}
-    steps:
-      - uses: actions/checkout@v4
-
-      # Using composite actions for initializing Terraform
-      - name: Initialize Terraform
-        uses: ./.github/actions/initialize_terraform_composite_action
-        with:
-          working_directory: "terraform/infrastructure"
-          tf_api_token: ${{ secrets.TF_API_TOKEN }}
-
-      - name: Filter existing, desired and removable clusters
-        id: filter_clusters
-        run: |
-          # Get the clusters from the config.yaml file
-          desired_clusters=$(yq e 'keys | .[]' ${GITHUB_WORKSPACE}/config.yaml | jq -R -s -c 'split("\n")[:-1]')
-          # Get the existing clusters
-          existing_clusters=$(terraform workspace list | grep '\(-infrastructure\|-cluster\)$' | sed 's/-infrastructure$//;s/-cluster$//' | uniq | grep -v '^$' | jq -R -s -c 'split("\n")[:-1] | map(ltrimstr(" "))')
-          # Filter out all the clusters that must be removed
-          echo "removable_clusters=$(jq -n -c $existing_clusters-$desired_clusters)" >> $GITHUB_OUTPUT
-          echo "desired_clusters=$desired_clusters" >> $GITHUB_OUTPUT
-
-  destroy_cluster:
-    needs: setup_terraform
-    if: ${{ needs.setup_terraform.outputs.removable_clusters != '[]' }}
+  main_workflow:
     runs-on: self-hosted
     container:
       image: ${{ vars.RUNNER_IMAGE }}
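The removed filter step built JSON arrays of cluster names with yq and jq, then computed the difference with jq's array subtraction. A small demo of that pipeline, using hypothetical cluster names in a throwaway file:

    printf 'internal:\n  controlplane: {}\nstaging:\n  controlplane: {}\n' > /tmp/config.yaml
    yq e 'keys | .[]' /tmp/config.yaml | jq -R -s -c 'split("\n")[:-1]'
    # -> ["internal","staging"]
    jq -n -c '["internal","staging","legacy"] - ["internal","staging"]'
    # -> ["legacy"]   (what "jq -n -c $existing_clusters-$desired_clusters" evaluated)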
@@ -72,80 +33,78 @@ jobs:
     defaults:
       run:
         working-directory: "terraform/infrastructure"
-    strategy:
-      matrix:
-        cluster: ${{ fromJSON(needs.setup_terraform.outputs.removable_clusters) }}
     steps:
-      # Using composite actions for initializing Terraform
-      - name: Initialize Terraform
-        uses: ./.github/actions/initialize_terraform_composite_action
+      - uses: actions/checkout@v4
+
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v3
         with:
-          working_directory: "terraform/infrastructure"
-          tf_api_token: ${{ secrets.TF_API_TOKEN }}
+          cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
+          terraform_version: 1.7.5
+
+      - name: Configure Terraform Cache
+        run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
+
+      - name: Initializing Terraform
+        run: |
+          terraform init -upgrade
         env:
-          TF_VAR_cluster_name: ${{ matrix.cluster }}
           TF_WORKSPACE: "default-ws"

-      - name: Selecting Terraform Workspace
-        run: terraform workspace select ${{ matrix.cluster }}-infrastructure
-
-      - name: Destroying Terraform
+      - name: Filter desired and removable clusters
+        id: filter_clusters
         run: |
-          terraform destroy -auto-approve
+          filtered_clusters=$(python3 ${GITHUB_WORKSPACE}/scripts/python/filter_clusters.py --yaml-path=${GITHUB_WORKSPACE}/config.yaml --existing-clusters="$(terraform workspace list)")
+          # Get the clusters described in the config.yaml file
+          echo "desired_clusters=$(echo $filtered_clusters | cut -d',' -f1)" >> $GITHUB_OUTPUT
+          # Get all the clusters that must be removed
+          echo "removable_clusters=$(echo $filtered_clusters | cut -d',' -f2)" >> $GITHUB_OUTPUT

-      - name: Destroying Terraform Cloud Workspace
+      # Destroy clusters
+      - name: Destroy Clusters using Terraform
+        if: ${{ steps.filter_clusters.outputs.removable_clusters != '' }}
+        # the list is passed as space-separated strings
         run: |
-          terraform workspace select default-ws
-          terraform workspace delete -force ${{ matrix.cluster }}-infrastructure
-          terraform workspace delete -force ${{ matrix.cluster }}-cluster
+          bash ${GITHUB_WORKSPACE}/scripts/terraform/destroy.sh ${{ steps.filter_clusters.outputs.removable_clusters }}

-  apply_cluster:
-    needs: setup_terraform
-    if: ${{ needs.setup_terraform.outputs.desired_clusters != '[]' }}
-    runs-on: self-hosted
-    container:
-      image: ${{ vars.RUNNER_IMAGE }}
-    permissions:
-      contents: read
-      pull-requests: write
-    defaults:
-      run:
-        working-directory: "terraform"
-    strategy:
-      matrix:
-        cluster: ${{ fromJSON(needs.setup_terraform.outputs.desired_clusters) }}
-    steps:
-      # Using composite actions for applying cluster's infrastructure changes
+      # Apply cluster's infrastructure changes
       - name: Infrastructure updates
-        uses: ./.github/actions/apply_terraform_composite_action
-        with:
-          workspace: ${{ matrix.cluster }}-infrastructure
-          plan_output: tfplan-${{ matrix.cluster }}-infrastructure
-          working_directory: "terraform/infrastructure"
-          tf_api_token: ${{ secrets.TF_API_TOKEN }}
-        env:
-          TF_VAR_cluster_name: ${{ matrix.cluster }}
+        if: ${{ steps.filter_clusters.outputs.desired_clusters != '' }}
+        run: |
+          bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh infrastructure ${{ steps.filter_clusters.outputs.desired_clusters }}

-      # Using composite actions for applying cluster's applications and tools changes
+      - name: Initializing Terraform
+        if: ${{ steps.filter_clusters.outputs.desired_clusters != '' }}
+        run: |
+          cd ${GITHUB_WORKSPACE}/terraform/cluster
+          terraform init -upgrade
+        env:
+          TF_WORKSPACE: "default-ws"
+
+      # Apply cluster's applications and tools changes
       - name: Cluster updates
-        uses: ./.github/actions/apply_terraform_composite_action
-        with:
-          workspace: ${{ matrix.cluster }}-cluster
-          plan_output: tfplan-${{ matrix.cluster }}-cluster
-          working_directory: "terraform/cluster"
-          tf_api_token: ${{ secrets.TF_API_TOKEN }}
-        env:
-          TF_VAR_cluster_name: ${{ matrix.cluster }}
+        if: ${{ steps.filter_clusters.outputs.desired_clusters != '' }}
+        run: |
+          cd ${GITHUB_WORKSPACE}/terraform/cluster
+          bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh cluster ${{ steps.filter_clusters.outputs.desired_clusters }}

-      - name: Generating kube-config
+      - name: Merging kube-configs into one file
+        run: bash ${GITHUB_WORKSPACE}/scripts/bash/merge_kubeconfigs.sh
+
+      - name: Generating kube-config artifact
         uses: actions/upload-artifact@v4
         with:
-          name: kube-config-${{ matrix.cluster }}
-          path: /opt/kubeconfig/${{ matrix.cluster }}
+          name: kubeconfig
+          path: ~/.kube/config
           compression-level: 0

       - name: Generating Markdown
         run: |
           echo "### turnk8s" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
-          echo "Push your Kubernetes service manifests to [GitHub URL](https://github.com/infraheads/${{ matrix.cluster }}) to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY
+          echo "Push your Kubernetes service manifests to the following GitHub repositories to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY
+          for cluster_name in ${{ steps.filter_clusters.outputs.desired_clusters }};
+          do
+            echo "[$cluster_name](https://github.com/infraheads/$cluster_name)" >> $GITHUB_STEP_SUMMARY
+          done
           echo "Use the 'kubeconfig' file(s) to connect to the cluster, which is(are) attached in the 'Artifacts' section." >> $GITHUB_STEP_SUMMARY
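Taken together, the rewritten job replaces the matrix fan-out with one linear pass over space-separated cluster lists. A condensed sketch of the flow, assuming hypothetical cluster names ("internal", "staging", "legacy"):

    cd terraform/infrastructure
    filtered="$(python3 "$GITHUB_WORKSPACE/scripts/python/filter_clusters.py" \
      --yaml-path="$GITHUB_WORKSPACE/config.yaml" \
      --existing-clusters="$(terraform workspace list)")"
    desired="${filtered%%,*}"      # e.g. "internal staging"
    removable="${filtered#*,}"     # e.g. "legacy"
    bash "$GITHUB_WORKSPACE/scripts/terraform/destroy.sh" $removable        # word splitting is intentional
    bash "$GITHUB_WORKSPACE/scripts/terraform/apply.sh" infrastructure $desired
    cd ../cluster && terraform init -upgrade                                # the cluster pass runs from its own directory
    bash "$GITHUB_WORKSPACE/scripts/terraform/apply.sh" cluster $desired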
.github/workflows/pr_cluster_workflow.yml
@@ -1,13 +1,19 @@
-# This workflow is designed for creating clusters using the "config.yaml" file to implement a GitOps solution with the help of "turnk8s", and includes 2 jobs.
-# Clusters can be created or deleted through PRs that update the "config.yaml" file. In simple words, the file describes the existing clusters on the server.
-# The "config.yaml" file can be updated in the following ways:
-# 1) For testing new features, the config.yaml file should contain only one cluster description (referred to as the "test-cluster"), which must be in the "turnk8s-<PR_NUMBER>" format. This is done by creating a draft PR.
-# 2) For modifications (creating, updating, or deleting clusters), simply create a normal PR and describe the cluster(s) within the "config.yaml" file. The clusters must not start with the "turnk8s-" prefix.
-# *Note: Modifications take effect upon merging normal PRs into the main branch.
-# 3) All clusters are destroyed if the "config.yaml" file is empty.
-# The above-described cases are checked in the first job, called "checking_yaml_correctness".
-# The second job, "apply_cluster", starts only if the PR is a draft. As a result of the workflow, the cluster's kube-config file will be found attached in the "Artifacts" section.
-# *Note: The "test-cluster" is destroyed after merging the draft PR into the main branch.
+# This workflow creates clusters using the "config.yaml" and "test_config.yaml" files in order to implement a GitOps solution with the help of "turnk8s".
+#
+# Clusters can be created or deleted through PRs that update the configuration files.
+# The "config.yaml" file is updated for modifications (creating, updating, or deleting clusters) of the cluster(s) described within it. In simple words, it describes the existing clusters on the server.
+# **Note: The clusters must not start with the "turnk8s-" prefix.
+# Modifications take effect upon merging a PR into the main branch.
+# All clusters are destroyed if the "config.yaml" file is empty.
+#
+# For testing new features, the "test_config.yaml" file should contain only one cluster description (referred to as the "test-cluster"), which must be in the "turnk8s-<PR_NUMBER>" format.
+# Only one cluster can be described in the test_config.yaml file.
+# **Note: For merging a PR, the test_config.yaml file must be empty.
+# The test cluster is destroyed if the "test_config.yaml" file is empty.
+#
+# **Attention: The workflow contains a job called "enable_merge_pr", which enables the ability to merge PRs.
+# As a result of the workflow, the cluster's kube-config file will be found attached in the "Artifacts" section.


 name: 'Automated Cluster Deployment for Pull Requests'
@@ -16,7 +22,7 @@ on:
     branches:
       - '*'
     paths:
-      - 'config.yaml'
+      - 'test_config.yaml'

 env:
   TF_CLOUD_ORGANIZATION: "infraheads"
@@ -28,78 +34,96 @@ env:
   TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}"
   TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}"
   TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}"
+  TF_VAR_config_file_path: "../../test_config.yaml"
   TF_VAR_cluster_name: "turnk8s-${{ github.event.number }}"

 jobs:
-  checking_yaml_correctness:
+  pr_workflow:
     runs-on: self-hosted
     permissions:
       contents: read
       pull-requests: write
     container:
       image: ${{ vars.RUNNER_IMAGE }}
     defaults:
       run:
         working-directory: "terraform/infrastructure"
+    outputs:
+      config_is_empty: ${{ steps.check_config.outputs.config_is_empty }}
     steps:
       - uses: actions/checkout@v4

-      # * In case of an empty config.yaml, all clusters are destroyed.
-      # * In case of "turnk8s" logic changes, we conventionally create a draft PR and rely on the PR type in the pipeline's logic; the cluster name must be in the "turnk8s-<PR_NUMBER>" format.
-      # * For cluster creation we use normal PRs. In such PRs, the added cluster name should not have the "turnk8s-" prefix.
-      - name: Ensure validity of the config.yaml file
+      - name: Check if the test config is empty
+        id: check_config
         shell: bash
         run: |
-          set -e
-          bash "${GITHUB_WORKSPACE}/scripts/validate_config_file.sh" "${GITHUB_WORKSPACE}/config.yaml"
-          clusters=$(yq e 'keys | .[]' ${GITHUB_WORKSPACE}/config.yaml | jq -R -s -c 'split("\n")[:-1]')
-          if [[ ${{ github.event.pull_request.draft }} == true ]] && [[ $(echo "$clusters" | jq -r '. | length') == 1 ]] && [[ $(echo "$clusters" | jq -r '.[0]') != "${{ env.TF_VAR_cluster_name }}" ]];
+          if [ -z "$(grep -v '^\s*$' ${GITHUB_WORKSPACE}/test_config.yaml)" ];
           then
-            echo """
-            For draft PRs, the cluster name must be in the format \"turnk8s-<PR_NUMBER>\",
-            as it is assumed to be a test cluster for modifying logic and testing validations and features.
-            """
-            exit 1
-          elif [[ ${{ github.event.pull_request.draft }} == false ]] && [[ $(echo "$clusters" | jq -r '. | map(startswith("turnk8s-")) | any') == true ]];
-          then
-            echo """
-            For non-draft PRs, the cluster name must not start with the "turnk8s-" prefix,
-            as these clusters are not for testing and should follow a different naming convention.
-            """
-            exit 1
+            echo "config_is_empty=true" >> $GITHUB_OUTPUT
+            echo "The test_config.yaml file is empty and the PR is ready to merge."
           else
-            # check how many clusters should be updated: only one cluster must be updated through each PR
+            echo "config_is_empty=false" >> $GITHUB_OUTPUT
+            echo "The test_config.yaml file is not empty. For merging PRs it must be empty."
           fi

-  apply_cluster:
-    needs: checking_yaml_correctness
-    if: ${{ github.event.pull_request.draft == true }}
-    runs-on: self-hosted
-    permissions:
-      contents: read
-      pull-requests: write
-    defaults:
-      run:
-        working-directory: "terraform"
-    container:
-      image: ${{ vars.RUNNER_IMAGE }}
-    steps:
-      - uses: actions/checkout@v4
-
-      # Using composite actions for applying cluster's infrastructure changes
+      # Validates the YAML configuration files: structure, empty lines, keys, etc.
+      - name: Ensure validity of the configuration files
+        run: |
+          # In case of an empty test_config.yaml file, the config.yaml file must be validated
+          if ${{ steps.check_config.outputs.config_is_empty == 'true' }}; then
+            python3 ${GITHUB_WORKSPACE}/scripts/python/validate_yaml.py --yaml-path=${GITHUB_WORKSPACE}/config.yaml
+          else
+            python3 ${GITHUB_WORKSPACE}/scripts/python/validate_yaml.py --yaml-path=${GITHUB_WORKSPACE}/test_config.yaml --cluster-name=${{ env.TF_VAR_cluster_name }}
+          fi
+
+      - name: Setup Terraform
+        uses: hashicorp/setup-terraform@v3
+        with:
+          cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}
+          terraform_version: 1.7.5
+
+      - name: Configure Terraform Cache
+        run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV"
+
+      - name: Initializing Terraform
+        run: |
+          terraform init -upgrade
+        env:
+          TF_WORKSPACE: "default-ws"
+
+      # The test PR cluster is removed in case of an empty test_config.yaml file
+      - name: Destroying test cluster
+        if: ${{ steps.check_config.outputs.config_is_empty == 'true' }}
+        run: |
+          if terraform workspace list | grep -w "${{ github.event.number }}-infrastructure"; then
+            bash ${GITHUB_WORKSPACE}/scripts/terraform/destroy.sh ${{ env.TF_VAR_cluster_name }}
+          fi
+
+      # Apply cluster's infrastructure changes
       - name: Infrastructure updates
-        uses: ./.github/actions/apply_terraform_composite_action
-        with:
-          workspace: ${{ env.TF_VAR_cluster_name }}-infrastructure
-          plan_output: tfplan-${{ env.TF_VAR_cluster_name }}-infrastructure
-          working_directory: "terraform/infrastructure"
-          tf_api_token: ${{ secrets.TF_API_TOKEN }}
+        if: ${{ steps.check_config.outputs.config_is_empty == 'false' }}
+        run: |
+          bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh infrastructure ${{ env.TF_VAR_cluster_name }}

-      # Using composite actions for applying cluster's applications and tools changes
+      - name: Initializing Terraform
+        if: ${{ steps.check_config.outputs.config_is_empty == 'false' }}
+        run: |
+          cd ${GITHUB_WORKSPACE}/terraform/cluster
+          terraform init -upgrade
+        env:
+          TF_WORKSPACE: "default-ws"
+
+      # Apply cluster's applications and tools changes
       - name: Cluster updates
-        uses: ./.github/actions/apply_terraform_composite_action
-        with:
-          workspace: ${{ env.TF_VAR_cluster_name }}-cluster
-          plan_output: tfplan-${{ env.TF_VAR_cluster_name }}-cluster
-          working_directory: "terraform/cluster"
-          tf_api_token: ${{ secrets.TF_API_TOKEN }}
+        if: ${{ steps.check_config.outputs.config_is_empty == 'false' }}
+        run: |
+          cd ${GITHUB_WORKSPACE}/terraform/cluster
+          bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh cluster ${{ env.TF_VAR_cluster_name }}

-      - name: Generating kube-config
+      - name: Generating kube-config as Artifact
+        if: ${{ steps.check_config.outputs.config_is_empty == 'false' }}
         uses: actions/upload-artifact@v4
         with:
           name: kube-config-${{ env.TF_VAR_cluster_name }}
@@ -107,8 +131,27 @@ jobs:
           compression-level: 0

       - name: Generating Markdown
+        if: ${{ steps.check_config.outputs.config_is_empty == 'false' }}
         run: |
           echo "### turnk8s" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "Push your Kubernetes service manifests to [GitHub URL](https://github.com/infraheads/${{ env.TF_VAR_cluster_name }}) to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY
           echo "Use the 'kubeconfig' file to connect to the cluster, which is attached in the 'Artifacts' section." >> $GITHUB_STEP_SUMMARY
+
+  enable_merge_pr:
+    needs: pr_workflow
+    runs-on: self-hosted
+    permissions:
+      contents: read
+      pull-requests: write
+    container:
+      image: ${{ vars.RUNNER_IMAGE }}
+    steps:
+      # The PR can be merged only in case of an empty test_config.yaml
+      - name: Enable merge PR
+        run: |
+          set -e
+          if ${{ needs.pr_workflow.outputs.config_is_empty == 'false' }}; then
+            echo "The test_config.yaml file is not empty. For merging PRs the file must be empty."
+            exit 1
+          fi
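The merge gate hinges on the whitespace-only emptiness test in the check_config step. A quick way to see its behavior with a throwaway file (assuming GNU grep, whose \s class matches whitespace):

    printf '\n   \n\t\n' > /tmp/test_config.yaml                # only blank/whitespace lines
    if [ -z "$(grep -v '^\s*$' /tmp/test_config.yaml)" ]; then
      echo "config_is_empty=true"                               # no non-blank line survived the filter
    fi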
config.yaml
@@ -1,4 +1,4 @@
-turnk8s-18:
+internal:
   controlplane:
     cpu_cores: 2
     memory: 4096
@@ -7,4 +7,4 @@ turnk8s-18:
     count: 1
     cpu_cores: 2
     memory: 2048
-    disk_size: 20
\ No newline at end of file
+    disk_size: 20
modules/argocd/app-of-apps.yaml (deleted)
@@ -1,17 +0,0 @@
-applications:
-  - name: app-of-apps
-    namespace: argocd
-    finalizers:
-      - resources-finalizer.argocd.argoproj.io
-    project: default
-    source:
-      repoURL: git@github.com:example/project.git
-      targetRevision: HEAD
-      path: argocd_applications
-    destination:
-      server: https://kubernetes.default.svc
-      namespace: argocd
-    syncPolicy:
-      automated:
-        prune: true
-        selfHeal: true
modules/argocd/app-of-apps.yaml.tpl (new file)
@@ -0,0 +1,17 @@
+applications:
+  app-of-apps:
+    namespace: argocd
+    finalizers:
+      - resources-finalizer.argocd.argoproj.io
+    project: default
+    source:
+      repoURL: ${repoURL}
+      targetRevision: HEAD
+      path: argocd_applications
+    destination:
+      server: https://kubernetes.default.svc
+      namespace: argocd
+    syncPolicy:
+      automated:
+        prune: true
+        selfHeal: true
@@ -40,10 +40,11 @@ resource "helm_release" "argocd-apps" {
   version    = var.app_of_apps_chart_version
   repository = var.app_of_apps_chart_repository

-  values = [file("${path.module}/app-of-apps.yaml")]
-
-  set {
-    name  = "applications[0].source.repoURL"
-    value = var.git_repository_ssh_url
-  }
+  values = [
+    templatefile("${path.module}/app-of-apps.yaml.tpl",
+      {
+        repoURL = var.git_repository_ssh_url
+      }
+    )
+  ]
 }
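Switching from file() plus a set{} override to templatefile() moves the repoURL substitution into the template itself; the old "applications[0].source.repoURL" index path would also stop working once applications becomes a map instead of a list. The rendering can be spot-checked from the repo root with terraform console (the repo URL here is hypothetical, and this assumes console can run in the checkout):

    echo 'templatefile("modules/argocd/app-of-apps.yaml.tpl", { repoURL = "git@github.com:acme/deployments.git" })' | terraform console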
scripts/bash/create_kubeconfig.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash

 cat <<EOF > "/opt/kubeconfig/$2"
 $(echo "$1" | tail -n +2 | head -n -1)
scripts/bash/destroy_cluster_nodes.sh (new file)
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+cluster_name=$1
+desired_worker_nodes_count=$2
+existing_worker_nodes_count=$(terraform state list | grep "proxmox_vm_qemu.worker" | wc -l)
+removable_worker_nodes_count=$(expr "$existing_worker_nodes_count" - "$desired_worker_nodes_count")
+
+if [ "$removable_worker_nodes_count" -gt 0 ]; then
+  export KUBECONFIG="/opt/kubeconfig/$cluster_name"
+  for (( i="$desired_worker_nodes_count"; i<"$existing_worker_nodes_count"; i++ ))
+  do
+    kubectl delete node "$cluster_name-wn-$i"
+  done
+fi
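The loop deletes the highest-indexed worker nodes, from the desired count up to (but not including) the existing count. A dry-run sketch for a hypothetical cluster "demo" scaled from 3 workers down to 1:

    cluster_name=demo; desired_worker_nodes_count=1; existing_worker_nodes_count=3
    for (( i=desired_worker_nodes_count; i<existing_worker_nodes_count; i++ ))
    do
      echo "kubectl delete node $cluster_name-wn-$i"   # would remove demo-wn-1 and demo-wn-2
    done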
scripts/bash/merge_kubeconfigs.sh (new file)
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+export KUBECONFIG=$(find /opt/kubeconfig -type f | tr '\n' ':')
+mkdir ~/.kube
+kubectl config view --flatten > ~/.kube/config
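kubectl treats KUBECONFIG as a colon-separated search path, and --flatten inlines certificates so the merged file is self-contained. A sketch with two hypothetical per-cluster files; note the script's plain mkdir assumes ~/.kube does not yet exist on the runner, so -p is added here as an assumption:

    export KUBECONFIG=/opt/kubeconfig/internal:/opt/kubeconfig/staging
    mkdir -p ~/.kube
    kubectl config view --flatten > ~/.kube/config
    KUBECONFIG=~/.kube/config kubectl config get-contexts   # both clusters are now visible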
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash

 talosctl gen config talos-proxmrox https://$CONTROLPLANE_IP:6443 -o _out --force
 talosctl apply-config -n $CONTROLPLANE_IP --insecure -f _out/controlplane.yaml
scripts/python/filter_clusters.py (new file)
@@ -0,0 +1,37 @@
+# Based on the config.yaml file and the existing Terraform workspaces, this script decides which clusters must be deleted and/or updated.
+import os
+import re
+import yaml
+import pathlib
+import argparse
+from collections import Counter
+
+
+def main():
+    # Collect the input values from the command line
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--yaml-path", type=pathlib.Path, help="YAML configuration file path.", required=True)
+    parser.add_argument("--existing-clusters", type=str, help="Existing cluster names.", required=True)
+    args = parser.parse_args()
+
+    if not os.path.isfile(args.yaml_path):
+        raise FileNotFoundError(f"File {args.yaml_path} does not exist.")
+
+    with open(args.yaml_path, 'r') as file:
+        yaml_content = file.read()
+
+    loaded_yaml_content = yaml.safe_load(yaml_content)
+    # The desired clusters that must be applied
+    desired_clusters = Counter([str(cluster) for cluster in loaded_yaml_content.keys()])
+    # The existing clusters, filtered from "terraform workspace list" with the workspace suffixes removed
+    existing_clusters = Counter([re.sub(r'(-infrastructure|-cluster)$', '', cluster) for cluster in args.existing_clusters.split() if re.compile(r'^(?!\d).*(-infrastructure|-cluster)$').match(cluster)])
+    # Keep only the clusters for which both workspaces exist
+    existing_clusters = Counter([cluster for cluster, count in existing_clusters.items() if count == 2])
+    # The clusters that must be destroyed
+    removable_clusters = existing_clusters - desired_clusters
+
+    # Print the two lists, separated by a comma
+    print(" ".join(desired_clusters), " ".join(removable_clusters), sep=",")
+
+
+if __name__ == '__main__':
+    main()
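The script prints two space-joined lists separated by a single comma — desired clusters first, removable second — which the workflow splits with cut. The digit-excluding regex also skips workspaces such as "26-infrastructure" that PR test clusters leave behind (apply.sh strips the "turnk8s-" prefix when naming workspaces). An example invocation with hypothetical workspace names, assuming config.yaml defines only "internal":

    python3 scripts/python/filter_clusters.py \
      --yaml-path=config.yaml \
      --existing-clusters="default-ws internal-infrastructure internal-cluster legacy-infrastructure legacy-cluster"
    # -> internal,legacy
    # "legacy" is removable: both of its workspaces exist but it is absent from config.yaml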
scripts/python/validate_yaml.py (new file)
@@ -0,0 +1,97 @@
+# This script checks the validity of the config.yaml and test_config.yaml files.
+import os
+import sys
+import yaml
+import pathlib
+import argparse
+
+from typing import Optional
+from schema import Schema, And, Use, Or, SchemaError
+
+
+# Validate that the YAML file contains no empty lines
+def check_empty_lines(yaml_content):
+    lines = yaml_content.splitlines()
+    empty_lines = [i + 1 for i in range(len(lines)) if not lines[i].strip()]
+
+    if empty_lines:
+        raise yaml.YAMLError(f"Empty lines found in the YAML file at lines: {', '.join(map(str, empty_lines))}.")
+
+    return True
+
+
+# Custom validator for the cluster names
+def validate_cluster_names(cluster_config: dict, cluster_name: Optional[str]):
+    if not isinstance(cluster_config, dict):
+        raise SchemaError("Cluster config contains unstructured lines.")
+
+    if cluster_name:
+        if cluster_name not in cluster_config:
+            raise SchemaError("Cluster name must match the \"turnk8s-<PR_NUMBER>\" format.")
+        elif len(cluster_config) != 1:
+            raise SchemaError("Only one cluster must be described within the test_config.yaml file.")
+    else:
+        for cluster_name, cluster_info in cluster_config.items():
+            if cluster_name.startswith("turnk8s-"):
+                raise SchemaError(f"Cluster name {cluster_name} must not start with the \"turnk8s-\" prefix.")
+
+    return cluster_config
+
+
+cluster_schema = {
+    "controlplane": {
+        "cpu_cores": Or(2, 4, 6, 8,
+                        error="The number of CPU cores for the ControlPlane must be one of the following: 2, 4, 6, or 8."),
+        "memory": Or(4096, 6144, 8192,
+                     error="The RAM memory size for the ControlPlane must be one of the following: 4096, 6144, or 8192."),
+        "disk_size": And(Use(int), lambda n: 10 <= n <= 60,
+                         error="The DiskSize for the ControlPlane must be within the range of 10 to 60.")
+    },
+    "worker_nodes": {
+        "count": And(Use(int), lambda n: 1 <= n <= 5,
+                     error="The Count for the WorkerNodes must be within the range of 1 to 5."),
+        "cpu_cores": Or(2, 4, 6, 8,
+                        error="The number of CPU cores for the WorkerNodes must be one of the following: 2, 4, 6, or 8."),
+        "memory": Or(2048, 4096, 6144,
+                     error="The RAM memory size for the WorkerNodes must be one of the following: 2048, 4096, or 6144."),
+        "disk_size": And(Use(int), lambda n: 10 <= n <= 60,
+                         error="The DiskSize for the WorkerNodes must be within the range of 10 to 60.")
+    }
+}
+
+
+def main():
+    # Collect the input values from the command line
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--yaml-path", type=pathlib.Path, help="YAML configuration file path.", required=True)
+    parser.add_argument("--cluster-name", type=str, help="A cluster name for checking the validity.", default=None)
+    args = parser.parse_args()
+
+    if not os.path.isfile(args.yaml_path):
+        raise FileNotFoundError(f"File {args.yaml_path} does not exist.")
+
+    with open(args.yaml_path, 'r') as file:
+        yaml_content = file.read()
+
+    try:
+        # Check that the file is not empty
+        if len(yaml_content.strip()):
+            loaded_yaml_content = yaml.safe_load(yaml_content)
+            check_empty_lines(yaml_content=yaml_content)
+            # Wrap the cluster schema with the cluster names validator
+            schema = Schema(And(lambda config: validate_cluster_names(config, args.cluster_name), {str: cluster_schema}))
+            schema.validate(loaded_yaml_content)
+            print("YAML configuration file is valid.")
+    except yaml.YAMLError as e:
+        print(f"Error parsing YAML configuration file: {e}")
+        sys.exit(1)
+    except SchemaError as e:
+        print(f"Invalid YAML configuration: {e}")
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
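A failing run is the quickest way to see the schema in action. With a hypothetical config whose control-plane CPU count is not in the allowed set (the error text below is abridged):

    cat > /tmp/bad_config.yaml <<'EOF'
    demo:
      controlplane:
        cpu_cores: 3
        memory: 4096
        disk_size: 10
      worker_nodes:
        count: 1
        cpu_cores: 2
        memory: 2048
        disk_size: 20
    EOF
    python3 scripts/python/validate_yaml.py --yaml-path=/tmp/bad_config.yaml
    # -> Invalid YAML configuration: The number of CPU cores for the ControlPlane
    #    must be one of the following: 2, 4, 6, or 8.   (exit code 1)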
scripts/terraform/apply.sh (new file)
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+
+type=$1
+clusters=( "${@:2}" )
+
+for cluster in "${clusters[@]}"
+do
+  export TF_VAR_cluster_name="$cluster"
+  workspace="${cluster#turnk8s-}-$type"
+  terraform workspace select -or-create "$workspace"
+  terraform validate -no-color
+  terraform plan -out="tfplan-$workspace"
+  terraform apply "tfplan-$workspace"
+done
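The "${cluster#turnk8s-}" expansion strips the test prefix, so a PR test cluster and a regular cluster map to differently shaped workspace names while TF_VAR_cluster_name keeps the full name. For example, with one hypothetical regular cluster and one test cluster:

    bash scripts/terraform/apply.sh infrastructure internal turnk8s-26
    # iteration 1: TF_VAR_cluster_name=internal    -> workspace "internal-infrastructure"
    # iteration 2: TF_VAR_cluster_name=turnk8s-26  -> workspace "26-infrastructure"
    # each iteration runs: workspace select -or-create, validate, plan -out, apply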
scripts/terraform/destroy.sh (new file)
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+
+clusters=( "$@" )
+
+for cluster in "${clusters[@]}"
+do
+  export TF_VAR_cluster_name="$cluster"
+  cluster_without_prefix="${cluster#turnk8s-}"
+  terraform workspace select "$cluster_without_prefix-infrastructure"
+  terraform destroy -auto-approve
+  terraform workspace select "default-ws"
+  terraform workspace delete -force "$cluster_without_prefix-infrastructure"
+  terraform workspace delete -force "$cluster_without_prefix-cluster"
+done
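Note that terraform destroy runs only in the -infrastructure workspace; the matching -cluster workspace is then deleted with -force, i.e. without its own destroy pass. For a hypothetical test cluster:

    bash scripts/terraform/destroy.sh turnk8s-26
    # destroy runs in workspace "26-infrastructure";
    # then "26-infrastructure" and "26-cluster" are force-deleted from "default-ws"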
scripts/validate_config_file.sh (deleted)
@@ -1,80 +0,0 @@
-#!/bin/bash
-
-# Exit immediately if a command exits with a non-zero status
-set -e
-# Path to the YAML file
-FILE=$1
-
-# Function to validate a cluster
-validate_cluster() {
-  local cluster=$1
-
-  controlplane_cpu=$(yq e ".$cluster.controlplane.cpu_cores" "$FILE")
-  controlplane_memory=$(yq e ".$cluster.controlplane.memory" "$FILE")
-  controlplane_disk=$(yq e ".$cluster.controlplane.disk_size" "$FILE")
-  worker_node_count=$(yq e ".$cluster.worker_nodes.count" "$FILE")
-  worker_node_cpu=$(yq e ".$cluster.worker_nodes.cpu_cores" "$FILE")
-  worker_node_memory=$(yq e ".$cluster.worker_nodes.memory" "$FILE")
-  worker_node_disk=$(yq e ".$cluster.worker_nodes.disk_size" "$FILE")
-
-  # Validate the CPU cores of the Control Plane
-  if ! [[ "$controlplane_cpu" =~ ^(2|4|6|8)$ ]];
-  then
-    echo "Control Plane CPU cores must be one of the following values: 2, 4, 6 or 8."
-    exit 1
-  fi
-
-  # Validate the RAM memory of the Control Plane
-  if ! [[ "$controlplane_memory" =~ ^(4096|6144|8192)$ ]];
-  then
-    echo "Control Plane Memory must be one of the following values: 4096, 6144 or 8192."
-    exit 1
-  fi
-
-  # Validate the disk size of the Control Plane
-  if ! [[ "$controlplane_disk" =~ ^(10|20|40|60)$ ]];
-  then
-    echo "Control Plane Disk size must be one of the following values: 10, 20, 40 or 60."
-    exit 1
-  fi
-
-  # Validate the Worker Nodes count
-  if ! [[ "$worker_node_count" =~ ^[1-5]$ ]];
-  then
-    echo "Worker Node count must be in the range 1 to 5."
-    exit 1
-  fi
-
-  # Validate the CPU cores of the Worker Node
-  if ! [[ "$worker_node_cpu" =~ ^(2|4|6|8)$ ]];
-  then
-    echo "Worker Node CPU cores must be one of the following values: 2, 4, 6 or 8."
-    exit 1
-  fi
-
-  # Validate the RAM memory of the Worker Node
-  if ! [[ "$worker_node_memory" =~ ^(2048|4096|6144)$ ]];
-  then
-    echo "Worker Node Memory must be one of the following values: 2048, 4096 or 6144."
-    exit 1
-  fi
-
-  # Validate the disk size of the Worker Node
-  if ! [[ "$worker_node_disk" =~ ^(10|20|40|60)$ ]];
-  then
-    echo "Worker Node Disk size must be one of the following values: 10, 20, 40 or 60."
-    exit 1
-  fi
-}
-
-# Checks if the YAML file is empty
-if [[ $(yq e '.' "$FILE") ]];
-then
-  # Extract all clusters
-  clusters=$(yq e 'keys | .[]' "$FILE")
-  # Validate each cluster
-  for cluster in $clusters; do
-    validate_cluster "$cluster"
-  done
-  echo "The $FILE is valid."
-fi
@@ -24,7 +24,7 @@ variable "argocd_chart_name" {

 variable "argocd_chart_version" {
   type    = string
-  default = "6.7.18"
+  default = "7.3.4"
 }

 variable "argocd_chart_repository" {
@@ -45,7 +45,7 @@ variable "argocd_app_of_apps_chart_name" {

 variable "argocd_app_of_apps_chart_version" {
   type    = string
-  default = "1.6.2"
+  default = "2.0.0"
 }

 variable "argocd_app_of_apps_chart_repository" {
@@ -12,6 +12,7 @@ resource "proxmox_vm_qemu" "controlplane" {
   qemu_os = var.controlplane_qemu_os
   scsihw  = var.controlplane_scsihw
   memory  = each.value.controlplane.memory
   onboot  = true
+  agent   = 1

   disks {
terraform/infrastructure/destroy-cluster-nodes.tf (new file)
@@ -0,0 +1,12 @@
+resource "terraform_data" "delete_nodes" {
+  depends_on = [terraform_data.kubeconfig]
+
+  # Ensure worker nodes are deleted when the cluster is scaled down
+  triggers_replace = [
+    length(local.workers)
+  ]
+
+  provisioner "local-exec" {
+    command = "bash ../../scripts/bash/destroy_cluster_nodes.sh ${var.cluster_name} ${length(local.workers)}"
+  }
+}
@@ -2,7 +2,12 @@ resource "terraform_data" "kubeconfig" {
   depends_on = [data.talos_cluster_kubeconfig.cp_ck]
   for_each   = local.clusters

+  # Ensure the kubeconfig is retrieved again when the worker nodes count changes
+  triggers_replace = [
+    length(local.workers)
+  ]
+
   provisioner "local-exec" {
-    command = "sh ../../scripts/create_kubeconfig.sh \"${yamlencode(data.talos_cluster_kubeconfig.cp_ck[each.key].kubeconfig_raw)}\" ${var.cluster_name}"
+    command = "bash ../../scripts/bash/create_kubeconfig.sh \"${yamlencode(data.talos_cluster_kubeconfig.cp_ck[each.key].kubeconfig_raw)}\" ${var.cluster_name}"
   }
 }
@@ -5,7 +5,7 @@ locals {
   clusters  = try({ tostring(var.cluster_name) = yamldecode(file(var.config_file_path))[var.cluster_name] }, {})
   talos_iso = "local:iso/metal-amd64-qemu-${var.talos_version}.iso"

-  worker = flatten([
+  workers = flatten([
     for cluster_key, cluster in local.clusters : [
       for i in range(cluster.worker_nodes.count):
       {
@@ -29,6 +29,7 @@ data "talos_machine_configuration" "cp_mc" {
         talos-version      = var.talos_version,
         kubernetes-version = var.k8s_version,
         registry           = var.image_registry
+        node-name          = "${var.cluster_name}-cp"
       }
     )
   ]
@@ -63,6 +64,7 @@ data "talos_cluster_kubeconfig" "cp_ck" {

 # Generates a machine configuration for the worker (worker.yaml)
 data "talos_machine_configuration" "worker_mc" {
+  depends_on = [proxmox_vm_qemu.worker]
   for_each   = local.clusters

   cluster_name = data.talos_client_configuration.cp_cc[each.key].cluster_name
@@ -71,22 +73,37 @@ data "talos_machine_configuration" "worker_mc" {
   machine_secrets    = talos_machine_secrets.talos_secrets.machine_secrets
   kubernetes_version = var.k8s_version
   talos_version      = var.talos_version
-  config_patches = [
-    templatefile("${path.module}/templates/worker.yaml.tpl",
-      {
-        talos-version      = var.talos_version,
-        kubernetes-version = var.k8s_version,
-        registry           = var.image_registry
-      }
-    )
-  ]
 }

 # Applies machine configuration to the worker node
 resource "talos_machine_configuration_apply" "worker_mca" {
-  for_each = { for idx, worker in local.worker : idx => worker }
+  depends_on = [data.talos_machine_configuration.worker_mc]
+  for_each   = { for idx, worker in local.workers : idx => worker }

   client_configuration        = talos_machine_secrets.talos_secrets.client_configuration
   machine_configuration_input = data.talos_machine_configuration.worker_mc[var.cluster_name].machine_configuration
   node                        = proxmox_vm_qemu.worker[each.key].default_ipv4_address
+
+  config_patches = [
+    templatefile("${path.module}/templates/worker.yaml.tpl",
+      {
+        talos-version      = var.talos_version,
+        kubernetes-version = var.k8s_version,
+        registry           = var.image_registry
+        node-name          = "${var.cluster_name}-wn-${each.key}"
+      }
+    )
+  ]
+}
+
+data "talos_cluster_health" "cluster_health" {
+  depends_on = [data.talos_cluster_kubeconfig.cp_ck]
+
+  client_configuration = talos_machine_secrets.talos_secrets.client_configuration
+  control_plane_nodes  = [for controlplane in proxmox_vm_qemu.controlplane : controlplane.default_ipv4_address]
+  worker_nodes         = [for worker in proxmox_vm_qemu.worker : worker.default_ipv4_address]
+  endpoints            = [for controlplane in proxmox_vm_qemu.controlplane : controlplane.default_ipv4_address]
+  timeouts = {
+    read = "1h"
+  }
+}
@@ -8,6 +8,8 @@ machine:
       '*':
         endpoints:
           - http://${registry}
+  network:
+    hostname: ${node-name}
 cluster:
   apiServer:
     image: registry.k8s.io/kube-apiserver:${kubernetes-version}
@@ -7,4 +7,6 @@ machine:
     mirrors:
       '*':
         endpoints:
-          - http://${registry}
\ No newline at end of file
+          - http://${registry}
+  network:
+    hostname: ${node-name}
@@ -1,5 +1,5 @@
 resource "proxmox_vm_qemu" "worker" {
-  for_each = { for idx, worker in local.worker : idx => worker }
+  for_each = { for idx, worker in local.workers : idx => worker }

   name        = "${var.cluster_name}-worker-${each.key}"
   target_node = local.proxmox_target_node
@@ -12,6 +12,7 @@ resource "proxmox_vm_qemu" "worker" {
   qemu_os = var.worker_qemu_os
   scsihw  = var.worker_scsihw
   memory  = each.value.memory
   onboot  = true
+  agent   = 1

   disks {
test_config.yaml (new file)
@@ -0,0 +1,10 @@
+turnk8s-26:
+  controlplane:
+    cpu_cores: 2
+    memory: 4096
+    disk_size: 10
+  worker_nodes:
+    count: 1
+    cpu_cores: 2
+    memory: 2048
+    disk_size: 20