chore: cleanup

JJGadgets
2024-11-17 17:16:37 +08:00
parent 0e0129b2ba
commit 879a856261
11 changed files with 54 additions and 61 deletions

.gitignore vendored

@@ -7,6 +7,8 @@ talosconfig
clusterconfig/
**/clusterconfig
**/clusterconfig/*
**/clusterconfig*
**/clusterconfig*/*
**/charts/cilium/*
**/cilium*/app/bootstrap-install/charts/*
**/cilium*/app/bootstrap-install/base-values.yaml

View File

@@ -5,6 +5,7 @@ K9S_CONFIG_DIR = "{{config_root}}/dots/k9s"
K9S_DEFAULT_PF_ADDRESS = "[::]"
K9S_FG_NODE_SHELL = "true"
#SSH_AUTH_SOCK = $(gpgconf --list-dirs agent-ssh-socket)
UV_PYTHON = "3.11"
_.python.venv = { path = ".venv", create = true } # create the venv if it doesn't exist
[settings]
@@ -15,7 +16,7 @@ pipx_uvx = true
[tools]
neovim = ["0.10.1"]
kubectl = ["1.29.2"]
kubectl = ["1.30.1"]
krew = ["0.4.4"]
kubectx = ["0.9.5"]
fzf = ["0.52.1"] # used by kubectx interactive mode
@@ -34,7 +35,7 @@ soft-serve = ["0.7.4"]
python = ["3.11"]
uv = ["0.4.1"] # faster than pipx, and can be installed with mise directly
"pipx:flux-local" = ["5.5.1"]
"pipx:robusta-dev/krr" = ["v1.13.0"]
"pipx:robusta-dev/krr" = ["v1.17.0"]
rust = ["1.80.1"]
"cargo:cargo-binstall" = ["1.10.3"]
"cargo:atac" = ["0.17.0"]

View File

@@ -131,8 +131,8 @@ tasks:
# lowercase, used for resource names etc
- grep -lR 'APPNAME' ./kube/deploy/apps/{{.APP}}/ | xargs -I% sed -i 's/${APPNAME}/{{.APP}}/g' %
# uppercase, for variable substitution references e.g. ${APP_DNS_AUTHENTIK}
- grep -lR 'APPNAME' ./kube/deploy/apps/{{.APP}}/ | xargs -I% sed -i 's/_APPNAME:=/_{{.APP}}:=/g;s/\(_{{.APP}}:=\)/\U\1/g' %
- grep -lR 'APPNAME' ./kube/deploy/apps/{{.APP}}/ | xargs -I% sed -i 's/_APPNAME}/_{{.APP}}}/g;s/\(_{{.APP}}}\)/\U\1/g' %
- grep -lR 'APPNAME' ./kube/deploy/apps/{{.APP}}/ | xargs -I% sed -i 's/_APPNAME:=/_{{.APP}}:=/g' %
shortnames:
desc: List all installed CRDs and their short names.
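For reference, the APPNAME substitution in the hunk above works in two passes: the bare `${APPNAME}` placeholder becomes the (lowercase) app name used in resource names, and the `_APPNAME` suffix inside substitution variables is renamed to carry the app name (the variant using GNU sed's `\U\1` additionally uppercases it). A minimal sketch of the effect, assuming a hypothetical app name `myapp` and a throwaway file:

```sh
# Hypothetical template containing both placeholder styles
printf 'name: ${APPNAME}\nhost: "${APP_DNS_APPNAME:=${APPNAME}}"\n' > /tmp/appname-demo.yaml

# Lowercase pass: plain ${APPNAME} placeholders become the resource name
sed -i 's/${APPNAME}/myapp/g' /tmp/appname-demo.yaml
# Variable-reference pass: rename the _APPNAME:= default form
sed -i 's/_APPNAME:=/_myapp:=/g' /tmp/appname-demo.yaml

cat /tmp/appname-demo.yaml
# name: myapp
# host: "${APP_DNS_myapp:=myapp}"
```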

View File

@@ -127,3 +127,7 @@ tasks:
NS: *ns-default
cmds:
- kubectl get pods -n {{.NS}} -l postgres-operator.crunchydata.com/pgbackrest-dedicated=,postgres-operator.crunchydata.com/cluster={{.PG}} -o name | xargs -oI% kubectl exec -it -n {{.NS}} % -c pgbackrest -- pgbackrest expire --stanza=db --repo=1 --repo1-retention-full=1 --repo1-retention-diff=1
crunchy-wipe-replicas:
cmds:
- kubectl get pod -A -l postgres-operator.crunchydata.com/role=replica -o jsonpath='{range .items[*]}{.metadata.namespace}{range .metadata.ownerReferences[*]}{" "}{.name}{"\n"}{end}{end}' | while read -r i; do kubectl delete pvc -n ${i}-pgdata --wait=false; kubectl delete sts -n ${i}; done
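The `crunchy-wipe-replicas` pipeline above leans on shell word splitting: each jsonpath line is `<namespace> <owner-name>`, and the unquoted `${i}` expands into two arguments, so `-n` receives the namespace while the remainder becomes the resource name. A minimal sketch with a hypothetical line of output:

```sh
# Hypothetical jsonpath line: namespace, then the replica's owning resource name
i="pg-prod db-instance1-abcd"

# Unquoted ${i} word-splits, so these print the commands the loop would run:
echo kubectl delete pvc -n ${i}-pgdata --wait=false
# -> kubectl delete pvc -n pg-prod db-instance1-abcd-pgdata --wait=false
echo kubectl delete sts -n ${i}
# -> kubectl delete sts -n pg-prod db-instance1-abcd
```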

View File

@@ -11,13 +11,27 @@ includes:
taskfile: ../k8s
tasks:
unlock:
unlock: &un
desc: Unlock Restic repo using env vars from cluster secrets.
vars:
NS: &ns-fail '{{ or .NS (fail "Missing `NS` environment variable!") }}'
REPO: &restic-repo '{{ or .REPO (fail "Missing `REPO` environment variable!") }}'
TS: '{{now | unixEpoch}}'
cmds:
- kubectl patch --field-manager=flux-client-side-apply replicationsources -n {{.NS}} {{.REPO}} --type merge --patch '{"spec":{"restic":{"unlock":"{{.TS}}"}}}'
unlock-local:
<<: *un
cmds:
- env $(kubectl get secrets -n {{.NS}} {{.REPO}} -o yaml | yq '.data | to_entries | map(.key + "=" + (.value | @base64d | @sh)) | join("\n")') restic unlock --remove-all
unlock-all:
dir: "/{{.ROOT_DIR}}"
vars:
FAILCONTINUE: '{{ .FAILCONTINUE | default "false" }}'
cmds:
- |
kubectl get replicationsources -A -o jsonpath='{range .items[*]}{"NS="}{.metadata.namespace}{" "}{"REPO="}{.metadata.name}{"\n"}{end}' | while read -r i; do
task volsync:unlock ${i} || {{.FAILCONTINUE}}
done
run:
desc: Unlock Restic repo using env vars from cluster secrets.
vars:
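For reference, the tasks above are driven entirely by the `NS` and `REPO` vars (the `fail` calls make both mandatory for the single-repo variants), while `unlock-all` discovers every ReplicationSource itself. A usage sketch with hypothetical namespace and ReplicationSource names:

```sh
# Patch the ReplicationSource so VolSync runs `restic unlock` in-cluster
task volsync:unlock NS=media REPO=jellyfin-data

# Run `restic unlock --remove-all` locally, with credentials pulled from the
# same-named secret in the cluster
task volsync:unlock-local NS=media REPO=jellyfin-data

# Walk every ReplicationSource in the cluster; FAILCONTINUE=true becomes the
# `|| true` in the loop so individual failures don't stop the run
task volsync:unlock-all FAILCONTINUE=true
```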

.venv/.gitignore vendored

@@ -1,3 +1,4 @@
# this gitignore is to have the directory present so Mise can create the venv with 0 user interaction
*
!.gitignore
!.mise-py-pkg

.venv/.mise-py-pkg Normal file

@@ -0,0 +1 @@
flux-local

View File

@@ -99,7 +99,7 @@ require("lazy").setup({
build = ":TSUpdate",
config = function()
require("nvim-treesitter.configs").setup({
ensure_installed = { "c", "lua", "vim", "vimdoc", "yaml", "go", "dockerfile", "fish", "bash", "python", "javascript", "typescript", "html", "css", "nix" },
ensure_installed = { "c", "lua", "vim", "vimdoc", "yaml", "json", "json5", "go", "dockerfile", "fish", "bash", "python", "javascript", "typescript", "html", "css", "nix" },
--ensure_installed = 'all',
ignore_install = { 'org' }, -- nvim-orgmode compatibility
sync_install = false,
@@ -343,20 +343,25 @@ require("lazy").setup({
lsp.taplo.setup { capabilities = caps(), settings = { evenBetterToml = { schema = { associations = {
['^\\.mise\\.toml$'] = 'https://mise.jdx.dev/schema/mise.json',
}}}}}
if vim.bo.filetype == "json" then lsp.jsonls.setup {
local jsonls_config = {
-- lsp.jsonls.setup {
filetypes = {"json", "jsonc", "json5"},
capabilities = caps(),
settings = {
json = {
validate = { enable = true },
schemas = require('schemastore').json.schemas {
schemas = require('schemastore').json.schemas({
select = {
'Renovate',
'GitHub Workflow Template Properties'
}
},
}),
}
}
}; end
}
if vim.bo.filetype == "json" then lsp.jsonls.setup(jsonls_config); end
if vim.bo.filetype == "json5" then lsp.jsonls.setup(jsonls_config); end
-- lsp.jsonls.setup(jsonls_config)
lsp.helm_ls.setup{capabilities = caps(),}
lsp.lua_ls.setup{capabilities = caps(),}
lsp.dockerls.setup{capabilities = caps(),}

View File

@@ -9,31 +9,31 @@
"cmp-nvim-lsp": { "branch": "main", "commit": "39e2eda76828d88b773cc27a3f61d2ad782c922d" },
"cmp-nvim-lsp-signature-help": { "branch": "main", "commit": "031e6ba70b0ad5eee49fd2120ff7a2e325b17fa7" },
"cmp-path": { "branch": "main", "commit": "91ff86cd9c29299a64f968ebb45846c485725f23" },
"gitsigns.nvim": { "branch": "main", "commit": "1ef74b546732f185d0f806860fa5404df7614f28" },
"gitsigns.nvim": { "branch": "main", "commit": "863903631e676b33e8be2acb17512fdc1b80b4fb" },
"headlines.nvim": { "branch": "master", "commit": "bf17c96a836ea27c0a7a2650ba385a7783ed322e" },
"indent-blankline.nvim": { "branch": "master", "commit": "18603eb949eba08300799f64027af11ef922283f" },
"indent-blankline.nvim": { "branch": "master", "commit": "e7a4442e055ec953311e77791546238d1eaae507" },
"indent-rainbowline.nvim": { "branch": "master", "commit": "4977a9735583f13d5c1114f373342745dd35b3b4" },
"kubernetes.nvim": { "branch": "main", "commit": "101e63f8f92b2ae9cf6a78560bc2b2321d1264af" },
"lazy.nvim": { "branch": "main", "commit": "460e1cd8f24e364d54543a4b0e83f6f4ec1f65fb" },
"lazy.nvim": { "branch": "main", "commit": "1159bdccd8910a0fd0914b24d6c3d186689023d9" },
"lsp_signature.nvim": { "branch": "master", "commit": "fc38521ea4d9ec8dbd4c2819ba8126cea743943b" },
"mason-lspconfig.nvim": { "branch": "main", "commit": "25c11854aa25558ee6c03432edfa0df0217324be" },
"mason.nvim": { "branch": "main", "commit": "e2f7f9044ec30067bc11800a9e266664b88cda22" },
"nvim-autopairs": { "branch": "master", "commit": "f158dcb865c36f72c92358f87787dab2c272eaf3" },
"nvim-autopairs": { "branch": "master", "commit": "ee297f215e95a60b01fde33275cc3c820eddeebe" },
"nvim-cmp": { "branch": "main", "commit": "ae644feb7b67bf1ce4260c231d1d4300b19c6f30" },
"nvim-lspconfig": { "branch": "master", "commit": "aaec5d049af99e7b975bdf68aca582f1aa91b338" },
"nvim-notify": { "branch": "master", "commit": "d333b6f167900f6d9d42a59005d82919830626bf" },
"nvim-treesitter": { "branch": "master", "commit": "2b2ac302324f4cd2194c5dcaa59332b65e1c6403" },
"nvim-lspconfig": { "branch": "master", "commit": "d141895d1d9f41048fff201d62a2d6e96d299e32" },
"nvim-notify": { "branch": "master", "commit": "fbef5d32be8466dd76544a257d3f3dce20082a07" },
"nvim-treesitter": { "branch": "master", "commit": "5a2ff8b7ca5470b1011ed82ef3fdd53139ffc467" },
"nvim-ufo": { "branch": "main", "commit": "203c9f434feec57909ab4b1e028abeb3349b7847" },
"org-bullets.nvim": { "branch": "main", "commit": "46ae687e22192fb806b5977d664ec98af9cf74f6" },
"orgmode": { "branch": "master", "commit": "a006c9318132d51d3b7058cf2a8cc557c7fa4f22" },
"orgmode": { "branch": "master", "commit": "a5aeb14663ef08e0bb4bb847f8d79f9c253094a0" },
"plenary.nvim": { "branch": "master", "commit": "2d9b06177a975543726ce5c73fca176cedbffe9d" },
"promise-async": { "branch": "main", "commit": "119e8961014c9bfaf1487bf3c2a393d254f337e2" },
"rainbow-delimiters.nvim": { "branch": "master", "commit": "5f73b24aeb94f5274c218955573153c69ce4d1ee" },
"schemastore.nvim": { "branch": "main", "commit": "6e2af7e093a4b92e3dce014aa1cbb5449ad1ebf9" },
"telescope.nvim": { "branch": "master", "commit": "b5fd7f7ae0ea4537511077ed8ef4a6021cedba2f" },
"rainbow-delimiters.nvim": { "branch": "master", "commit": "d227e6c9879bb50af35cd733461198666981d482" },
"schemastore.nvim": { "branch": "main", "commit": "6f86b595c24ba3d6d1de23e219bf3be6131aa617" },
"telescope.nvim": { "branch": "master", "commit": "df534c3042572fb958586facd02841e10186707c" },
"tiny-inline-diagnostic.nvim": { "branch": "main", "commit": "1a83e7ce5c9d0ae4d89fc5c812b55ff8ed1d39e7" },
"tokyonight.nvim": { "branch": "main", "commit": "817bb6ffff1b9ce72cdd45d9fcfa8c9cd1ad3839" },
"trouble.nvim": { "branch": "main", "commit": "6efc446226679fda0547c0fd6a7892fd5f5b15d8" },
"which-key.nvim": { "branch": "main", "commit": "fb070344402cfc662299d9914f5546d840a22126" },
"tokyonight.nvim": { "branch": "main", "commit": "2c85fad417170d4572ead7bf9fdd706057bd73d7" },
"trouble.nvim": { "branch": "main", "commit": "254145ffd528b98eb20be894338e2d5c93fa02c2" },
"which-key.nvim": { "branch": "main", "commit": "8badb359f7ab8711e2575ef75dfe6fbbd87e4821" },
"yaml-companion.nvim": { "branch": "main", "commit": "131b0d67bd2e0f1a02e0daf2f3460482221ce3c0" }
}

View File

@@ -1,35 +0,0 @@
#+TITLE: Kuberhazard
* Let's-a Go!
** Clone repo
#+BEGIN_SRC shell
git clone git@github.com:JJGadgets/Biohazard.git && cd ./Biohazard/kube
#+END_SRC
** Install Kubernetes
+ Use Talos Linux to install, the install script & patches are in `0-install` folder.
#+BEGIN_SRC shell
cd ./0-install && chmod +x ./talosInstall.sh && ./talosInstall.sh
#+END_SRC
+ Or use Sidero, whatever floats your boat.
** Install & Bootstrap Flux
Run `kubectl apply` twice due to CRD race conditions, and applying the same manifests multiple times won't duplicate deployed resources anyway so it's probably a good idea.
(TODO: add SOPS steps after deploying it)
#+BEGIN_SRC shell
cd .. && kubectl apply -k ./1-bootstrap/flux/flux-system && kubectl apply -k ./1-bootstrap/flux/flux-system
#+END_SRC
* Hardware & Network Grid
| Name | OS | Role | CPU | RAM | Storage | VLAN | IP |
| thunder | Proxmox | Hypervisor (R730xd) | 20C40T 2xE5-2660v3 | 64GB DDR4 + 32GB zstd zram | ZFS Mirror 2xSN550 1TB + ZFS RAID10 HDDs | LAN | masked |
| pve2 | Proxmox | Hypervisor (Supermicro Dual) | 16C32T 2xE5-2670v1 | 64GB DDR3 + 32GB zstd zram | ZFS 1x870Evo 1TB + ext4 1TB HDD boot | LAN | masked |
| pve2 | Proxmox | Hypervisor (Supermicro Single) | 8C16T 1xE5-2670v1 | 64GB DDR3 + 32GB zstd zram | ZFS Mirror 2x500GB HDDs | LAN | masked |
| Blackhawk | Void | kubectl client | 8C16T Ryzen 4750U | 48GB SODIMM | ext4 SN520 512GB OS + ZFS 1xSN550 1TB /home | LAN/JJ | DHCP |
| OPNsense | FreeBSD | Firewall Router (VM) | vCPU E5-2660v3 | 8GB VM | 40GB zvol boot | LAN/58 | masked |
| Sidero | Alpine | Talos Manager (inactive) | vCPU E5-2660v3 | 6GB VM | 50GB zvol boot | 58 | 172.27.27.28 |
| cp-vip | Talos | Control Plane VIP | - | - | - | 58 | 172.27.27.27 |
| kube-pve-master1 | Talos | Untainted Master Node 1 | 6C vCPU E5-2660v3 | 12GB VM | 100GB qcow2 boot + 100GB qcow2 Ceph | 58 | 172.27.27.18 |
| kube-pve-master2 | Talos | Untainted Master Node 2 | 6C vCPU E5-2670v1 | 10GB VM | 100GB qcow2 boot + 100GB qcow2 Ceph | 58 | 172.27.27.19 |
| kube-pve-master3 | Talos | Control Plane & Master 3 | 6C vCPU E5-2660v3 | 4GB VM | 100GB qcow2 boot | 58 | 172.27.27.20 |
| kube-pve-worker3 | Talos | Worker Node 3 | 6C vCPU E5-2660v3 | 12GB VM | 100GB qcow2 boot + 100GB qcow2 Ceph | 58 | 172.27.27.23 |
| kube-pve-worker1 | Talos | Worker Node 1 (Inactive, no RAM) | 6C vCPU E5-2660v3 | 6GB VM | 100GB qcow2 boot | 58 | 172.27.27.21 |
| kube-pve-worker2 | Talos | Worker Node 2 (Inactive, no RAM) | 6C vCPU E5-2670v2 | 6GB VM | 100GB qcow2 boot | 58 | 172.27.27.22 |

View File

@@ -119,8 +119,8 @@ spec:
controller: ${APPNAME}
type: LoadBalancer
annotations:
coredns.io/hostname: "${APP_DNS_APPNAME}"
"io.cilium/lb-ipam-ips": "${APP_IP_APPNAME}"
coredns.io/hostname: "${APP_DNS_APPNAME:=${APPNAME}}"
"io.cilium/lb-ipam-ips": "${APP_IP_APPNAME:=127.0.0.1}"
ports:
http:
port: 443
@@ -138,7 +138,7 @@ spec:
main:
className: nginx-internal
annotations:
external-dns.alpha.kubernetes.io/target: "${DNS_CF}"
external-dns.alpha.kubernetes.io/target: "${DNS_CF:=cf}"
external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
# https://github.com/kubernetes/ingress-nginx/issues/6728
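The `:=` defaults added above let the chart render even on clusters that don't define `APP_DNS_APPNAME`, `APP_IP_APPNAME`, or `DNS_CF`: Flux's post-build substitution follows the bash-style rule that an unset variable falls back to the value after `:=`. A minimal shell sketch of the same semantics, with hypothetical values:

```sh
APPNAME="coredns"
unset APP_DNS_APPNAME

# Unset variable: the default after := is used
echo "${APP_DNS_APPNAME:=${APPNAME}}"
# -> coredns

# Defined variable: the explicit value wins over the default
APP_DNS_APPNAME="dns.example.internal"
echo "${APP_DNS_APPNAME:=${APPNAME}}"
# -> dns.example.internal
```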