feat: docs and taskfiles

Toboshii Nakama
2021-06-01 04:42:07 -05:00
parent a696ca71a9
commit 044462cb7e
24 changed files with 968 additions and 0 deletions

.taskfiles/ansible.yml Normal file

@@ -0,0 +1,72 @@
---
version: '3'

env:
  ANSIBLE_CONFIG: "{{.PROJECT_DIR}}/server/ansible/ansible.cfg"

vars:
  ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks"
  ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory"
  ANSIBLE_MASTER_GROUP: "master-nodes"
  ANSIBLE_WORKER_GROUP: "worker-nodes"

tasks:
  list:
    desc: List all the k8s nodes
    cmds:
      - "ansible {{.ANSIBLE_MASTER_GROUP}}:{{.ANSIBLE_WORKER_GROUP}} -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml --list-hosts"
    silent: true

  ping:
    desc: Ping all the k8s nodes
    cmds:
      - "ansible {{.ANSIBLE_MASTER_GROUP}}:{{.ANSIBLE_WORKER_GROUP}} -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml --one-line -m 'ping'"
    silent: true

  uptime:
    desc: Uptime of all the k8s nodes
    cmds:
      - "ansible {{.ANSIBLE_MASTER_GROUP}}:{{.ANSIBLE_WORKER_GROUP}} -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml --one-line -a 'uptime -p'"
    silent: true

  clean-images:
    desc: Clean up stale container images
    cmds:
      - "ansible {{.ANSIBLE_MASTER_GROUP}}:{{.ANSIBLE_WORKER_GROUP}} -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml --one-line -a 'k3s crictl rmi --prune' --become"
    silent: true

  purge-manifests:
    desc: Delete all manifests under /var/lib/rancher/k3s/server/manifests
    cmds:
      - "ansible {{.ANSIBLE_MASTER_GROUP}} -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml --one-line -a 'rm /var/lib/rancher/k3s/server/manifests/*' --become"

  playbook:ubuntu-prepare:
    desc: Prepare all the k8s nodes for running k3s
    cmds:
      - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/ubuntu/prepare.yml"
    silent: true

  playbook:ubuntu-upgrade:
    desc: Upgrade the operating system on all the k8s nodes
    cmds:
      - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/ubuntu/upgrade.yml"
    silent: true

  playbook:ubuntu-reboot:
    desc: Reboot all the k8s nodes
    cmds:
      - "ansible {{.ANSIBLE_MASTER_GROUP}}:{{.ANSIBLE_WORKER_GROUP}} -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml -m reboot"
    silent: true

  playbook:k3s-install:
    desc: Install k3s on the nodes
    cmds:
      - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/k3s/install.yml"
    silent: true

  playbook:k3s-upgrade:
    desc: Upgrade k3s on the nodes
    cmds:
      - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/home-cluster/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/k3s/upgrade.yml"
    silent: true
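Included from the root Taskfile below under the `ansible:` namespace, these tasks are run with the `task` CLI; a quick usage sketch, assuming go-task is installed and you are in the repository root:

```sh
# List and ping the k3s nodes from the Ansible inventory
task ansible:list
task ansible:ping

# Run one of the playbook tasks, e.g. prepare the Ubuntu hosts for k3s
task ansible:playbook:ubuntu-prepare
```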

.taskfiles/blocky.yml Normal file

@@ -0,0 +1,20 @@
---
version: '3'

tasks:
  enable:
    desc: Enable adblocking in blocky
    cmds:
      - "{{.PROJECT_DIR}}/hack/blocky.sh enable"
    preconditions:
      - "test -f {{.PROJECT_DIR}}/hack/blocky.sh"
    silent: true

  disable:
    desc: Disable adblocking in blocky
    cmds:
      - "{{.PROJECT_DIR}}/hack/blocky.sh disable"
    preconditions:
      - "test -f {{.PROJECT_DIR}}/hack/blocky.sh"
    silent: true

.taskfiles/flux.yml Normal file

@@ -0,0 +1,10 @@
---
version: '3'

tasks:
  sync:
    desc: Sync flux-system with the Git Repository
    cmds:
      - flux reconcile source git flux-system
    silent: true

Taskfile.yml Normal file

@@ -0,0 +1,26 @@
---
version: '3'

vars:
  PROJECT_DIR:
    sh: "git rev-parse --show-toplevel"
  CLUSTER_DIR: "{{.PROJECT_DIR}}/cluster"
  ANSIBLE_DIR: "{{.PROJECT_DIR}}/server/ansible"

env:
  KUBECONFIG: "{{.PROJECT_DIR}}/kubeconfig"

includes:
  ansible: .taskfiles/ansible.yml
  blocky: .taskfiles/blocky.yml
  flux: .taskfiles/flux.yml

tasks:
  kubeconfig:
    desc: Remotely fetch kubeconfig from k3s
    cmds:
      - rsync --verbose --progress --partial --rsync-path="sudo rsync" ubuntu@10.75.40.10:/etc/rancher/k3s/k3s.yaml ./kubeconfig
      - sed -i '' 's/127.0.0.1/10.75.45.5/g' ./kubeconfig
      - chmod go-r kubeconfig
    silent: true
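A minimal sketch of how I drive this root Taskfile, assuming the `task` binary is installed and run from the repository root:

```sh
# Show every task aggregated from the includes above, with descriptions
task --list

# Fetch the kubeconfig from the k3s node and rewrite the server address
task kubeconfig
```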

5 binary image files added (139 KiB, 176 KiB, 115 KiB, 49 KiB, 48 KiB); contents not shown.

docs/_static/custom.css vendored Normal file

@@ -0,0 +1,97 @@
@import url("https://fonts.googleapis.com/css?family=Source+Sans+Pro&display=swap");
body {
  font-family: "Source Sans Pro", sans-serif;
}
.md-logo {
  width: 40px;
  height: 40px;
  padding-bottom: 2px;
  padding-top: 2px;
}
.md-logo img {
  width: 40px;
  height: 40px;
}
.md-header, .md-footer-nav {
  background-image: linear-gradient(45deg, #B284BE 0%, #9A6DBE 24%, #775BBE 53%, #734eac 78%, #66419e 100%);
}
.md-header-nav__title {
  font-size: .85rem;
}
.check-bullet {
  color: #07bfa5;
  background-color: white;
  margin-left: -22px;
}
/* Progress bar styling */
.progress-label {
  position: absolute;
  text-align: center;
  font-weight: 700;
  width: 100%;
  /* remove original styling for thin styling
  margin: 0 !important; */
  margin-top: -0.4rem !important;
  line-height: 1.2rem;
  white-space: nowrap;
  overflow: hidden;
}
.progress-bar {
  /* remove original styling for thin styling
  height: 1.2rem; */
  height: 0.4rem;
  float: left;
  background: repeating-linear-gradient(
    45deg,
    rgba(255, 255, 255, 0.2),
    rgba(255, 255, 255, 0.2) 10px,
    rgba(255, 255, 255, 0.3) 10px,
    rgba(255, 255, 255, 0.3) 20px
  ) #2979ff;
  border-radius: 2px;
}
.progress {
  display: block;
  width: 100%;
  /* remove original styling for thin styling
  margin: 0.5rem 0;
  height: 1.2rem; */
  margin-top: 0.9rem;
  height: 0.4rem;
  background-color: #eeeeee;
  position: relative;
  border-radius: 2px;
}
.progress-100plus .progress-bar {
  background-color: #00c853;
}
.progress-80plus .progress-bar {
  background-color: #64dd17;
}
.progress-60plus .progress-bar {
  background-color: #fbc02d;
}
.progress-40plus .progress-bar {
  background-color: #ff9100;
}
.progress-20plus .progress-bar {
  background-color: #ff5252;
}
.progress-0plus .progress-bar {
  background-color: #ff1744;
}

docs/calico-metrics.md Normal file

@@ -0,0 +1,62 @@
# Calico Metrics
## calico-node
```sh
calicoctl patch felixConfiguration default --patch '{"spec":{"prometheusMetricsEnabled": true}}'
kubectl -n calico-system edit ds calico-node
```
Under `spec.template.spec.containers`:
```yaml
# ...
ports:
  - containerPort: 9091
    name: http-metrics
    protocol: TCP
# ...
```
## calico-typha
```sh
kubectl -n calico-system edit deployment calico-typha
```
Under `spec.template.spec.containers`:
```yaml
# ...
- env:
    - name: TYPHA_PROMETHEUSMETRICSENABLED
      value: "true"
    - name: TYPHA_PROMETHEUSMETRICSPORT
      value: "9092"
  # ...
  ports:
    - containerPort: 9092
      name: http-metrics
      protocol: TCP
# ...
```
## calico-kube-controllers
This is not working yet; I am unable to patch the `kubecontrollersconfiguration` with the Prometheus port.
```sh
calicoctl patch kubecontrollersconfiguration default --patch '{"spec":{"prometheusMetricsPort": 9094}}'
kubectl -n calico-system edit deployment calico-kube-controllers
```
Under `spec.template.spec.containers`:
```yaml
# ...
ports:
  - containerPort: 9094
    name: http-metrics
    protocol: TCP
# ...
```
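To confirm the endpoints actually serve metrics after these changes, I do a quick port-forward check; the `k8s-app=calico-node` label selector here is an assumption, adjust it to your install:
```sh
# Pull Felix metrics from one calico-node pod (port-forward runs in the background)
POD=$(kubectl -n calico-system get pod -l k8s-app=calico-node -o jsonpath='{.items[0].metadata.name}')
kubectl -n calico-system port-forward "${POD}" 9091:9091 &
curl -s http://localhost:9091/metrics | head

# Same idea for Typha on port 9092
kubectl -n calico-system port-forward deploy/calico-typha 9092:9092 &
curl -s http://localhost:9092/metrics | head
```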

docs/external-secrets.md Normal file

@@ -0,0 +1,21 @@
# External Secrets
!!! note "Work in progress"
    This document is a work in progress.
## Create secret for External Secrets using AWS Secrets Manager
```sh
kubectl create secret generic aws-credentials \
  --from-literal=id="access-key-id" \
  --from-literal=key="access-secret-key" \
  --namespace kube-system
```
## Create a secret using aws-cli
```sh
aws secretsmanager create-secret \
  --name namespace/secret-name \
  --secret-string "secret-data"
```
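To double-check the value landed in Secrets Manager, it can be read back with the same CLI; a small sketch (note this prints the secret to stdout):
```sh
aws secretsmanager get-secret-value \
  --secret-id namespace/secret-name \
  --query SecretString \
  --output text
```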

docs/flux.md Normal file

@@ -0,0 +1,57 @@
# Flux
!!! note "Work in progress"
    This document is a work in progress.
## Install the CLI tool
```sh
brew install fluxcd/tap/flux
```
## Install the cluster components
_For the full installation guide, visit the [Flux installation guide](https://toolkit.fluxcd.io/guides/installation/)_
Check if your cluster is ready for Flux
```sh
flux check --pre
```
Install Flux into your cluster
```sh
set -x GITHUB_TOKEN xyz;
flux bootstrap github \
  --version=v0.12.1 \
  --owner=onedr0p \
  --repository=home-cluster \
  --path=cluster/base \
  --personal \
  --private=false \
  --network-policy=false
```
**Note**: When using k3s I found that the `--network-policy` flag has to be set to `false`, or Flux will not work.
## Useful commands
Force flux to sync your repository:
```sh
flux reconcile source git flux-system
```
Force flux to sync a helm release:
```sh
flux reconcile helmrelease sonarr -n default
```
Force flux to sync a helm repository:
```sh
flux reconcile source helm ingress-nginx-charts -n flux-system
```
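When a reconcile does not do what I expect, a status overview usually shows why; a few read-only commands:
```sh
# Summarize the state of sources, kustomizations and helm releases
flux get sources git
flux get kustomizations
flux get helmreleases --all-namespaces
```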

docs/index.md Normal file

@@ -0,0 +1,3 @@
# Home Cluster
Welcome to the docs on my home Kubernetes cluster.

docs/opnsense/bgp.md Normal file

@@ -0,0 +1,4 @@
# Opnsense | BGP
!!! note "Work in progress"
    This document is a work in progress.

docs/opnsense/pxe.md Normal file

@@ -0,0 +1,23 @@
# Opnsense | PXE
!!! note "Work in progress"
    This document is a work in progress.
## Setting up TFTP
- Enable `dnsmasq` in the Opnsense services settings (set port to `63`)
- Copy over `pxe.conf` to `/usr/local/etc/dnsmasq.conf.d/pxe.conf`
- SSH into Opnsense and run the following commands:
```console
$ mkdir -p /var/lib/tftpboot/pxelinux/
$ wget https://releases.ubuntu.com/20.04/ubuntu-20.04.2-live-server-amd64.iso -O /var/lib/tftpboot/ubuntu-20.04.2-live-server-amd64.iso
$ mount -t cd9660 /dev/`mdconfig -f /var/lib/tftpboot/ubuntu-20.04.2-live-server-amd64.iso` /mnt
$ cp /mnt/casper/vmlinuz /var/lib/tftpboot/pxelinux/
$ cp /mnt/casper/initrd /var/lib/tftpboot/pxelinux/
$ umount /mnt
$ wget http://archive.ubuntu.com/ubuntu/dists/focal/main/uefi/grub2-amd64/current/grubnetx64.efi.signed -O /var/lib/tftpboot/pxelinux/pxelinux.0
```
- Copy `grub/grub.conf` into `/var/lib/tftpboot/grub/grub.conf`
- Copy `nodes/` into `/var/lib/tftpboot/nodes`
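To sanity-check the TFTP side from another machine, fetching the bootloader should work; a quick sketch (the OPNsense address is a placeholder):
```sh
# curl speaks tftp://, so this pulls the signed GRUB binary a PXE client would request
curl -o /tmp/pxelinux.0 tftp://192.168.1.1/pxelinux/pxelinux.0
```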

docs/restore.md Normal file

@@ -0,0 +1,115 @@
# Restoring after a cluster failure or rebuild
## Restoring Flux state
### 1. Locate cluster GPG key
```sh
export GPG_TTY=$(tty)
export FLUX_KEY_NAME="56k prod cluster (Flux) <email>"
gpg --list-secret-keys "${FLUX_KEY_NAME}"
# pub rsa4096 2021-03-11 [SC]
# 772154FFF783DE317KLCA0EC77149AC618D75581
# uid [ultimate] 56k prod cluster (Flux) <email>
# sub rsa4096 2021-03-11 [E]
export FLUX_KEY_FP=772154FFF783DE317KLCA0EC77149AC618D75581
```
### 2. Verify cluster is ready for Flux
```sh
flux --kubeconfig=./kubeconfig check --pre
# ► checking prerequisites
# ✔ kubectl 1.21.0 >=1.18.0-0
# ✔ Kubernetes 1.20.5+k3s1 >=1.16.0-0
# ✔ prerequisites checks passed
```
### 3. Pre-create the `flux-system` namespace
```sh
kubectl --kubeconfig=./kubeconfig create namespace flux-system --dry-run=client -o yaml | kubectl --kubeconfig=./kubeconfig apply -f -
```
### 4. Add the Flux GPG key in order for Flux to decrypt SOPS secrets
```sh
gpg --export-secret-keys --armor "${FLUX_KEY_FP}" |
  kubectl --kubeconfig=./kubeconfig create secret generic sops-gpg \
    --namespace=flux-system \
    --from-file=sops.asc=/dev/stdin
```
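A quick check that the key actually made it into the cluster before installing Flux:
```sh
kubectl --kubeconfig=./kubeconfig -n flux-system get secret sops-gpg
```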
### 5. Install Flux
!!! warning "Due to race conditions with the Flux CRDs you will have to run the below command twice. There should be no errors on this second run."
```sh
kubectl --kubeconfig=./kubeconfig apply --kustomize=./cluster/base/flux-system
# namespace/flux-system configured
# customresourcedefinition.apiextensions.k8s.io/alerts.notification.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/buckets.source.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/gitrepositories.source.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/helmcharts.source.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/helmreleases.helm.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/helmrepositories.source.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/kustomizations.kustomize.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/providers.notification.toolkit.fluxcd.io created
# customresourcedefinition.apiextensions.k8s.io/receivers.notification.toolkit.fluxcd.io created
# serviceaccount/helm-controller created
# serviceaccount/kustomize-controller created
# serviceaccount/notification-controller created
# serviceaccount/source-controller created
# clusterrole.rbac.authorization.k8s.io/crd-controller-flux-system created
# clusterrolebinding.rbac.authorization.k8s.io/cluster-reconciler-flux-system created
# clusterrolebinding.rbac.authorization.k8s.io/crd-controller-flux-system created
# service/notification-controller created
# service/source-controller created
# service/webhook-receiver created
# deployment.apps/helm-controller created
# deployment.apps/kustomize-controller created
# deployment.apps/notification-controller created
# deployment.apps/source-controller created
# unable to recognize "./cluster/base/flux-system": no matches for kind "Kustomization" in version "kustomize.toolkit.fluxcd.io/v1beta1"
# unable to recognize "./cluster/base/flux-system": no matches for kind "GitRepository" in version "source.toolkit.fluxcd.io/v1beta1"
# unable to recognize "./cluster/base/flux-system": no matches for kind "HelmRepository" in version "source.toolkit.fluxcd.io/v1beta1"
# unable to recognize "./cluster/base/flux-system": no matches for kind "HelmRepository" in version "source.toolkit.fluxcd.io/v1beta1"
# unable to recognize "./cluster/base/flux-system": no matches for kind "HelmRepository" in version "source.toolkit.fluxcd.io/v1beta1"
# unable to recognize "./cluster/base/flux-system": no matches for kind "HelmRepository" in version "source.toolkit.fluxcd.io/v1beta1"
```
:tada: At this point, after reconciliation, the Flux state should be restored.
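To watch the restore converge, the Flux CLI can report what has reconciled so far:
```sh
# Sources and Kustomizations should flip to Ready=True as reconciliation completes
flux --kubeconfig=./kubeconfig get sources git
flux --kubeconfig=./kubeconfig get kustomizations --all-namespaces
```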
## Restoring PVCs using Kasten
Recovering from a K10 backup involves the following sequence of actions:
### 1. Create a Kubernetes Secret, k10-dr-secret, using the passphrase provided while enabling DR
```sh
kubectl create secret generic k10-dr-secret \
  --namespace kasten-io \
  --from-literal key=<passphrase>
```
### 2. Install a fresh K10 instance
!!! info "Ensure that Flux has correctly deployed K10 to its namespace `kasten-io`"
### 3. Provide bucket information and credentials for the object storage location
!!! info "Ensure that Flux has correctly deployed the `minio` storage profile and that it's accessible within K10"
### 4. Restoring the K10 backup
Install the Helm chart that creates the K10 restore job and wait for the `k10-restore` job to complete
```sh
helm install k10-restore kasten/k10restore --namespace=kasten-io \
  --set sourceClusterID=<source-clusterID> \
  --set profile.name=<location-profile-name>
```
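The restore runs as a Job in the `kasten-io` namespace, so I watch the jobs rather than guess (the exact job name comes from the chart):
```sh
kubectl --namespace kasten-io get jobs --watch
```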
### 5. Application recovery
Upon completion of the DR restore job, go to the Applications card and select `Removed` under the `Filter by status` drop-down menu.
Click `Restore` under the application and select a restore point to recover from.
![Kasten remove applications](./_files/kasten_removed_applications.png)


@@ -0,0 +1,143 @@
# Rook-Ceph Maintenance
!!! note "Work in progress"
    This document is a work in progress.
## Accessing volumes
Sometimes I need to access the data in a `pvc`; below is an example of how I access the `pvc` data for my `zigbee2mqtt` deployment.
First start by scaling the app deployment to 0 replicas:
```sh
kubectl scale deploy/zigbee2mqtt --replicas 0 -n home
```
Get the `rbd` image name for the app:
```sh
kubectl get pv/(kubectl get pv | grep zigbee2mqtt | awk -F' ' '{print $1}') -n home -o json | jq -r '.spec.csi.volumeAttributes.imageName'
```
Exec into the `rook-direct-mount` toolbox:
```sh
kubectl -n rook-ceph exec -it (kubectl -n rook-ceph get pod -l "app=rook-direct-mount" -o jsonpath='{.items[0].metadata.name}') bash
```
Create a directory to mount the volume to:
```sh
mkdir -p /mnt/data
```
!!! hint "Mounting a NFS share"
This can be useful if you want to move data from or to a `nfs` share
Create a directory to mount the `nfs` share to:
```sh
mkdir -p /mnt/nfsdata
```
Mount the `nfs` share:
```sh
mount -t nfs -o "tcp,intr,rw,noatime,nodiratime,rsize=65536,wsize=65536,hard" 192.168.42.50:/volume1/Data /mnt/nfs
```
List all the `rbd` block device names:
```sh
rbd list --pool replicapool
```
Map the `rbd` block device to a `/dev/rbdX` device:
```sh
rbd map -p replicapool csi-vol-9a010830-8b0a-11eb-b291-6aaa17155076
```
Mount the `/dev/rbdX` device:
```sh
mount /dev/rbdX /mnt/data
```
At this point you'll be able to access the volume data under `/mnt/data` and change files as needed.
!!! hint "Backing up or restoring data from a NFS share"
Restoring data:
```sh
rm -rf /mnt/data/*
tar xvf /mnt/nfsdata/backups/zigbee2mqtt.tar.gz -C /mnt/data
chown -R 568:568 /mnt/data/
```
Backing up data:
```sh
tar czvf /mnt/nfsdata/backups/zigbee2mqtt.tar.gz -C /mnt/data/ .
```
When done you can unmount `/mnt/data` and unmap the `rbd` device:
```sh
umount /mnt/data
rbd unmap -p replicapool csi-vol-9a010830-8b0a-11eb-b291-6aaa17155076
```
Lastly you need to scale the deployment replicas back up to 1:
```sh
kubectl scale deploy/zigbee2mqtt --replicas 1 -n home
```
## Handling crashes
Sometimes rook-ceph will report `HEALTH_WARN` even when the cluster is actually fine; to get Ceph to report healthy again, do the following:
```sh
# list all the crashes
ceph crash ls
# if you want to read the message
ceph crash info <id>
# archive crash report
ceph crash archive <id>
# or, archive all crash reports
ceph crash archive-all
```
## Helpful links
* [Common issues](https://rook.io/docs/rook/v1.5/ceph-common-issues.html)
```sh
kubectl -n rook-ceph exec -it (kubectl -n rook-ceph get pod -l "app=rook-direct-mount" -o jsonpath='{.items[0].metadata.name}') bash
mount -t nfs -o "tcp,intr,rw,noatime,nodiratime,rsize=65536,wsize=65536,hard" 192.168.42.50:/volume1/Data /mnt/nfs
kubectl get pv/(kubectl get pv \
  | grep "tautulli" \
  | awk -F' ' '{print $1}') -n media -o json \
  | jq -r '.spec.csi.volumeAttributes.imageName'
rbd map -p replicapool csi-vol-2bd198a6-9a7d-11eb-ae97-9a71104156fa \
  | xargs -I{} mount {} /mnt/data
rm -rf /mnt/data/*
tar xvf /mnt/nfs/backups/tautulli.tar.gz -C /mnt/data
# chown -R 568:568 /mnt/data/
umount /mnt/data && \
  rbd unmap -p replicapool csi-vol-2bd198a6-9a7d-11eb-ae97-9a71104156fa
```
```sh
ceph mgr module enable rook
ceph orch set backend rook
ceph dashboard set-alertmanager-api-host http://kube-prometheus-stack-alertmanager.monitoring.svc:9093
ceph dashboard set-prometheus-api-host http://kube-prometheus-stack-prometheus.monitoring.svc:9090
```


@@ -0,0 +1,28 @@
```sh
k scale deployment/frigate -n home --replicas 0
kubectl get pv/(kubectl get pv | grep "frigate-config[[:space:]+]" | awk -F' ' '{print $1}') -n home -o json | jq -r '.spec.csi.volumeAttributes.imageName'
# csi-vol-e210e08c-80f5-11eb-bb77-f25ddf8c8685
kubectl get pv/(kubectl get pv | grep "frigate-config-v1[[:space:]+]" | awk -F' ' '{print $1}') -n home -o json | jq -r '.spec.csi.volumeAttributes.imageName'
# csi-vol-de945d8c-8fc2-11eb-b291-6aaa17155076
```
## Toolbox
Exec into the `rook-direct-mount` toolbox:
```sh
kubectl -n rook-ceph exec -it (kubectl -n rook-ceph get pod -l "app=rook-direct-mount" -o jsonpath='{.items[0].metadata.name}') bash
```
```sh
mkdir -p /mnt/{old,new}
rbd map -p replicapool csi-vol-e210e08c-80f5-11eb-bb77-f25ddf8c8685 | xargs -I{} mount {} /mnt/old
rbd map -p replicapool csi-vol-de945d8c-8fc2-11eb-b291-6aaa17155076 | xargs -0 -I{} sh -c 'mkfs.ext4 {}; mount {} /mnt/new'
cp -rp /mnt/old/. /mnt/new
chown -R 568:568 /mnt/new
umount /mnt/old
umount /mnt/new
rbd unmap -p replicapool csi-vol-e210e08c-80f5-11eb-bb77-f25ddf8c8685 && \
rbd unmap -p replicapool csi-vol-de945d8c-8fc2-11eb-b291-6aaa17155076
```

docs/sealed-secrets.md Normal file

@@ -0,0 +1,43 @@
# Sealed Secrets
!!! note "Work in progress"
    This document is a work in progress.
## Install the CLI tool
```sh
brew install kubeseal
```
## Install the cluster components
```yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: sealed-secrets
  namespace: kube-system
spec:
  interval: 5m
  chart:
    spec:
      chart: sealed-secrets
      version: 1.13.2
      sourceRef:
        kind: HelmRepository
        name: sealed-secrets-charts
        namespace: flux-system
      interval: 5m
  values:
    ingress:
      enabled: false
```
## Fetch the Sealed Secrets public certificate
```sh
kubeseal \
  --controller-name sealed-secrets \
  --fetch-cert > ./sealed-secrets-public-cert.pem
```
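With the public cert on disk, secrets can be sealed offline; a minimal sketch where the secret name, namespace, and literal are only examples:
```sh
kubectl create secret generic my-secret \
  --namespace default \
  --from-literal=foo=bar \
  --dry-run=client -o yaml | \
  kubeseal \
    --controller-name sealed-secrets \
    --cert ./sealed-secrets-public-cert.pem \
    --format yaml > my-secret-sealed.yaml
```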

docs/snmp-exporter.md Normal file

@@ -0,0 +1,92 @@
# SNMP Exporter
!!! note "Work in progress"
    This document is a work in progress.
I am using [snmp-exporter](https://github.com/prometheus/snmp_exporter) to get metrics from my Cyberpower PDUs (`PDU41001`) and my APC UPS (`Smart-UPS 1500`) into Prometheus.
## Clone and build the snmp-exporter generator
```sh
sudo apt-get install unzip build-essential libsnmp-dev golang
go get github.com/prometheus/snmp_exporter/generator
cd ${GOPATH-$HOME/go}/src/github.com/prometheus/snmp_exporter/generator
go build
make mibs
```
## Update generator.yml
!!! hint "Dealing with configmaps"
    Kubernetes `configmap`s have a max size, so I needed to strip out all the other modules.
```yaml
modules:
  apcups:
    version: 1
    walk:
      - sysUpTime
      - interfaces
      - 1.3.6.1.4.1.318.1.1.1.2 # upsBattery
      - 1.3.6.1.4.1.318.1.1.1.3 # upsInput
      - 1.3.6.1.4.1.318.1.1.1.4 # upsOutput
      - 1.3.6.1.4.1.318.1.1.1.7.2 # upsAdvTest
      - 1.3.6.1.4.1.318.1.1.1.8.1 # upsCommStatus
      - 1.3.6.1.4.1.318.1.1.1.12 # upsOutletGroups
      - 1.3.6.1.4.1.318.1.1.10.2.3.2 # iemStatusProbesTable
      - 1.3.6.1.4.1.318.1.1.26.8.3 # rPDU2BankStatusTable
    lookups:
      - source_indexes: [upsOutletGroupStatusIndex]
        lookup: upsOutletGroupStatusName
        drop_source_indexes: true
      - source_indexes: [iemStatusProbeIndex]
        lookup: iemStatusProbeName
        drop_source_indexes: true
    overrides:
      ifType:
        type: EnumAsInfo
      rPDU2BankStatusLoadState:
        type: EnumAsStateSet
      upsAdvBatteryCondition:
        type: EnumAsStateSet
      upsAdvBatteryChargingCurrentRestricted:
        type: EnumAsStateSet
      upsAdvBatteryChargerStatus:
        type: EnumAsStateSet
  cyberpower:
    version: 1
    walk:
      - ePDUIdentName
      - ePDUIdentHardwareRev
      - ePDUStatusInputVoltage ## input voltage (0.1 volts)
      - ePDUStatusInputFrequency ## input frequency (0.1 Hertz)
      - ePDULoadStatusLoad ## load (tenths of Amps)
      - ePDULoadStatusVoltage ## voltage (0.1 volts)
      - ePDULoadStatusActivePower ## active power (watts)
      - ePDULoadStatusApparentPower ## apparent power (VA)
      - ePDULoadStatusPowerFactor ## power factor of the output (hundredths)
      - ePDULoadStatusEnergy ## apparent power measured (0.1 kw/h).
      - ePDUOutletControlOutletName ## The name of the outlet.
      - ePDUOutletStatusLoad ## Outlet load (tenths of Amps)
      - ePDUOutletStatusActivePower ## Outlet load (watts)
      - envirTemperature ## temp expressed (1/10 ºF)
      - envirTemperatureCelsius ## temp expressed (1/10 ºC)
      - envirHumidity ## relative humidity (%)
```
## Get the Cyberpower MIB
```sh
wget https://dl4jz3rbrsfum.cloudfront.net/software/CyberPower_MIB_v2.9.MIB.zip
unzip CyberPower_MIB_v2.9.MIB.zip
mv CyberPower_MIB_v2.9.MIB mibs/
```
## Generate the snmp.yml
This will create a `snmp.yml` file, which is needed for the `configmap` used by the `snmp-exporter` deployment
```sh
export MIBDIRS=mibs
./generator generate
```
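The generated file then has to end up in the cluster; I load it into a `configmap` something like this (the name and namespace are assumptions, match them to your `snmp-exporter` deployment):
```sh
kubectl create configmap snmp-exporter-config \
  --namespace monitoring \
  --from-file=snmp.yml \
  --dry-run=client -o yaml | kubectl apply -f -
```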

hack/blocky.sh Executable file

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
ACTION="${1}"
DURATION="${2}"
NAMESPACE="networking"
BLOCKY_PODS=$(kubectl get pods -n "${NAMESPACE}" -o=jsonpath="{range .items[*]}{.metadata.name} " -l app.kubernetes.io/name=blocky)
for pod in $BLOCKY_PODS; do
    case "${ACTION}" in
        status)
            kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking status
            ;;
        enable)
            kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking enable
            ;;
        disable)
            if [ -z "${DURATION}" ]; then
                kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking disable
            else
                kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking disable --duration "${DURATION}"
            fi
            ;;
    esac
done
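Called directly (or via the blocky taskfile above), usage looks like this; the two-hour duration is only an example:

```sh
./hack/blocky.sh status
./hack/blocky.sh enable
./hack/blocky.sh disable 2h   # re-enables blocking automatically after two hours
```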

hack/playlist.sh Executable file

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
while true; do
    figlet -f slant "media" | lolcat
    kubecolor get po -n media; sleep 5; clear;
    figlet -f slant "home" | lolcat
    kubecolor get po -n home; sleep 5; clear;
    figlet -f slant "nodes" | lolcat
    kubecolor get nodes; sleep 5; clear;
done

mkdocs.yml Normal file

@@ -0,0 +1,117 @@
---
site_name: Toboshii | Home Cluster
site_description: My home Kubernetes (k3s) cluster managed by GitOps (Flux2)
site_author: Toboshii Nakama
site_url: https://toboshii.github.io/home-cluster/

repo_name: toboshii/home-cluster
repo_url: https://github.com/toboshii/home-cluster
edit_uri: "edit/main/docs/"

theme:
  name: material
  icon:
    repo: fontawesome/brands/github-alt
  language: en
  features:
    - navigation.sections
    # - navigation.tabs
    - navigation.tracking
    # - navigation.indexes
    - search.highlight
    - search.share
    - search.suggest
  palette:
    - media: "(prefers-color-scheme: light)"
      scheme: default
      primary: blue
      accent: indigo
      toggle:
        icon: material/weather-sunny
        name: Switch to dark mode
    - media: "(prefers-color-scheme: dark)"
      scheme: slate
      primary: blue
      accent: indigo
      toggle:
        icon: material/weather-night
        name: Switch to light mode
  font:
    text: Roboto
    code: Roboto Mono

docs_dir: ./docs

extra_css:
  - _static/custom.css

# Plugins
plugins:
  - search
  - minify:
      minify_html: true

# Customization
extra:
  social:
    - icon: fontawesome/brands/github
      link: https://github.com/toboshii
    - icon: fontawesome/brands/discord
      link: https://discord.gg/sTMX7Vh
    - icon: fontawesome/brands/docker
      link: https://hub.docker.com/u/toboshii

# Extensions
markdown_extensions:
  - admonition
  - abbr
  - attr_list
  - def_list
  - footnotes
  - meta
  - md_in_html
  - toc:
      permalink: true
  - pymdownx.arithmatex:
      generic: true
  - pymdownx.betterem:
      smart_enable: all
  - pymdownx.caret
  - pymdownx.critic
  - pymdownx.details
  - pymdownx.emoji:
      emoji_index: !!python/name:materialx.emoji.twemoji
      emoji_generator: !!python/name:materialx.emoji.to_svg
  - pymdownx.highlight
  - pymdownx.inlinehilite
  - pymdownx.keys
  - pymdownx.magiclink:
      repo_url_shorthand: true
      user: toboshii
      repo: home-cluster
  - pymdownx.mark
  - pymdownx.smartsymbols
  - pymdownx.superfences:
      custom_fences:
        - name: mermaid
          class: mermaid-experimental
          format: !!python/name:pymdownx.superfences.fence_code_format
  - pymdownx.tabbed
  - pymdownx.tasklist:
      custom_checkbox: true
  - pymdownx.tilde

# Page tree
nav:
  - Introduction: index.md
  - Restore Process: restore.md
  # - External Secrets: external-secrets.md
  # - Flux: flux.md
  # - Opnsense:
  #   - BGP: opnsense/bgp.md
  #   - PXE: opnsense/pxe.md
  # - Rook-Ceph Maintenance: rook-ceph-maintenance.md
  # - Sealed Secrets: sealed-secrets.md
  # - Snmp Exporter: snmp-exporter.md
  # - Velero: velero.md