chore(kanidm): app-template v2, cleanup

JJGadgets
2024-02-09 14:41:24 +08:00
parent 4373dc2585
commit 36d6707d8f
9 changed files with 153 additions and 250 deletions

View File

@@ -5,33 +5,66 @@ metadata:
name: &app kanidm
namespace: *app
spec:
interval: 5m
chart:
spec:
chart: app-template
version: 1.5.1
version: "2.5.0"
sourceRef:
name: bjw-s
kind: HelmRepository
namespace: flux-system
values:
automountServiceAccountToken: false
controller:
type: statefulset
image:
repository: docker.io/kanidm/server
tag: latest@sha256:c10a2938d3a8c15169a3ed2f6d08d25430d22cef3d5749d57ab3a9052d60354c
env:
TZ: "${CONFIG_TZ}"
controllers:
main:
type: deployment
replicas: 1
pod:
labels:
ingress.home.arpa/world: "allow"
ingress.home.arpa/cluster: "allow"
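# these labels opt the pods into the labelled-allow-ingress-* CiliumClusterwideNetworkPolicies updated later in this commit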
containers:
main:
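# the &img and &sc anchors are reused below so the init container runs the same image with the same hardening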
image: &img
repository: "docker.io/kanidm/server"
tag: "1.1.0-rc.16@sha256:3d676ffb429f8367e01b4b9a9e8e88410ee4684891446fc9ab2b2bf2481e31c6"
env:
TZ: "${CONFIG_TZ}"
securityContext: &sc
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
resources:
requests:
cpu: "10m"
memory: "128Mi"
limits:
cpu: "3000m"
memory: "6000Mi"
initContainers:
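# first boot only: recover_account (re)generates the admin password; the [ -s ... ] guard skips it once /data/kanidm.db is non-empty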
01-init-kanidm-admin-password:
command:
- /bin/sh
- -c
- "[ -s /data/kanidm.db ] || /sbin/kanidmd recover_account -c /data/server.toml admin"
image: *img
securityContext: *sc
resources:
requests:
cpu: "10m"
limits:
memory: "150Mi"
service:
main:
enabled: true
primary: true
controller: main
type: LoadBalancer
externalTrafficPolicy: Local
externalTrafficPolicy: Cluster
annotations:
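# Cilium LB-IPAM assigns the requested IP to the Service; coredns.io/hostname publishes the matching record (split-DNS via a k8s_gateway-style resolver assumed)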
coredns.io/hostname: "${APP_DNS_KANIDM}"
"io.cilium/lb-ipam-ips": "${APP_IP_KANIDM}"
externalIPs:
- "${APP_IP_KANIDM}"
ports:
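# LDAPS maps Service port 636 to kanidm's 3636 below; the HTTPS mapping (presumably 443 -> kanidm's default 8443) is truncated from this hunk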
http:
enabled: true
@@ -48,74 +81,37 @@ spec:
port: 636
targetPort: 3636
protocol: TCP
ingress:
main:
enabled: true
primary: true
ingressClassName: "nginx-internal"
annotations:
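# external-dns publishes the record behind the Cloudflare proxy; backend-protocol re-encrypts to kanidm, which only serves TLS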
external-dns.alpha.kubernetes.io/target: "${DNS_SHORT_CF}"
external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# https://github.com/kubernetes/ingress-nginx/issues/6728
nginx.ingress.kubernetes.io/server-snippet: |
proxy_ssl_name ${APP_DNS_KANIDM};
proxy_ssl_server_name on;
large_client_header_buffers 4 8k;
client_header_buffer_size 8k;
# without larger header buffers, the hardened ingress-nginx defaults (2 header buffers of 1k each) trigger the following errors:
# HTTP1.1 /v1/auth/valid: 400 Request Header Or Cookie Too Large
# HTTP2 /v1/auth/valid: HTTP/2 stream was not closed cleanly before end of the underlying stream
hosts:
- host: &host "${APP_DNS_KANIDM}"
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- *host
# dnsConfig:
# options:
# - name: ndots
# value: "1"
podSecurityContext:
runAsUser: &uid ${APP_UID_KANIDM}
runAsGroup: *uid
fsGroup: *uid
fsGroupChangePolicy: Always
volumeClaimTemplates:
- name: data
mountPath: /data
accessMode: ReadWriteOnce
size: 20Gi
storageClass: block
- name: backup
mountPath: /backup
accessMode: ReadWriteOnce
size: 20Gi
storageClass: block
persistence:
config:
enabled: true
type: configMap
name: kanidm-config
subPath: server.toml
mountPath: /data/server.toml
readOnly: true
tls-fullchain:
name: "kanidm-config"
advancedMounts:
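# app-template v2 advancedMounts nesting: controller -> container -> list of mounts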
main:
main:
- subPath: "server.toml"
path: "/data/server.toml"
readOnly: true
data:
enabled: true
existingClaim: "kanidm-data"
globalMounts:
- subPath: "data"
path: "/data"
tls:
enabled: true
type: secret
name: kanidm-tls
subPath: tls.crt
mountPath: /tls/fullchain.pem
readOnly: true
tls-privkey:
enabled: true
type: secret
name: kanidm-tls
subPath: tls.key
mountPath: /tls/privkey.pem
readOnly: true
name: "short-domain-tls"
defaultMode: 0400
advancedMounts:
main:
main:
- subPath: "tls.crt"
path: "/tls/fullchain.pem"
readOnly: true
- subPath: "tls.key"
path: "/tls/privkey.pem"
readOnly: true
configMaps:
config:
enabled: true
@@ -132,26 +128,27 @@ spec:
trust_x_forward_for = true
db_path = "/data/kanidm.db"
db_fs_type = "other"
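# daily online backup at 22:00 (cron-with-seconds syntax), keeping the last 7 versions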
[online_backup]
path = "/backup/"
schedule = "0 0 22 * * * *"
versions = 7
resources:
requests:
cpu: 10m
memory: 128Mi
limits:
memory: 6000Mi
initContainers:
01-init-kanidm-admin-password:
command:
- /bin/sh
- -c
- '[ -s /data/kanidm.db ] || /sbin/kanidmd recover_account -c /data/server.toml admin'
image: docker.io/kanidm/server:latest@sha256:c10a2938d3a8c15169a3ed2f6d08d25430d22cef3d5749d57ab3a9052d60354c
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /data
name: data
- mountPath: /config
name: config
defaultPodOptions:
automountServiceAccountToken: false
enableServiceLinks: false
securityContext:
runAsNonRoot: true
runAsUser: &uid ${APP_UID_KANIDM}
runAsGroup: *uid
fsGroup: *uid
fsGroupChangePolicy: "Always"
seccompProfile: { type: "RuntimeDefault" }
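# spread replicas across nodes; the nodeAffinity below keeps kanidm off any node carrying the opt-out label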
topologySpreadConstraints:
- maxSkew: 1
topologyKey: "kubernetes.io/hostname"
whenUnsatisfiable: "DoNotSchedule"
labelSelector:
matchLabels:
app.kubernetes.io/name: *app
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "fuckoff.home.arpa/kanidm"
operator: "DoesNotExist"

View File

@@ -1,73 +0,0 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/cilium.io/ciliumnetworkpolicy_v2.json
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
name: &app kanidm
namespace: *app
spec:
endpointSelector: {}
ingress:
# same namespace
- fromEndpoints:
- matchLabels:
io.kubernetes.pod.namespace: *app
# ingress controller
- fromEndpoints:
- matchLabels:
io.kubernetes.pod.namespace: ingress
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
toPorts:
- ports:
- port: "443"
protocol: TCP
- port: "8443"
protocol: TCP
# terminatingTLS:
# secret:
# name: "kanidm-tls"
# namespace: *app
# originatingTLS:
# secret:
# name: "kanidm-tls"
# namespace: *app
# rules:
# http:
# - {}
- ports:
- port: "636"
protocol: TCP
- port: "3636"
protocol: TCP
- fromCIDRSet:
- cidr: "${IP_ROUTER_LAN_CIDR}"
- cidr: "${IP_WG_USER_1_V4}"
- cidr: "${IP_WG_GUEST_V4}"
toPorts:
- ports:
- port: "443"
protocol: TCP
- port: "8443"
protocol: TCP
# terminatingTLS:
# secret:
# name: "kanidm-tls"
# namespace: *app
# originatingTLS:
# secret:
# name: "kanidm-tls"
# namespace: *app
# rules:
# http:
# - {}
- ports:
- port: "636"
protocol: TCP
- port: "3636"
protocol: TCP
egress:
# same namespace
- toEndpoints:
- matchLabels:
io.kubernetes.pod.namespace: *app

View File

@@ -1,36 +0,0 @@
---
apiVersion: v1
kind: Secret
metadata:
name: kanidm-restic
namespace: kanidm
type: Opaque
stringData:
RESTIC_REPOSITORY: ${SECRET_VOLSYNC_R2_REPO}/kanidm
RESTIC_PASSWORD: ${SECRET_VOLSYNC_PASSWORD}
AWS_ACCESS_KEY_ID: ${SECRET_VOLSYNC_R2_ID}
AWS_SECRET_ACCESS_KEY: ${SECRET_VOLSYNC_R2_KEY}
---
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
name: kanidm-restic
namespace: kanidm
spec:
sourcePVC: backup-kanidm-0
trigger:
schedule: "0 22 * * *" # 6am GMT+8
restic:
copyMethod: Snapshot
pruneIntervalDays: 14
repository: kanidm-restic
cacheCapacity: 2Gi
volumeSnapshotClassName: block
storageClassName: block
moverSecurityContext:
runAsUser: ${APP_UID_KANIDM}
runAsGroup: ${APP_UID_KANIDM}
fsGroup: ${APP_UID_KANIDM}
retain:
daily: 14
within: 7d

View File

@@ -1,5 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kanidm

View File

@@ -1,20 +0,0 @@
---
# yaml-language-server: $schema=https://crds.jank.ing/cert-manager.io/certificate_v1.json
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kanidm
namespace: kanidm
spec:
secretName: kanidm-tls
issuerRef:
name: letsencrypt-production
kind: ClusterIssuer
privateKey:
algorithm: ECDSA
size: 384
commonName: ${DNS_SHORT}
dnsNames:
- ${DNS_SHORT}
- "*.${DNS_SHORT}"
- "*.damn.${DNS_SHORT}"

View File

@@ -1,28 +1,44 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: kanidm-deps
namespace: flux-system
spec:
path: ./kube/deploy/apps/kanidm/deps
dependsOn:
- name: 1-core-tls-cert-manager-config
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: kanidm-app
namespace: flux-system
labels: &l
app.kubernetes.io/name: "kanidm"
spec:
commonMetadata:
labels: *l
path: ./kube/deploy/apps/kanidm/app
targetNamespace: "kanidm"
dependsOn:
- name: kanidm-deps
- name: kanidm-pvc
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: kanidm-pvc
namespace: flux-system
labels: &l
app.kubernetes.io/name: "kanidm"
spec:
commonMetadata:
labels: *l
path: ./kube/deploy/core/storage/volsync/template
targetNamespace: "kanidm"
dependsOn:
- name: 1-core-storage-volsync-app
- name: 1-core-storage-rook-ceph-cluster
- name: 1-core-ingress-nginx-app
#- name: 1-core-storage-volsync-app
healthChecks:
- name: kanidm
namespace: kanidm
kind: HelmRelease
apiVersion: helm.toolkit.fluxcd.io/v2beta1
postBuild:
substitute:
PVC: "kanidm-data"
SIZE: "10Gi"
SC: &sc "file"
SNAP: *sc
ACCESSMODE: "ReadWriteMany"
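# !!str plus a block scalar forces the UID values below to be YAML strings, presumably so the substituted output stays string-typed downstream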
RUID: !!str &uid |
${APP_UID_KANIDM}
RGID: !!str |
${APP_UID_KANIDM}
RFSG: !!str |
${APP_UID_KANIDM}

View File

@@ -2,4 +2,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ns.yaml
- ks.yaml

View File

@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kanidm
labels:
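# prune disabled stops Flux from ever deleting the namespace; the &ps anchor pins all three Pod Security Standards modes to restricted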
kustomize.toolkit.fluxcd.io/prune: disabled
pod-security.kubernetes.io/enforce: &ps restricted
pod-security.kubernetes.io/audit: *ps
pod-security.kubernetes.io/warn: *ps

View File

@@ -15,6 +15,19 @@ spec:
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: labelled-allow-ingress-world
spec:
endpointSelector:
matchLabels:
ingress.home.arpa/cluster: "allow"
ingress:
- fromEntities:
- "cluster"
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/datreeio/CRDs-catalog/main/cilium.io/ciliumclusterwidenetworkpolicy_v2.json
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: labelled-allow-ingress-internet
spec:
@@ -92,4 +105,4 @@ spec:
ingress.home.arpa/wg-guest: "allow"
ingress:
- fromCIDRSet:
- cidr: "${IP_WG_GUEST_V4}"