From 586ded1bf0dd7c26c91ab6ae2671b8405263ec56 Mon Sep 17 00:00:00 2001
From: JJGadgets
Date: Tue, 11 Nov 2025 00:13:36 +0800
Subject: [PATCH] feat: add strfry

---
 kube/clusters/biohazard/flux/flux-repo.yaml |   1 +
 .../biohazard/flux/kustomization.yaml       |   1 +
 kube/deploy/apps/strfry/app/hr.yaml         | 175 ++++++++++++++++++
 kube/deploy/apps/strfry/ks.yaml             |  63 +++++++
 kube/deploy/apps/strfry/kustomization.yaml  |   8 +
 kube/deploy/apps/strfry/ns.yaml             |  10 +
 kube/deploy/apps/strfry/vars.yaml           |  36 ++++
 kube/templates/test/app/hr.yaml             |  16 +-
 kube/templates/test/ks.yaml                 |  14 +-
 kube/templates/test/kustomization.yaml      |   2 +
 kube/templates/test/vars.yaml               |  41 ++++
 11 files changed, 353 insertions(+), 14 deletions(-)
 create mode 100644 kube/deploy/apps/strfry/app/hr.yaml
 create mode 100644 kube/deploy/apps/strfry/ks.yaml
 create mode 100644 kube/deploy/apps/strfry/kustomization.yaml
 create mode 100644 kube/deploy/apps/strfry/ns.yaml
 create mode 100644 kube/deploy/apps/strfry/vars.yaml
 create mode 100644 kube/templates/test/vars.yaml

diff --git a/kube/clusters/biohazard/flux/flux-repo.yaml b/kube/clusters/biohazard/flux/flux-repo.yaml
index a1f3909b..d0bfd42e 100644
--- a/kube/clusters/biohazard/flux/flux-repo.yaml
+++ b/kube/clusters/biohazard/flux/flux-repo.yaml
@@ -82,6 +82,7 @@ spec:
   sourceRef:
     kind: GitRepository
     name: flux-system
+    namespace: flux-system
   target:
     group: kustomize.toolkit.fluxcd.io
     version: v1

diff --git a/kube/clusters/biohazard/flux/kustomization.yaml b/kube/clusters/biohazard/flux/kustomization.yaml
index f0b7bc52..79f37e2b 100644
--- a/kube/clusters/biohazard/flux/kustomization.yaml
+++ b/kube/clusters/biohazard/flux/kustomization.yaml
@@ -162,6 +162,7 @@ resources:
   - ../../../deploy/apps/kiwix/
   - ../../../deploy/apps/trilium/
   - ../../../deploy/apps/etherpad/
+  - ../../../deploy/apps/strfry/
   - ../../../deploy/vm/_kubevirt/
   #- ../../../deploy/vm/_base/
   - ../../../deploy/vm/ad/

diff --git a/kube/deploy/apps/strfry/app/hr.yaml b/kube/deploy/apps/strfry/app/hr.yaml
new file mode 100644
index 00000000..da0006f3
--- /dev/null
+++ b/kube/deploy/apps/strfry/app/hr.yaml
@@ -0,0 +1,175 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-4.4.0/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: &app strfry
  namespace: *app
spec:
  interval: 5m
  chart:
    spec:
      chart: app-template
      version: 4.4.0
      sourceRef:
        name: bjw-s
        kind: HelmRepository
        namespace: flux-system
  values:
    controllers:
      app:
        type: deployment
        replicas: 1
        strategy: RollingUpdate
        pod:
          labels:
            ingress.home.arpa/nginx-internal: allow
            egress.home.arpa/internet: allow
        containers:
          app:
            image: &img
              repository: ghcr.io/hoytech/strfry
              tag: latest@sha256:03e9572b264bb6c136339c1c169af8c4c6f92febc35506e60a8aa7ff4e6281b5
            env: &env
              TZ: "${CONFIG_TZ}"
            securityContext: &sc
              readOnlyRootFilesystem: true
              allowPrivilegeEscalation: false
              capabilities:
                drop: ["ALL"]
            resources:
              requests:
                cpu: "10m"
              limits:
                cpu: "1"
                memory: "512Mi"
            probes:
              liveness:
                enabled: true
              readiness:
                enabled: true
    service:
      app:
        controller: app
        ports:
          http:
            port: 80
            targetPort: 8080
            protocol: HTTP
            appProtocol: http
    ingress:
      app:
        className: nginx-public
        annotations:
          external-dns.alpha.kubernetes.io/target: "${DNS_CF:=cf}"
          external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
        hosts:
          - host: &host "strfry.jjgadgets.tech"
            paths: &paths
              - path: /
                pathType: Prefix
                service:
                  identifier: app
                  port: http
        tls:
          - hosts: [*host]
    persistence:
      config:
        type: configMap
        identifier: config
        advancedMounts:
          app:
            app:
              - subPath: strfry.conf
                path: /etc/strfry.conf
      data:
        existingClaim: strfry-data
        advancedMounts:
          app:
            app:
              - subPath: data
                path: /data
      # tmp:
      #   type: emptyDir
      #   medium: Memory
      #   sizeLimit: 16Mi
      #   globalMounts:
      #     - subPath: tmp
      #       path: /tmp
    configMaps:
      config:
        data:
          strfry.conf: |
            db = "/data"

            dbParams {
                # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
                noReadAhead = false
            }

            events {
                # Maximum size of normalised JSON, in bytes
                maxEventSize = 65536
                # Maximum number of tags allowed
                maxNumTags = 2000
                # Maximum size for tag values, in bytes
                maxTagValSize = 1024
            }

            relay {
                bind = "0.0.0.0"
                port = 8080
                # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
                nofiles = 1000000
                # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
                realIpHeader = "x-real-ip"

                info {
                    name = "The JJGadgets Hut"
                    description = "Home to the Destroyer of Tech, JJGadgets."
                    pubkey = "" # NIP-11: Administrative nostr pubkey, for contact purposes
                    contact = "https://jjgadgets.tech"
                    icon = "https://jjgadgets.tech/images/icon.png"
                }

                # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
                maxWebsocketPayloadSize = 131072
                # Maximum number of filters allowed in a REQ
                maxReqFilterSize = 200
                # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
                autoPingSeconds = 55
                # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
                enableTcpKeepalive = true

                writePolicy {
                    plugin = ""
                }

                logging {
                    invalidEvents = false
                }
            }
    defaultPodOptions:
      automountServiceAccountToken: false
      enableServiceLinks: false
      hostAliases:
        - ip: "${APP_IP_AUTHENTIK:=127.0.0.1}"
          hostnames: ["${APP_DNS_AUTHENTIK:=authentik}"]
      dnsConfig:
        options:
          - name: ndots
            value: "1"
      hostUsers: false
      securityContext:
        runAsNonRoot: true
        runAsUser: &uid 65534
        runAsGroup: *uid
        fsGroup: *uid
        fsGroupChangePolicy: Always
        seccompProfile: { type: "RuntimeDefault" }
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: "fuckoff.home.arpa/{{ .Release.Name }}"
                    operator: DoesNotExist

diff --git a/kube/deploy/apps/strfry/ks.yaml b/kube/deploy/apps/strfry/ks.yaml
new file mode 100644
index 00000000..7e21d1d3
--- /dev/null
+++ b/kube/deploy/apps/strfry/ks.yaml
@@ -0,0 +1,63 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: strfry-app
  namespace: &ns strfry
  labels: &l
    app.kubernetes.io/name: "strfry"
spec:
  targetNamespace: *ns
  commonMetadata:
    labels: *l
  path: ./kube/deploy/apps/strfry/app
  components:
    - ../../../core/flux-system/alerts/template/
  dependsOn:
    - name: crds
      namespace: flux-system
    - name: strfry-pvc
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: strfry-pvc
  namespace: &ns strfry
  labels: &l
    app.kubernetes.io/name: strfry
    pvc.home.arpa/volsync: "true"
spec:
  targetNamespace: *ns
  commonMetadata:
    labels: *l
  path: ./kube/deploy/core/storage/volsync/template
  dependsOn:
    - name: crds
      namespace: flux-system
  postBuild:
    substitute:
      PVC: &pvc "strfry-data"
      SIZE: "10Gi"
      SC: &sc "file"
      SNAP: *sc
      ACCESSMODE: "ReadWriteMany"
      SNAP_ACCESSMODE: "ReadOnlyMany"
      VS_APP_CURRENT_VERSION: "ghcr.io/hoytech/strfry:latest@sha256:03e9572b264bb6c136339c1c169af8c4c6f92febc35506e60a8aa7ff4e6281b5"
      RUID: &uid "1000"
      RGID: *uid
      RFSG: *uid
  healthChecks:
    - apiVersion: v1
      kind: PersistentVolumeClaim
      name: *pvc
      namespace: *ns
    - apiVersion: volsync.backube/v1alpha1
      kind: ReplicationSource
      name: "strfry-data-r2-updates-restic"
      namespace: *ns
  healthCheckExprs:
    - apiVersion: volsync.backube/v1alpha1
      kind: ReplicationSource
      current: status.conditions.filter(s, s.status == "False").all(s, s.reason == "WaitingForManual" || s.reason == "WaitingForSchedule") && status.latestMoverStatus.result == "Successful"
      # inProgress: status.conditions.filter(s, s.status == "True").all(s, s.reason == "SyncInProgress") # TODO: somehow the inProgress healthCheckExprs CEL expression causes Flux to infinitely think it's inProgress
      failed: status.latestMoverStatus.result == "Failed" # TODO: add failed conditions

diff --git a/kube/deploy/apps/strfry/kustomization.yaml b/kube/deploy/apps/strfry/kustomization.yaml
new file mode 100644
index 00000000..9a921460
--- /dev/null
+++ b/kube/deploy/apps/strfry/kustomization.yaml
@@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: strfry
resources:
  - ns.yaml
  - vars.yaml
  - ks.yaml

diff --git a/kube/deploy/apps/strfry/ns.yaml b/kube/deploy/apps/strfry/ns.yaml
new file mode 100644
index 00000000..3d167a6e
--- /dev/null
+++ b/kube/deploy/apps/strfry/ns.yaml
@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: strfry
  labels:
    kustomize.toolkit.fluxcd.io/prune: disabled
    pod-security.kubernetes.io/enforce: &ps restricted
    pod-security.kubernetes.io/audit: *ps
    pod-security.kubernetes.io/warn: *ps

diff --git a/kube/deploy/apps/strfry/vars.yaml b/kube/deploy/apps/strfry/vars.yaml
new file mode 100644
index 00000000..743afed4
--- /dev/null
+++ b/kube/deploy/apps/strfry/vars.yaml
@@ -0,0 +1,36 @@
---
# yaml-language-server: $schema=https://crds.jank.ing/external-secrets.io/externalsecret_v1.json
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: &name "${CLUSTER_NAME}-vars"
  namespace: strfry
spec:
  refreshInterval: 1m
  secretStoreRef:
    kind: ClusterSecretStore
    name: 1p
  dataFrom:
    - extract:
        key: ".${CLUSTER_NAME}-vars"
    - extract:
        key: "authentik vars - ${CLUSTER_NAME}"
    - extract:
        key: "strfry - ${CLUSTER_NAME}"
  target:
    creationPolicy: Owner
    deletionPolicy: Retain
    name: *name
    template:
      type: Opaque
      data:
        # Core cluster-wide vars
        CLUSTER_NAME: "${CLUSTER_NAME:=biohazard}"
        CONFIG_TZ: '{{ .CONFIG_TZ }}'
        # App specific
        APP_DNS_STRFRY: '{{ .APP_DNS_STRFRY }}'
        # authentik hostAliases
        APP_DNS_AUTHENTIK: '{{ .APP_DNS_AUTHENTIK }}'
        APP_IP_AUTHENTIK: '{{ .APP_IP_AUTHENTIK }}'
        # Other cluster-wide vars
        DNS_CF: '{{ .DNS_CF }}'

diff --git a/kube/templates/test/app/hr.yaml b/kube/templates/test/app/hr.yaml
index 48c113b1..2d28eaa8 100644
--- a/kube/templates/test/app/hr.yaml
+++ b/kube/templates/test/app/hr.yaml
@@ -1,5 +1,5 @@
 ---
-# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-4.2.0/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
+# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-4.4.0/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
 apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
 metadata:
@@ -10,7 +10,7 @@ spec:
   chart:
     spec:
       chart: app-template
-      version: 4.1.2
+      version: 4.4.0
       sourceRef:
         name: bjw-s
         kind: HelmRepository
@@ -168,15 +168,17 @@ spec:
           type: configMap
           identifier: config
           advancedMounts:
-            ${APPNAME}:
+            app:
               app:
                 - subPath: server.toml
                   path: /data/server.toml
         data:
           existingClaim: ${APPNAME}-data
-          globalMounts:
-            - subPath: data
-              path: /data
+          advancedMounts:
+            app:
+              app:
+                - subPath: data
+                  path: /data
         nfs:
           type: nfs
           server: "${IP_TRUENAS:=127.0.0.1}"
@@ -196,7 +198,7 @@ spec:
             name: ${APPNAME}-tls
             defaultMode: 0400
           advancedMounts:
-            ${APPNAME}:
+            app:
               app:
                 - subPath: tls.crt
                   path: /tls/fullchain.pem

diff --git a/kube/templates/test/ks.yaml b/kube/templates/test/ks.yaml
index 62ff4b71..5d7b68bb 100644
--- a/kube/templates/test/ks.yaml
+++ b/kube/templates/test/ks.yaml
@@ -23,12 +23,12 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
   name: ${APPNAME}-pvc
-  namespace: flux-system
+  namespace: &ns ${APPNAME}
   labels: &l
     app.kubernetes.io/name: ${APPNAME}
     pvc.home.arpa/volsync: "true"
 spec:
-  targetNamespace: &ns ${APPNAME}
+  targetNamespace: *ns
   commonMetadata:
     labels: *l
   path: ./kube/deploy/core/storage/volsync/template
@@ -59,15 +59,15 @@ spec:
   healthCheckExprs:
     - apiVersion: volsync.backube/v1alpha1
       kind: ReplicationSource
-      current: status.conditions.all(s, s.reason == "WaitingForManual") && status.latestMoverStatus.result == "Successful"
-      inProgress: status.conditions.all(s, s.reason == "SyncInProgress")
+      current: status.conditions.filter(s, s.status == "False").all(s, s.reason == "WaitingForManual" || s.reason == "WaitingForSchedule") && status.latestMoverStatus.result == "Successful"
+      # inProgress: status.conditions.filter(s, s.status == "True").all(s, s.reason == "SyncInProgress") # TODO: somehow the inProgress healthCheckExprs CEL expression causes Flux to infinitely think it's inProgress
       failed: status.latestMoverStatus.result == "Failed" # TODO: add failed conditions
 ---
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
   name: ${APPNAME}-db
-  namespace: flux-system
+  namespace: &ns pg
   labels: &l
     prune.flux.home.arpa/enabled: "true"
     db.home.arpa/pg: "pg-default"
@@ -75,13 +75,13 @@ spec:
   commonMetadata:
     labels: *l
   path: ./kube/deploy/core/db/pg/clusters/template/pguser
-  targetNamespace: "pg"
+  targetNamespace: *ns
   dependsOn:
     - name: 1-core-db-pg-clusters-default
     - name: 1-core-secrets-es-k8s
   postBuild:
     substitute:
       PG_NAME: "default"
       PG_DB_USER: &app "${APPNAME}"
       PG_APP_NS: *app
-      PG_APP_CURRENT_VERSION: "{{.IMAGENAME}}:{{.IMAGETAG}}"
+      PG_APP_CURRENT_VERSION: "${IMAGENAME}:${IMAGETAG}"

diff --git a/kube/templates/test/kustomization.yaml b/kube/templates/test/kustomization.yaml
index 5eeb2657..364454f9 100644
--- a/kube/templates/test/kustomization.yaml
+++ b/kube/templates/test/kustomization.yaml
@@ -1,6 +1,8 @@
 ---
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
+namespace: ${APPNAME}
 resources:
   - ns.yaml
+  - vars.yaml
   - ks.yaml

diff --git a/kube/templates/test/vars.yaml b/kube/templates/test/vars.yaml
new file mode 100644
index 00000000..18db2cec
--- /dev/null
+++ b/kube/templates/test/vars.yaml
@@ -0,0 +1,41 @@
---
# yaml-language-server: $schema=https://crds.jank.ing/external-secrets.io/externalsecret_v1.json
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
  name: &name "${CLUSTER_NAME:=biohazard}-vars"
  namespace: ${APPNAME}
spec:
  refreshInterval: 1m
  secretStoreRef:
    kind: ClusterSecretStore
    name: 1p
  dataFrom:
    - extract:
        key: ".${CLUSTER_NAME}-vars"
    - extract:
        key: "authentik vars - ${CLUSTER_NAME}"
    - extract:
        key: "${APPNAME} - ${CLUSTER_NAME}"
  target:
    creationPolicy: Owner
    deletionPolicy: Retain
    name: *name
    template:
      type: Opaque
      data:
        # Core cluster-wide vars
        CLUSTER_NAME: "${CLUSTER_NAME:=biohazard}"
        CONFIG_TZ: '{{ .CONFIG_TZ }}'
        # App specific
        APP_DNS_APPNAME: '{{ .APP_DNS_APPNAME }}'
        APP_UID_APPNAME: '{{ .APP_UID_APPNAME }}'
        APP_IP_APPNAME: '{{ .APP_IP_APPNAME }}'
        # authentik hostAliases
        APP_DNS_AUTHENTIK: '{{ .APP_DNS_AUTHENTIK }}'
        APP_IP_AUTHENTIK: '{{ .APP_IP_AUTHENTIK }}'
        # Other cluster-wide vars
        IP_JJ_V4: '{{ .IP_JJ_V4 }}'
        DNS_CF: '{{ .DNS_CF }}'
        IP_TRUENAS: '{{ .IP_TRUENAS }}'
        PATH_NAS_PERSIST_K8S: '{{ .PATH_NAS_PERSIST_K8S }}'