feat: add strfry

JJGadgets
2025-11-11 00:13:36 +08:00
parent bbde9eea0d
commit 586ded1bf0
11 changed files with 350 additions and 13 deletions

View File

@@ -82,6 +82,7 @@ spec:
sourceRef:
kind: GitRepository
name: flux-system
namespace: flux-system
target:
group: kustomize.toolkit.fluxcd.io
version: v1

View File

@@ -162,6 +162,7 @@ resources:
- ../../../deploy/apps/kiwix/
- ../../../deploy/apps/trilium/
- ../../../deploy/apps/etherpad/
- ../../../deploy/apps/strfry/
- ../../../deploy/vm/_kubevirt/
#- ../../../deploy/vm/_base/
- ../../../deploy/vm/ad/

View File

@@ -0,0 +1,175 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-4.4.0/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: &app strfry
namespace: *app
spec:
interval: 5m
chart:
spec:
chart: app-template
version: 4.4.0
sourceRef:
name: bjw-s
kind: HelmRepository
namespace: flux-system
values:
controllers:
app:
type: deployment
replicas: 1
strategy: RollingUpdate
pod:
labels:
ingress.home.arpa/nginx-internal: allow
egress.home.arpa/internet: allow
containers:
app:
image: &img
repository: ghcr.io/hoytech/strfry
tag: latest@sha256:03e9572b264bb6c136339c1c169af8c4c6f92febc35506e60a8aa7ff4e6281b5
env: &env
TZ: "${CONFIG_TZ}"
securityContext: &sc
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
resources:
requests:
cpu: "10m"
limits:
cpu: "1"
memory: "512Mi"
probes:
liveness:
enabled: true
readiness:
enabled: true
service:
app:
controller: app
ports:
http:
port: 80
targetPort: 8080
protocol: HTTP
appProtocol: http
ingress:
app:
className: nginx-public
annotations:
external-dns.alpha.kubernetes.io/target: "${DNS_CF:=cf}"
external-dns.alpha.kubernetes.io/cloudflare-proxied: "true"
hosts:
- host: &host "strfry.jjgadgets.tech"
paths: &paths
- path: /
pathType: Prefix
service:
identifier: app
port: http
tls:
- hosts: [*host]
persistence:
config:
type: configMap
identifier: config
advancedMounts:
app:
app:
- subPath: strfry.conf
path: /etc/strfry.conf
data:
existingClaim: strfry-data
advancedMounts:
app:
app:
- subPath: data
path: /data
# tmp:
# type: emptyDir
# medium: Memory
# sizeLimit: 16Mi
# globalMounts:
# - subPath: tmp
# path: /tmp
configMaps:
config:
data:
strfry.conf: |
db = "/data"
dbParams {
# Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
noReadAhead = false
}
events {
# Maximum size of normalised JSON, in bytes
maxEventSize = 65536
# Maximum number of tags allowed
maxNumTags = 2000
# Maximum size for tag values, in bytes
maxTagValSize = 1024
}
relay {
bind = "0.0.0.0"
port = 8080
# Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
nofiles = 1000000
# HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
realIpHeader = "x-real-ip"
info {
name = "The JJGadgets Hut"
description = "Home to the Destroyer of Tech, JJGadgets."
pubkey = "" # NIP-11: Administrative nostr pubkey, for contact purposes
contact = "https://jjgadgets.tech"
icon = "https://jjgadgets.tech/images/icon.png"
}
# Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
maxWebsocketPayloadSize = 131072
# Maximum number of filters allowed in a REQ
maxReqFilterSize = 200
# Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
autoPingSeconds = 55
# If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
enableTcpKeepalive = true
writePolicy {
plugin = ""
}
logging {
invalidEvents = false
}
}
defaultPodOptions:
automountServiceAccountToken: false
enableServiceLinks: false
hostAliases:
- ip: "${APP_IP_AUTHENTIK:=127.0.0.1}"
hostnames: ["${APP_DNS_AUTHENTIK:=authentik}"]
dnsConfig:
options:
- name: ndots
value: "1"
hostUsers: false
securityContext:
runAsNonRoot: true
runAsUser: &uid 65534
runAsGroup: *uid
fsGroup: *uid
fsGroupChangePolicy: Always
seccompProfile: { type: "RuntimeDefault" }
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "fuckoff.home.arpa/{{ .Release.Name }}"
operator: DoesNotExist

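A quick way to sanity-check the relay.info block above once this HelmRelease reconciles is to request the relay's NIP-11 information document over plain HTTP. The sketch below is a minimal check, not part of this commit: it assumes the ingress host strfry.jjgadgets.tech from the HelmRelease, and the JSON field names (name, description, contact) come from the NIP-11 spec rather than from anything strfry-specific.

#!/usr/bin/env python3
# Minimal NIP-11 sanity check (sketch): nostr relays return a JSON info
# document when an HTTP GET carries "Accept: application/nostr+json".
import json
import urllib.request

# Host taken from the ingress in the HelmRelease above.
RELAY_URL = "https://strfry.jjgadgets.tech/"

req = urllib.request.Request(
    RELAY_URL,
    headers={"Accept": "application/nostr+json"},
)
with urllib.request.urlopen(req, timeout=10) as resp:
    info = json.load(resp)

# These values should mirror the relay.info block in strfry.conf.
print("name:       ", info.get("name"))
print("description:", info.get("description"))
print("contact:    ", info.get("contact"))

If the wiring is correct, the printed values should match the relay.info block in the ConfigMap; a connection error or an HTML response usually points at the nginx-public ingress or the port 80 → 8080 targetPort mapping rather than at strfry itself.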
View File

@@ -0,0 +1,63 @@
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: strfry-app
namespace: &ns strfry
labels: &l
app.kubernetes.io/name: "strfry"
spec:
targetNamespace: *ns
commonMetadata:
labels: *l
path: ./kube/deploy/apps/strfry/app
components:
- ../../../core/flux-system/alerts/template/
dependsOn:
- name: crds
namespace: flux-system
- name: strfry-pvc
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: strfry-pvc
namespace: &ns strfry
labels: &l
app.kubernetes.io/name: strfry
pvc.home.arpa/volsync: "true"
spec:
targetNamespace: *ns
commonMetadata:
labels: *l
path: ./kube/deploy/core/storage/volsync/template
dependsOn:
- name: crds
namespace: flux-system
postBuild:
substitute:
PVC: &pvc "strfry-data"
SIZE: "10Gi"
SC: &sc "file"
SNAP: *sc
ACCESSMODE: "ReadWriteMany"
SNAP_ACCESSMODE: "ReadOnlyMany"
VS_APP_CURRENT_VERSION: "ghcr.io/hoytech/strfry:latest@sha256:03e9572b264bb6c136339c1c169af8c4c6f92febc35506e60a8aa7ff4e6281b5"
RUID: &uid "1000"
RGID: *uid
RFSG: *uid
healthChecks:
- apiVersion: v1
kind: PersistentVolumeClaim
name: *pvc
namespace: *ns
- apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
name: "strfry-data-r2-updates-restic"
namespace: *ns
healthCheckExprs:
- apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
current: status.conditions.filter(s, s.status == "False").all(s, s.reason == "WaitingForManual" || s.reason == "WaitingForSchedule") && status.latestMoverStatus.result == "Successful"
# inProgress: status.conditions.filter(s, s.status == "True").all(s, s.reason == "SyncInProgress") # TODO: somehow the inProgress healthCheckExprs CEL expression causes Flux to infinitely think it's inProgress
failed: status.latestMoverStatus.result == "Failed" # TODO: add failed conditions

View File

@@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: strfry
resources:
- ns.yaml
- vars.yaml
- ks.yaml

View File

@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: strfry
labels:
kustomize.toolkit.fluxcd.io/prune: disabled
pod-security.kubernetes.io/enforce: &ps restricted
pod-security.kubernetes.io/audit: *ps
pod-security.kubernetes.io/warn: *ps

View File

@@ -0,0 +1,33 @@
---
# yaml-language-server: $schema=https://crds.jank.ing/external-secrets.io/externalsecret_v1.json
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: &name "${CLUSTER_NAME}-vars"
namespace: strfry
spec:
refreshInterval: 1m
secretStoreRef:
kind: ClusterSecretStore
name: 1p
dataFrom:
- extract:
key: ".${CLUSTER_NAME}-vars"
- extract:
key: "authentik vars - ${CLUSTER_NAME}"
- extract:
key: "strfry - ${CLUSTER_NAME}"
target:
creationPolicy: Owner
deletionPolicy: Retain
name: *name
template:
type: Opaque
data:
# Core cluster-wide vars
CLUSTER_NAME: "${CLUSTER_NAME:=biohazard}"
CONFIG_TZ: '{{ .CONFIG_TZ }}'
# App specific
APP_DNS_APPNAME: '{{ .APP_DNS_APPNAME }}'
# Other cluster-wide vars
DNS_CF: '{{ .DNS_CF }}'

View File

@@ -1,5 +1,5 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-4.2.0/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-4.4.0/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
@@ -10,7 +10,7 @@ spec:
chart:
spec:
chart: app-template
version: 4.1.2
version: 4.4.0
sourceRef:
name: bjw-s
kind: HelmRepository
@@ -168,15 +168,18 @@ spec:
type: configMap
identifier: config
advancedMounts:
${APPNAME}:
app:
app:
- subPath: server.toml
path: /data/server.toml
data:
existingClaim: ${APPNAME}-data
globalMounts:
- subPath: data
path: /data
advancedMounts:
app:
app:
- subPath: data
path: /data
nfs:
type: nfs
server: "${IP_TRUENAS:=127.0.0.1}"
@@ -196,7 +199,7 @@ spec:
name: ${APPNAME}-tls
defaultMode: 0400
advancedMounts:
${APPNAME}:
app:
app:
- subPath: tls.crt
path: /tls/fullchain.pem

View File

@@ -23,12 +23,12 @@ apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: ${APPNAME}-pvc
namespace: flux-system
namespace: &ns ${APPNAME}
labels: &l
app.kubernetes.io/name: ${APPNAME}
pvc.home.arpa/volsync: "true"
spec:
targetNamespace: &ns ${APPNAME}
targetNamespace: *ns
commonMetadata:
labels: *l
path: ./kube/deploy/core/storage/volsync/template
@@ -59,15 +59,15 @@ spec:
healthCheckExprs:
- apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
current: status.conditions.all(s, s.reason == "WaitingForManual") && status.latestMoverStatus.result == "Successful"
inProgress: status.conditions.all(s, s.reason == "SyncInProgress")
current: status.conditions.filter(s, s.status == "False").all(s, s.reason == "WaitingForManual" || s.reason == "WaitingForSchedule") && status.latestMoverStatus.result == "Successful"
# inProgress: status.conditions.filter(s, s.status == "True").all(s, s.reason == "SyncInProgress") # TODO: somehow the inProgress healthCheckExprs CEL expression causes Flux to infinitely think it's inProgress
failed: status.latestMoverStatus.result == "Failed" # TODO: add failed conditions
---
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: ${APPNAME}-db
namespace: flux-system
namespace: &ns pg
labels: &l
prune.flux.home.arpa/enabled: "true"
db.home.arpa/pg: "pg-default"
@@ -76,7 +76,7 @@ spec:
commonMetadata:
labels: *l
path: ./kube/deploy/core/db/pg/clusters/template/pguser
targetNamespace: "pg"
targetNamespace: *ns
dependsOn:
- name: 1-core-db-pg-clusters-default
- name: 1-core-secrets-es-k8s
@@ -85,4 +85,4 @@ spec:
PG_NAME: "default"
PG_DB_USER: &app "${APPNAME}"
PG_APP_NS: *app
PG_APP_CURRENT_VERSION: "{{.IMAGENAME}}:{{.IMAGETAG}}"
PG_APP_CURRENT_VERSION: "${IMAGENAME}:${IMAGETAG}"

View File

@@ -1,6 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ${APPNAME}
resources:
- ns.yaml
- vars.yaml
- ks.yaml

View File

@@ -0,0 +1,41 @@
---
# yaml-language-server: $schema=https://crds.jank.ing/external-secrets.io/externalsecret_v1.json
apiVersion: external-secrets.io/v1
kind: ExternalSecret
metadata:
name: &name "${CLUSTER_NAME:=biohazard}-vars"
namespace: ${APPNAME}
spec:
refreshInterval: 1m
secretStoreRef:
kind: ClusterSecretStore
name: 1p
dataFrom:
- extract:
key: ".${CLUSTER_NAME}-vars"
- extract:
key: "authentik vars - ${CLUSTER_NAME}"
- extract:
key: "${APPNAME} - ${CLUSTER_NAME}"
target:
creationPolicy: Owner
deletionPolicy: Retain
name: *name
template:
type: Opaque
data:
# Core cluster-wide vars
CLUSTER_NAME: "${CLUSTER_NAME:=biohazard}"
CONFIG_TZ: '{{ .CONFIG_TZ }}'
# App specific
APP_DNS_APPNAME: '{{ .APP_DNS_APPNAME }}'
APP_UID_APPNAME: '{{ .APP_UID_APPNAME }}'
APP_IP_APPNAME: '{{ .APP_IP_APPNAME }}'
# authentik hostAliases
APP_DNS_AUTHENTIK: '{{ .APP_DNS_AUTHENTIK }}'
APP_IP_AUTHENTIK: '{{ .APP_IP_AUTHENTIK }}'
# Other cluster-wide vars
IP_JJ_V4: '{{ .IP_JJ_V4 }}'
DNS_CF: '{{ .DNS_CF }}'
IP_TRUENAS: '{{ .IP_TRUENAS }}'
PATH_NAS_PERSIST_K8S: '{{ .PATH_NAS_PERSIST_K8S }}'