diff --git a/kube/deploy/apps/audiobookshelf/app/volsync.yaml b/kube/deploy/apps/audiobookshelf/app/volsync.yaml
index 8e793e47..5cf38a41 100644
--- a/kube/deploy/apps/audiobookshelf/app/volsync.yaml
+++ b/kube/deploy/apps/audiobookshelf/app/volsync.yaml
@@ -44,7 +44,7 @@ spec:
   trigger:
     manual: "restore-once-bootstrap"
   restic:
-    repository: "audiobookshelf-restic"
+    repository: "audiobookshelf-config-restic"
     copyMethod: "Snapshot"
     volumeSnapshotClassName: "file"
     storageClassName: "file"
diff --git a/kube/deploy/apps/jellyfin/app/_nfs.yaml b/kube/deploy/apps/jellyfin/app/_nfs.yaml
deleted file mode 100644
index bda26149..00000000
--- a/kube/deploy/apps/jellyfin/app/_nfs.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: jellyfin-nas-media
-spec:
-  storageClassName: jellyfin-nas-media
-  capacity:
-    storage: 1Mi
-  accessModes:
-    - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  nfs:
-    server: "${IP_TRUENAS}"
-    path: "${PATH_NAS_MEDIA}"
-  mountOptions:
-    - nfsvers=4.2
-    - tcp
-    - intr
-    - soft
-    - noatime
-    - nodiratime
-    - nocto
-    - nconnect=8
-    - rsize=131072
-    - wsize=131072
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: jellyfin-nas-media
-  namespace: jellyfin
-spec:
-  accessModes:
-    - ReadWriteMany
-  storageClassName: jellyfin-nas-media
-  resources:
-    requests:
-      storage: 1Mi
diff --git a/kube/deploy/apps/jellyfin/app/hr.yaml b/kube/deploy/apps/jellyfin/app/hr.yaml
index d5a9db52..96e56b20 100644
--- a/kube/deploy/apps/jellyfin/app/hr.yaml
+++ b/kube/deploy/apps/jellyfin/app/hr.yaml
@@ -20,8 +20,10 @@ spec:
       egress.home.arpa/world-https: "allow"
       egress.home.arpa/dlna: "allow"
       authentik.home.arpa/ldap: "allow"
+      egress.home.arpa/r2: "allow"
     controller:
-      type: statefulset
+      type: "deployment"
+      replicas: 1
     image:
       repository: ghcr.io/onedr0p/jellyfin
       tag: 10.8.9@sha256:01a6497a1290b73f63251aebe6d01327c15b3413c3e1013c2a396ffe116ced4d
@@ -55,20 +57,17 @@ spec:
       fsGroup: 568
       fsGroupChangePolicy: OnRootMismatch
       supplementalGroups: [44, 104, 109, 6969]
-    volumeClaimTemplates:
-      - name: config
-        mountPath: /config
-        accessMode: ReadWriteOnce
-        size: 50Gi
-        storageClass: block
     persistence:
+      data:
+        enabled: true
+        existingClaim: "jellyfin-data"
+        mountPath: "/config"
       media:
         enabled: true
-        type: nfs
+        type: "nfs"
         server: "${IP_TRUENAS}"
         path: "${PATH_NAS_MEDIA}"
-        # existingClaim: jellyfin-nas-media
-        mountPath: /media
+        mountPath: "/media"
         readOnly: true
       transcode:
         enabled: true
@@ -80,4 +79,5 @@ spec:
        cpu: 100m
        memory: 1000Mi
      limits:
+       cpu: 3000m
        memory: 6000Mi
diff --git a/kube/deploy/apps/jellyfin/app/kustomization.yaml b/kube/deploy/apps/jellyfin/app/kustomization.yaml
index e2baa693..8400f5c6 100644
--- a/kube/deploy/apps/jellyfin/app/kustomization.yaml
+++ b/kube/deploy/apps/jellyfin/app/kustomization.yaml
@@ -2,9 +2,17 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - _nfs.yaml
+  # - _nfs.yaml
   - pvc.yaml
   - hr.yaml
   - gpu-patch.yaml
   # - netpol.yaml
-  - volsync.yaml
\ No newline at end of file
+  - volsync.yaml
+# secretGenerator:
+#   - name: jellyfin-litestream-config
+#     namespace: jellyfin
+#     type: Opaque
+#     files:
+#       - "./litestream.yml"
+# generatorOptions:
+#   disableNameSuffixHash: true
diff --git a/kube/deploy/apps/jellyfin/app/pvc.yaml b/kube/deploy/apps/jellyfin/app/pvc.yaml
index f0138c31..9a44fda1 100644
--- a/kube/deploy/apps/jellyfin/app/pvc.yaml
+++ b/kube/deploy/apps/jellyfin/app/pvc.yaml
@@ -2,16 +2,19 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: jellyfin-data
-  namespace: &app jellyfin
+  name: "jellyfin-data"
+  namespace: &app "jellyfin"
   labels:
     app.kubernetes.io/name: *app
     app.kubernetes.io/instance: *app
     snapshot.home.arpa/enabled: "true"
 spec:
-  storageClassName: file
-  accessModes:
-    - ReadWriteMany
+  storageClassName: "file"
+  accessModes: ["ReadWriteMany"]
   resources:
     requests:
-      storage: 50Gi
\ No newline at end of file
+      storage: "50Gi"
+  dataSourceRef:
+    apiGroup: "volsync.backube"
+    kind: "ReplicationDestination"
+    name: "jellyfin-data-bootstrap"
diff --git a/kube/deploy/apps/jellyfin/app/volsync.yaml b/kube/deploy/apps/jellyfin/app/volsync.yaml
index 06b7964f..37a5042b 100644
--- a/kube/deploy/apps/jellyfin/app/volsync.yaml
+++ b/kube/deploy/apps/jellyfin/app/volsync.yaml
@@ -2,31 +2,31 @@
 apiVersion: v1
 kind: Secret
 metadata:
-  name: jellyfin-restic
-  namespace: jellyfin
+  name: "jellyfin-data-restic"
+  namespace: "jellyfin"
 type: Opaque
 stringData:
-  RESTIC_REPOSITORY: ${SECRET_VOLSYNC_R2_REPO}/jellyfin
-  RESTIC_PASSWORD: ${SECRET_VOLSYNC_PASSWORD}
-  AWS_ACCESS_KEY_ID: ${SECRET_VOLSYNC_R2_ID}
-  AWS_SECRET_ACCESS_KEY: ${SECRET_VOLSYNC_R2_KEY}
+  RESTIC_REPOSITORY: "${SECRET_VOLSYNC_R2_REPO}/jellyfin"
+  RESTIC_PASSWORD: "${SECRET_VOLSYNC_PASSWORD}"
+  AWS_ACCESS_KEY_ID: "${SECRET_VOLSYNC_R2_ID}"
+  AWS_SECRET_ACCESS_KEY: "${SECRET_VOLSYNC_R2_KEY}"
 ---
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationSource
 metadata:
-  name: jellyfin-restic
-  namespace: jellyfin
+  name: "jellyfin-data-restic"
+  namespace: "jellyfin"
 spec:
-  sourcePVC: config-jellyfin-0
+  sourcePVC: "jellyfin-data"
   trigger:
     schedule: "0 6 * * *"
   restic:
-    copyMethod: Snapshot
+    copyMethod: "Snapshot"
     pruneIntervalDays: 14
-    repository: jellyfin-restic
-    cacheCapacity: 2Gi
-    volumeSnapshotClassName: block
-    storageClassName: block
+    repository: "jellyfin-data-restic"
+    cacheCapacity: "2Gi"
+    volumeSnapshotClassName: "file"
+    storageClassName: "file"
     moverSecurityContext:
       runAsUser: 568
       runAsGroup: 568
@@ -34,3 +34,23 @@ spec:
     retain:
       daily: 14
       within: 7d
+---
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: "jellyfin-data-bootstrap"
+  namespace: "jellyfin"
+spec:
+  trigger:
+    manual: "restore-once-bootstrap"
+  restic:
+    repository: "jellyfin-data-restic"
+    copyMethod: "Snapshot"
+    volumeSnapshotClassName: "file"
+    storageClassName: "file"
+    capacity: "50Gi"
+    accessModes: ["ReadWriteMany"]
+    moverSecurityContext:
+      runAsUser: 568
+      runAsGroup: 568
+      fsGroup: 568
diff --git a/kube/deploy/apps/kavita/app/_nfs.yaml b/kube/deploy/apps/kavita/app/_nfs.yaml
deleted file mode 100644
index e0c754d7..00000000
--- a/kube/deploy/apps/kavita/app/_nfs.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: kavita-nas-media
-spec:
-  storageClassName: kavita-nas-media
-  capacity:
-    storage: 1Mi
-  accessModes:
-    - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  nfs:
-    server: "${IP_TRUENAS}"
-    path: "${PATH_NAS_MEDIA}"
-  mountOptions:
-    - nfsvers=4.2
-    - tcp
-    - intr
-    - soft
-    - noatime
-    - nodiratime
-    - nocto
-    - nconnect=8
-    - rsize=131072
-    - wsize=131072
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: kavita-nas-media
-  namespace: kavita
-spec:
-  accessModes:
-    - ReadWriteMany
-  storageClassName: kavita-nas-media
-  resources:
-    requests:
-      storage: 1Mi
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: kavita-nas-backups
-spec:
-  storageClassName: kavita-nas-backups
-  capacity:
-    storage: 1Mi
-  accessModes:
-    - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  nfs:
-    server: "${IP_TRUENAS}"
-    path: "${PATH_NAS_BACKUPS_K8S}/kavita"
-  mountOptions:
-    - nfsvers=4.2
-    - tcp
-    - intr
-    - hard
-    - noatime
-    - nodiratime
-    - nocto
-    - nconnect=8
-    - rsize=131072
-    - wsize=131072
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: kavita-nas-backups
-  namespace: kavita
-spec:
-  accessModes:
-    - ReadWriteMany
-  storageClassName: kavita-nas-backups
-  resources:
-    requests:
-      storage: 1Mi
diff --git a/kube/deploy/apps/kavita/app/hr.yaml b/kube/deploy/apps/kavita/app/hr.yaml
index 1a82516f..f8e0c74d 100644
--- a/kube/deploy/apps/kavita/app/hr.yaml
+++ b/kube/deploy/apps/kavita/app/hr.yaml
@@ -16,7 +16,8 @@ spec:
   values:
     automountServiceAccountToken: false
     controller:
-      type: statefulset
+      type: "deployment"
+      replicas: 1
     image:
       repository: kizaing/kavita
       tag: 0.7.1
@@ -40,23 +41,28 @@ spec:
         tls:
           - hosts:
              - *host
-    volumeClaimTemplates:
-      - name: config
-        mountPath: /kavita/config
-        accessMode: ReadWriteOnce
-        size: 50Gi
-        storageClass: block
     persistence:
+      config:
+        enabled: true
+        existingClaim: "kavita-config"
+        mountPath: "/kavita/config"
       media:
         enabled: true
-        existingClaim: kavita-nas-media
-        mountPath: /media
+        type: "nfs"
+        server: "${IP_TRUENAS}"
+        path: "${PATH_NAS_MEDIA}"
+        mountPath: "/media"
         readOnly: true
       backups:
         enabled: true
-        existingClaim: kavita-nas-backups
-        mountPath: /kavita/config/backups
+        type: "nfs"
+        server: "${IP_TRUENAS}"
+        path: "${PATH_NAS_BACKUPS_K8S}/kavita"
+        mountPath: "/kavita/config/backups"
     resources:
       requests:
-        cpu: 20m
-        memory: 100Mi
+        cpu: "20m"
+        memory: "250Mi"
+      limits:
+        cpu: "3000m"
+        memory: "1Gi"
diff --git a/kube/deploy/apps/kavita/app/kustomization.yaml b/kube/deploy/apps/kavita/app/kustomization.yaml
deleted file mode 100644
index 5f95cd42..00000000
--- a/kube/deploy/apps/kavita/app/kustomization.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - _nfs.yaml
-  - hr.yaml
-  # - netpol.yaml
-  - volsync.yaml
diff --git a/kube/deploy/apps/kavita/app/pvc.yaml b/kube/deploy/apps/kavita/app/pvc.yaml
new file mode 100644
index 00000000..37730111
--- /dev/null
+++ b/kube/deploy/apps/kavita/app/pvc.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: "kavita-config"
+  namespace: &app "kavita"
+  labels:
+    app.kubernetes.io/name: *app
+    app.kubernetes.io/instance: *app
+    snapshot.home.arpa/enabled: "true"
+spec:
+  storageClassName: "file"
+  accessModes: ["ReadWriteMany"]
+  resources:
+    requests:
+      storage: "10Gi"
+  dataSourceRef:
+    apiGroup: "volsync.backube"
+    kind: "ReplicationDestination"
+    name: "kavita-config-bootstrap"
diff --git a/kube/deploy/apps/kavita/app/volsync.yaml b/kube/deploy/apps/kavita/app/volsync.yaml
index 968a6985..e528440d 100644
--- a/kube/deploy/apps/kavita/app/volsync.yaml
+++ b/kube/deploy/apps/kavita/app/volsync.yaml
@@ -2,31 +2,31 @@
 apiVersion: v1
 kind: Secret
 metadata:
-  name: kavita-restic
-  namespace: kavita
+  name: "kavita-config-restic"
+  namespace: "kavita"
 type: Opaque
 stringData:
-  RESTIC_REPOSITORY: ${SECRET_VOLSYNC_R2_REPO}/kavita
-  RESTIC_PASSWORD: ${SECRET_VOLSYNC_PASSWORD}
-  AWS_ACCESS_KEY_ID: ${SECRET_VOLSYNC_R2_ID}
-  AWS_SECRET_ACCESS_KEY: ${SECRET_VOLSYNC_R2_KEY}
+  RESTIC_REPOSITORY: "${SECRET_VOLSYNC_R2_REPO}/kavita"
+  RESTIC_PASSWORD: "${SECRET_VOLSYNC_PASSWORD}"
+  AWS_ACCESS_KEY_ID: "${SECRET_VOLSYNC_R2_ID}"
+  AWS_SECRET_ACCESS_KEY: "${SECRET_VOLSYNC_R2_KEY}"
 ---
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationSource
 metadata:
-  name: kavita-restic
-  namespace: kavita
+  name: "kavita-config-restic"
+  namespace: "kavita"
 spec:
-  sourcePVC: config-kavita-0
+  sourcePVC: "kavita-config"
   trigger:
     schedule: "0 6 * * *"
   restic:
-    copyMethod: Snapshot
+    copyMethod: "Snapshot"
     pruneIntervalDays: 14
-    repository: kavita-restic
-    cacheCapacity: 2Gi
-    volumeSnapshotClassName: block
-    storageClassName: block
+    repository: "kavita-config-restic"
+    cacheCapacity: "2Gi"
+    volumeSnapshotClassName: "file"
+    storageClassName: "file"
     moverSecurityContext:
       runAsUser: 0
      runAsGroup: 0
@@ -34,3 +34,23 @@ spec:
     retain:
       daily: 14
       within: 7d
+---
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: "kavita-config-bootstrap"
+  namespace: "kavita"
+spec:
+  trigger:
+    manual: "restore-once-bootstrap"
+  restic:
+    repository: "kavita-config-restic"
+    copyMethod: "Snapshot"
+    volumeSnapshotClassName: "file"
+    storageClassName: "file"
+    capacity: "10Gi"
+    accessModes: ["ReadWriteMany"]
+    moverSecurityContext:
+      runAsUser: &uid 0
+      runAsGroup: *uid
+      fsGroup: *uid
diff --git a/kube/deploy/apps/minecraft/app/hr.yaml b/kube/deploy/apps/minecraft/app/hr.yaml
index 25463903..4c3336ea 100644
--- a/kube/deploy/apps/minecraft/app/hr.yaml
+++ b/kube/deploy/apps/minecraft/app/hr.yaml
@@ -16,7 +16,7 @@ spec:
   values:
     controllers:
       main:
-        type: statefulset
+        type: "deployment"
         replicas: 1
         pod:
           labels:
@@ -58,7 +58,7 @@ spec:
              better-combat:Z6wHaEla
              dual-swords:7KDebPb5
            DISABLED_MODS: |- # this is for my reference
-             servercore:exA4UxFq
+             servercore:exA4UxFq # disabled due to the 1.20.1 Fabric version causing mixin launch errors
              tick-stasis # disabled due to kube-probe never succeeding from paused server, have to check if timeout config exists
            SERVER_NAME: "${CONFIG_MINECRAFT_NAME}"
            OPS: "${CONFIG_MINECRAFT_OPS}"
@@ -121,15 +121,6 @@ spec:
            capabilities:
              drop: ["ALL"]
              add: ["NET_RAW"]
-    statefulset:
-      volumeClaimTemplates:
-        - name: data
-          accessMode: ReadWriteOnce
-          size: 20Gi
-          storageClass: block
-          globalMounts:
-            - path: "/data"
-              readOnly: false
     service:
       main:
         enabled: true
@@ -159,6 +150,12 @@ spec:
       seccompProfile: {type: "RuntimeDefault"}
       fsGroupChangePolicy: "Always"
     persistence:
+      data:
+        enabled: true
+        existingClaim: "minecraft-data"
+        globalMounts:
+          - path: "/data"
+            readOnly: false
       tmp:
         enabled: true
         type: emptyDir
diff --git a/kube/deploy/apps/minecraft/app/pvc.yaml b/kube/deploy/apps/minecraft/app/pvc.yaml
new file mode 100644
index 00000000..0b3c0aea
--- /dev/null
+++ b/kube/deploy/apps/minecraft/app/pvc.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: "minecraft-data"
+  namespace: &app "minecraft"
+  labels:
+    app.kubernetes.io/name: *app
+    app.kubernetes.io/instance: *app
+    snapshot.home.arpa/enabled: "true"
+spec:
+  storageClassName: "block"
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: "20Gi"
+  dataSourceRef:
+    apiGroup: "volsync.backube"
+    kind: "ReplicationDestination"
+    name: "minecraft-data-bootstrap"
diff --git a/kube/deploy/apps/minecraft/app/volsync.yaml b/kube/deploy/apps/minecraft/app/volsync.yaml
index a5fc8571..58adc95d 100644
--- a/kube/deploy/apps/minecraft/app/volsync.yaml
+++ b/kube/deploy/apps/minecraft/app/volsync.yaml
@@ -2,35 +2,55 @@
 apiVersion: v1
 kind: Secret
 metadata:
-  name: minecraft-restic
-  namespace: minecraft
+  name: "minecraft-data-restic"
+  namespace: "minecraft"
 type: Opaque
 stringData:
-  RESTIC_REPOSITORY: ${SECRET_VOLSYNC_R2_REPO}/minecraft
-  RESTIC_PASSWORD: ${SECRET_VOLSYNC_PASSWORD}
-  AWS_ACCESS_KEY_ID: ${SECRET_VOLSYNC_R2_ID}
-  AWS_SECRET_ACCESS_KEY: ${SECRET_VOLSYNC_R2_KEY}
+  RESTIC_REPOSITORY: "${SECRET_VOLSYNC_R2_REPO}/minecraft"
+  RESTIC_PASSWORD: "${SECRET_VOLSYNC_PASSWORD}"
+  AWS_ACCESS_KEY_ID: "${SECRET_VOLSYNC_R2_ID}"
+  AWS_SECRET_ACCESS_KEY: "${SECRET_VOLSYNC_R2_KEY}"
 ---
 apiVersion: volsync.backube/v1alpha1
 kind: ReplicationSource
 metadata:
-  name: minecraft-restic
-  namespace: minecraft
+  name: "minecraft-data-restic"
+  namespace: "minecraft"
 spec:
-  sourcePVC: data-minecraft-0
+  sourcePVC: "minecraft-data"
   trigger:
     schedule: "0 6 * * *"
   restic:
-    copyMethod: Snapshot
+    copyMethod: "Snapshot"
     pruneIntervalDays: 14
-    repository: minecraft-restic
-    cacheCapacity: 2Gi
-    volumeSnapshotClassName: block
-    storageClassName: block
+    repository: "minecraft-data-restic"
+    cacheCapacity: "2Gi"
+    volumeSnapshotClassName: "block"
+    storageClassName: "block"
     moverSecurityContext:
-      runAsUser: ${APP_UID_MINECRAFT}
-      runAsGroup: ${APP_UID_MINECRAFT}
-      fsGroup: ${APP_UID_MINECRAFT}
+      runAsUser: &uid ${APP_UID_MINECRAFT}
+      runAsGroup: *uid
+      fsGroup: *uid
     retain:
       daily: 14
       within: 7d
+---
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: "minecraft-data-bootstrap"
+  namespace: "minecraft"
+spec:
+  trigger:
+    manual: "restore-once-bootstrap"
+  restic:
+    repository: "minecraft-data-restic"
+    copyMethod: "Snapshot"
+    volumeSnapshotClassName: "block"
+    storageClassName: "block"
+    capacity: "20Gi"
+    accessModes: ["ReadWriteOnce"]
+    moverSecurityContext:
+      runAsUser: &uid ${APP_UID_MINECRAFT}
+      runAsGroup: *uid
+      fsGroup: *uid
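
The Jellyfin kustomization above keeps a secretGenerator for "./litestream.yml" commented out, and the Jellyfin HelmRelease gains an egress.home.arpa/r2: "allow" pod label. If that generator is ever enabled, a minimal Litestream config could look roughly like the sketch below; the database path, bucket, object prefix, endpoint, and credentials are placeholders and assumptions, not values taken from this repo.

# litestream.yml (hypothetical sketch, not part of this change)
dbs:
  - path: "/config/data/jellyfin.db"   # assumed SQLite path inside the Jellyfin config volume; verify the real file name
    replicas:
      - type: "s3"                     # Litestream's S3-compatible replica type, usable against R2 via a custom endpoint
        bucket: "example-bucket"                                    # placeholder
        path: "jellyfin/litestream"                                 # placeholder object prefix
        endpoint: "https://<account-id>.r2.cloudflarestorage.com"   # placeholder R2 endpoint
        access-key-id: "<r2-access-key-id>"                         # placeholder credential
        secret-access-key: "<r2-secret-access-key>"                 # placeholder credential

Continuous SQLite replication along these lines would sit alongside the nightly VolSync restic snapshots configured above, which is presumably the reason for the new R2 egress label.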