diff --git a/kube/deploy/core/db/pg/clusters/template/crunchy.yaml b/kube/deploy/core/db/pg/clusters/template/crunchy.yaml index 52774904..9177da1f 100644 --- a/kube/deploy/core/db/pg/clusters/template/crunchy.yaml +++ b/kube/deploy/core/db/pg/clusters/template/crunchy.yaml @@ -73,6 +73,16 @@ spec: repo3-retention-diff: "30" repo3-cipher-type: "aes-256-cbc" repos: + # the entry at index 0 of the `repos` list is used for replica recovery/replay from WALs, regardless of repo name (repo1, repo2, etc) + - name: "repo3" # Ceph RGW in-cluster + s3: &rgw + endpoint: "rgw-biohazard.${DNS_TS}" + bucket: "pg-${PG_APP_NAME}" + region: "us-east-1" + schedules: # times staggered to avoid NFS schedule causing failed jobs due to locks + full: "15 6 * * 1" # every Monday at 06:15 + differential: "15 6 * * 0,2-6" # every day at 06:15 except Monday + incremental: "15 1-5,7-23 * * *" # at :15 every hour except 00:15 and 06:15 - name: "repo1" # NFS volume: &nfs volumeClaimSpec: @@ -94,23 +104,14 @@ spec: schedules: # times staggered to avoid NFS schedule causing failed jobs due to locks full: "30 6 * * 1" # every Monday at 06:30 incremental: "30 6 * * 0,2-6" # every day at 06:30 except Monday - - name: "repo3" # Ceph RGW in-cluster - s3: &rgw - endpoint: "rgw-biohazard.${DNS_TS}" - bucket: "pg-${PG_APP_NAME}" - region: "us-east-1" - schedules: # times staggered to avoid NFS schedule causing failed jobs due to locks - full: "15 6 * * 1" # every Monday at 06:15 - differential: "15 6 * * 0,2-6" # every day at 06:15 except Monday - incremental: "15 1-5,7-23 * * *" # every hour except 06:15 - # dataSource: - # pgbackrest: - # stanza: "db" - # configuration: *brcfg - # global: *brflag - # repo: - # name: "repo3" - # s3: *rgw + dataSource: + pgbackrest: + stanza: "db" + configuration: *brcfg + global: *brflag + repo: + name: "repo3" + s3: *rgw proxy: pgBouncer: port: 5432 diff --git a/kube/deploy/core/storage/volsync/template/pvc.yaml b/kube/deploy/core/storage/volsync/template/pvc.yaml index 
b898e5ad..79b8b06f 100644 --- a/kube/deploy/core/storage/volsync/template/pvc.yaml +++ b/kube/deploy/core/storage/volsync/template/pvc.yaml @@ -5,9 +5,11 @@ metadata: name: "${PVC}" labels: snapshot.home.arpa/enabled: "true" + kustomize.toolkit.fluxcd.io/prune: "Disabled" spec: storageClassName: "${SC:=block}" accessModes: ["${ACCESSMODE:=ReadWriteOnce}"] + volumeMode: "${VOLUMEMODE:=Filesystem}" resources: requests: storage: "${SIZE:=20Gi}" diff --git a/kube/deploy/core/storage/volsync/template/rsrc.yaml b/kube/deploy/core/storage/volsync/template/rsrc.yaml index 5ff20ad5..de3c7fa1 100644 --- a/kube/deploy/core/storage/volsync/template/rsrc.yaml +++ b/kube/deploy/core/storage/volsync/template/rsrc.yaml @@ -12,9 +12,9 @@ spec: pruneIntervalDays: ${PRUNE:=14} repository: "${PVC}-restic" cacheCapacity: "${CACHESIZE:=2Gi}" - volumeSnapshotClassName: "${VSC:=block}" + volumeSnapshotClassName: "${SNAP:=block}" storageClassName: "${SC:=block}" - cacheStorageClassName: "${SC:=block}" + cacheStorageClassName: "${CACHE:=block}" moverSecurityContext: runAsUser: ${RUID:=1000} runAsGroup: ${RGID:=1000} diff --git a/kube/deploy/vm/ad/ks.yaml b/kube/deploy/vm/ad/ks.yaml index a9446b6e..480c301f 100644 --- a/kube/deploy/vm/ad/ks.yaml +++ b/kube/deploy/vm/ad/ks.yaml @@ -12,6 +12,29 @@ spec: --- apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization +metadata: + name: zz-vm-ad-dc1-pvc + namespace: flux-system +spec: + path: ./kube/deploy/vm/ad/template-dc + targetNamespace: "vm-ad" + dependsOn: + - name: 1-core-storage-volsync-app + - name: 1-core-storage-rook-ceph-cluster + postBuild: + substitute: + PVC: "vm-ad-dc1-c-drive" + SC: "file" + SNAP: "file" + SIZE: "105Gi" + VOLUMEMODE: "Filesystem" + ACCESSMODE: "ReadWriteMany" + RUID: &uid "107" + RGID: *uid + RFSG: *uid +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization metadata: name: zz-vm-ad-dc1 namespace: flux-system @@ -23,7 +46,7 @@ spec: dependsOn: - name: zz-vm-1-kubevirt-app - name: 
zz-vm-ad-1-deps - - name: 1-core-storage-rook-ceph-cluster + - name: zz-vm-ad-dc1-pvc postBuild: substitute: NUM: "1" @@ -31,6 +54,29 @@ spec: --- apiVersion: kustomize.toolkit.fluxcd.io/v1 kind: Kustomization +metadata: + name: zz-vm-ad-dc2-pvc + namespace: flux-system +spec: + path: ./kube/deploy/vm/ad/template-dc + targetNamespace: "vm-ad" + dependsOn: + - name: 1-core-storage-volsync-app + - name: 1-core-storage-rook-ceph-cluster + postBuild: + substitute: + PVC: "vm-ad-dc2-c-drive" + SC: "file" + SNAP: "file" + SIZE: "105Gi" + VOLUMEMODE: "Filesystem" + ACCESSMODE: "ReadWriteMany" + RUID: &uid "107" + RGID: *uid + RFSG: *uid +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization metadata: name: zz-vm-ad-dc2 namespace: flux-system diff --git a/kube/deploy/vm/ad/template-dc/pvc.yaml b/kube/deploy/vm/ad/template-dc/pvc.yaml deleted file mode 100644 index 3e61576c..00000000 --- a/kube/deploy/vm/ad/template-dc/pvc.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: "vm-ad-dc${NUM}-c-drive" - labels: - snapshot.home.arpa/enabled: "true" -spec: - storageClassName: "file" - accessModes: ["ReadWriteMany"] - volumeMode: "Filesystem" - resources: - requests: - storage: "105Gi" - # dataSourceRef: - # apiGroup: "volsync.backube" - # kind: "ReplicationDestination" - # name: "vm-ad-dc1-c-drive-bootstrap"