mirror of https://github.com/outbackdingo/Biohazard.git (synced 2026-01-27 18:18:26 +00:00)
feat(vm-ad): add VolSync
@@ -73,6 +73,16 @@ spec:
         repo3-retention-diff: "30"
         repo3-cipher-type: "aes-256-cbc"
       repos:
+        # the entry at index 0 of the `repos` list is used for replica recovery/replay from WALs, regardless of repo name (repo1, repo2, etc.)
+        - name: "repo3" # Ceph RGW in-cluster
+          s3: &rgw
+            endpoint: "rgw-biohazard.${DNS_TS}"
+            bucket: "pg-${PG_APP_NAME}"
+            region: "us-east-1"
+          schedules: # times staggered to avoid NFS schedule causing failed jobs due to locks
+            full: "15 6 * * 1" # every Monday at 06:15
+            differential: "15 6 * * 0,2-6" # every day at 06:15 except Monday
+            incremental: "15 1-5,7-23 * * *" # at :15 every hour except 00:15 and 06:15
         - name: "repo1" # NFS
           volume: &nfs
             volumeClaimSpec:
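Because pgBackRest replays WAL for replicas from the first entry of `repos`, moving repo3 to index 0 makes the in-cluster RGW repo the recovery source instead of NFS. The `*brcfg` and `*brflag` aliases used by the `dataSource` block below refer to anchors defined elsewhere in this file; a minimal sketch of how such anchors are typically declared in a CrunchyData PGO `PostgresCluster` (the Secret name and the `repo3-retention-full` value are illustrative, not from this commit):

backups:
  pgbackrest:
    configuration: &brcfg
      - secret:
          name: "pg-s3-creds"   # hypothetical Secret holding the repo3 S3 keys and cipher pass
    global: &brflag
      repo3-retention-full: "4"        # illustrative; only the two lines below appear in this diff
      repo3-retention-diff: "30"
      repo3-cipher-type: "aes-256-cbc" # pairs with a repo3-cipher-pass entry in the config Secret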
@@ -94,23 +104,14 @@ spec:
           schedules: # times staggered to avoid NFS schedule causing failed jobs due to locks
             full: "30 6 * * 1" # every Monday at 06:30
             incremental: "30 6 * * 0,2-6" # every day at 06:30 except Monday
-        - name: "repo3" # Ceph RGW in-cluster
-          s3: &rgw
-            endpoint: "rgw-biohazard.${DNS_TS}"
-            bucket: "pg-${PG_APP_NAME}"
-            region: "us-east-1"
-          schedules: # times staggered to avoid NFS schedule causing failed jobs due to locks
-            full: "15 6 * * 1" # every Monday at 06:15
-            differential: "15 6 * * 0,2-6" # every day at 06:15 except Monday
-            incremental: "15 1-5,7-23 * * *" # every hour except 06:15
-  # dataSource:
-  #   pgbackrest:
-  #     stanza: "db"
-  #     configuration: *brcfg
-  #     global: *brflag
-  #     repo:
-  #       name: "repo3"
-  #       s3: *rgw
+  dataSource:
+    pgbackrest:
+      stanza: "db"
+      configuration: *brcfg
+      global: *brflag
+      repo:
+        name: "repo3"
+        s3: *rgw
   proxy:
     pgBouncer:
       port: 5432
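With `dataSource.pgbackrest` uncommented, PGO bootstraps a freshly created cluster by restoring from repo3 rather than initializing an empty database; on an already-running cluster the block has no effect. For contrast, an in-place restore from the same repo would look roughly like this (a sketch of PGO's restore API, not part of this commit; from memory, PGO also requires a `postgres-operator.crunchydata.com/pgbackrest-restore` annotation on the cluster to trigger it):

backups:
  pgbackrest:
    restore:
      enabled: true
      repoName: "repo3"
      options:
        - --type=time
        - --target="2026-01-27 06:00:00+00"  # illustrative PITR target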
@@ -5,9 +5,11 @@ metadata:
   name: "${PVC}"
   labels:
     snapshot.home.arpa/enabled: "true"
+    kustomize.toolkit.fluxcd.io/prune: "Disabled"
 spec:
   storageClassName: "${SC:=block}"
   accessModes: ["${ACCESSMODE:=ReadWriteOnce}"]
+  volumeMode: "${VOLUMEMODE:=Filesystem}"
   resources:
     requests:
       storage: "${SIZE:=20Gi}"
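The `${VAR:=default}` tokens use Flux's bash-style post-build variable substitution: a value set in a Kustomization's `postBuild.substitute` map wins, otherwise the default after `:=` applies. A minimal consumer that overrides only the name and size (the resource name here is hypothetical, and `sourceRef` is omitted for brevity):

apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: example-pvc   # hypothetical
  namespace: flux-system
spec:
  path: ./kube/deploy/vm/ad/template-dc
  targetNamespace: "example"
  postBuild:
    substitute:
      PVC: "example-data"
      SIZE: "50Gi"  # SC, ACCESSMODE and VOLUMEMODE fall back to their := defaults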
@@ -12,9 +12,9 @@ spec:
     pruneIntervalDays: ${PRUNE:=14}
     repository: "${PVC}-restic"
     cacheCapacity: "${CACHESIZE:=2Gi}"
-    volumeSnapshotClassName: "${VSC:=block}"
+    volumeSnapshotClassName: "${SNAP:=block}"
     storageClassName: "${SC:=block}"
-    cacheStorageClassName: "${SC:=block}"
+    cacheStorageClassName: "${CACHE:=block}"
     moverSecurityContext:
       runAsUser: ${RUID:=1000}
       runAsGroup: ${RGID:=1000}
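These fields are VolSync restic mover options; the rename gives the snapshot class (`SNAP`) and the cache storage class (`CACHE`) their own substitution variables instead of reusing `VSC`/`SC`. A minimal sketch of the surrounding object, assuming it is a `ReplicationSource` (the diff shows only the restic block, so everything outside it is assumed):

apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
  name: "${PVC}"
spec:
  sourcePVC: "${PVC}"
  trigger:
    schedule: "0 * * * *"  # hypothetical hourly backup trigger
  restic:
    copyMethod: Snapshot   # snapshot the source PVC before backing it up
    pruneIntervalDays: ${PRUNE:=14}
    repository: "${PVC}-restic"
    cacheCapacity: "${CACHESIZE:=2Gi}"
    volumeSnapshotClassName: "${SNAP:=block}"
    storageClassName: "${SC:=block}"
    cacheStorageClassName: "${CACHE:=block}"
    moverSecurityContext:
      runAsUser: ${RUID:=1000}
      runAsGroup: ${RGID:=1000}
      fsGroup: ${RFSG:=1000}   # RFSG is substituted by the Kustomizations below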
@@ -12,6 +12,29 @@ spec:
 ---
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: zz-vm-ad-dc1-pvc
+  namespace: flux-system
+spec:
+  path: ./kube/deploy/vm/ad/template-dc
+  targetNamespace: "vm-ad"
+  dependsOn:
+    - name: 1-core-storage-volsync-app
+    - name: 1-core-storage-rook-ceph-cluster
+  postBuild:
+    substitute:
+      PVC: "vm-ad-dc1-c-drive"
+      SC: "file"
+      SNAP: "file"
+      SIZE: "105Gi"
+      VOLUMEMODE: "Filesystem"
+      ACCESSMODE: "ReadWriteMany"
+      RUID: &uid "107"
+      RGID: *uid
+      RFSG: *uid
+---
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
   name: zz-vm-ad-dc1
   namespace: flux-system
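Putting the hunks together: after `zz-vm-ad-dc1-pvc` substitutes its variables into the PVC template above, the rendered object works out to roughly:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: "vm-ad-dc1-c-drive"
  namespace: "vm-ad"   # from targetNamespace
  labels:
    snapshot.home.arpa/enabled: "true"
    kustomize.toolkit.fluxcd.io/prune: "Disabled"  # Flux won't garbage-collect the VM disk
spec:
  storageClassName: "file"
  accessModes: ["ReadWriteMany"]
  volumeMode: "Filesystem"
  resources:
    requests:
      storage: "105Gi"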
@@ -23,7 +46,7 @@ spec:
   dependsOn:
     - name: zz-vm-1-kubevirt-app
     - name: zz-vm-ad-1-deps
-    - name: 1-core-storage-rook-ceph-cluster
+    - name: zz-vm-ad-dc1-pvc
   postBuild:
     substitute:
       NUM: "1"
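Dropping the direct `1-core-storage-rook-ceph-cluster` entry loses no ordering: the new `zz-vm-ad-dc1-pvc` Kustomization above already depends on it, so `zz-vm-ad-dc1` still waits for Rook transitively via the PVC.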
@@ -31,6 +54,29 @@ spec:
 ---
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: zz-vm-ad-dc2-pvc
+  namespace: flux-system
+spec:
+  path: ./kube/deploy/vm/ad/template-dc
+  targetNamespace: "vm-ad"
+  dependsOn:
+    - name: 1-core-storage-volsync-app
+    - name: 1-core-storage-rook-ceph-cluster
+  postBuild:
+    substitute:
+      PVC: "vm-ad-dc2-c-drive"
+      SC: "file"
+      SNAP: "file"
+      SIZE: "105Gi"
+      VOLUMEMODE: "Filesystem"
+      ACCESSMODE: "ReadWriteMany"
+      RUID: &uid "107"
+      RGID: *uid
+      RFSG: *uid
+---
 apiVersion: kustomize.toolkit.fluxcd.io/v1
 kind: Kustomization
 metadata:
   name: zz-vm-ad-dc2
   namespace: flux-system

@@ -1,18 +0,0 @@
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: "vm-ad-dc${NUM}-c-drive"
-  labels:
-    snapshot.home.arpa/enabled: "true"
-spec:
-  storageClassName: "file"
-  accessModes: ["ReadWriteMany"]
-  volumeMode: "Filesystem"
-  resources:
-    requests:
-      storage: "105Gi"
-  # dataSourceRef:
-  #   apiGroup: "volsync.backube"
-  #   kind: "ReplicationDestination"
-  #   name: "vm-ad-dc1-c-drive-bootstrap"
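The commented `dataSourceRef` in the deleted file points at the VolSync bootstrap pattern this commit is building toward: a `ReplicationDestination` restores the restic repository, and a fresh PVC is pre-populated from that restore. A minimal sketch of the pattern (assumed VolSync API; the repository Secret name follows the `${PVC}-restic` convention from the template and is hypothetical here):

apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: "vm-ad-dc1-c-drive-bootstrap"
spec:
  trigger:
    manual: restore-once   # one-shot restore rather than a schedule
  restic:
    repository: "vm-ad-dc1-c-drive-restic"  # hypothetical Secret name
    copyMethod: Snapshot
    capacity: "105Gi"
    accessModes: ["ReadWriteMany"]
    storageClassName: "file"
    volumeSnapshotClassName: "file"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: "vm-ad-dc1-c-drive"
spec:
  storageClassName: "file"
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: "105Gi"
  dataSourceRef:
    apiGroup: "volsync.backube"
    kind: "ReplicationDestination"
    name: "vm-ad-dc1-c-drive-bootstrap"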