From aacf6cfa43a5896578e3129d8a8a76b1f16c4bcb Mon Sep 17 00:00:00 2001
From: JJGadgets
Date: Thu, 9 Nov 2023 17:43:10 +0800
Subject: [PATCH] feat(rook-ceph): deploy cluster within biohazard

---
 .../core/storage/rook-ceph/pve/app/.sops.yaml |   0
 .../rook-ceph/pve/app/ceph-cluster.sops.yaml  |   0
 .../rook-ceph/pve/app/ceph-monitor.yaml       |   0
 .../rook-ceph/pve/app/ceph-prometheus.yaml    |   0
 .../rook-ceph/pve/app/create-secrets.sh       |   0
 .../rook-ceph/pve/app/kustomization.yaml      |   0
 .../pve/app/object-radosgw-certs.yaml         |   0
 .../storage/rook-ceph/pve/app/object.yaml     |   0
 .../rook-ceph/pve/app/pveceph-object.sh       |   0
 .../rook-ceph/pve/app/secret.sops.yaml        |   0
 .../rook-ceph/pve/app/storage-class.yaml      |   0
 .../pve/app/volume-snapshot-class.yaml        |   0
 .../deploy/core/storage/rook-ceph/pve/ks.yaml |   0
 .../storage/rook-ceph/pve/kustomization.yaml  |   0
 .../biohazard/flux/kustomization.yaml         |   2 +-
 .../storage/rook-ceph/cluster/app/hr.yaml     | 110 ++++++++++++++----
 16 files changed, 87 insertions(+), 25 deletions(-)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/.sops.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/ceph-cluster.sops.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/ceph-monitor.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/ceph-prometheus.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/create-secrets.sh (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/kustomization.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/object-radosgw-certs.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/object.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/pveceph-object.sh (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/secret.sops.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/storage-class.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/app/volume-snapshot-class.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/ks.yaml (100%)
 rename {kube => archive/kube}/deploy/core/storage/rook-ceph/pve/kustomization.yaml (100%)

diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/.sops.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/.sops.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/.sops.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/.sops.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/ceph-cluster.sops.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/ceph-cluster.sops.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/ceph-cluster.sops.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/ceph-cluster.sops.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/ceph-monitor.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/ceph-monitor.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/ceph-monitor.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/ceph-monitor.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/ceph-prometheus.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/ceph-prometheus.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/ceph-prometheus.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/ceph-prometheus.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/create-secrets.sh b/archive/kube/deploy/core/storage/rook-ceph/pve/app/create-secrets.sh
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/create-secrets.sh
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/create-secrets.sh
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/kustomization.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/kustomization.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/kustomization.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/kustomization.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/object-radosgw-certs.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/object-radosgw-certs.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/object-radosgw-certs.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/object-radosgw-certs.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/object.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/object.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/object.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/object.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/pveceph-object.sh b/archive/kube/deploy/core/storage/rook-ceph/pve/app/pveceph-object.sh
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/pveceph-object.sh
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/pveceph-object.sh
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/secret.sops.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/secret.sops.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/secret.sops.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/secret.sops.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/storage-class.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/storage-class.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/storage-class.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/storage-class.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/app/volume-snapshot-class.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/app/volume-snapshot-class.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/app/volume-snapshot-class.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/app/volume-snapshot-class.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/ks.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/ks.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/ks.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/ks.yaml
diff --git a/kube/deploy/core/storage/rook-ceph/pve/kustomization.yaml b/archive/kube/deploy/core/storage/rook-ceph/pve/kustomization.yaml
similarity index 100%
rename from kube/deploy/core/storage/rook-ceph/pve/kustomization.yaml
rename to archive/kube/deploy/core/storage/rook-ceph/pve/kustomization.yaml
diff --git a/kube/clusters/biohazard/flux/kustomization.yaml b/kube/clusters/biohazard/flux/kustomization.yaml
index 26073b13..7fcfcfc8 100644
--- a/kube/clusters/biohazard/flux/kustomization.yaml
+++ b/kube/clusters/biohazard/flux/kustomization.yaml
@@ -15,7 +15,7 @@ resources:
   - ../../../deploy/core/secrets/external-secrets/
   - ../../../deploy/core/storage/_external-snapshotter/
   - ../../../deploy/core/storage/rook-ceph/
-  - ../../../deploy/core/storage/rook-ceph/pve/
+  - ../../../deploy/core/storage/rook-ceph/cluster/
   - ../../../deploy/core/storage/democratic-csi/_deps/
   - ../../../deploy/core/storage/democratic-csi/local-hostpath/
   - ../../../deploy/core/storage/csi-driver-nfs/
diff --git a/kube/deploy/core/storage/rook-ceph/cluster/app/hr.yaml b/kube/deploy/core/storage/rook-ceph/cluster/app/hr.yaml
index 4a6552ad..5d3e0ae6 100644
--- a/kube/deploy/core/storage/rook-ceph/cluster/app/hr.yaml
+++ b/kube/deploy/core/storage/rook-ceph/cluster/app/hr.yaml
@@ -2,15 +2,27 @@
 apiVersion: helm.toolkit.fluxcd.io/v2beta1
 kind: HelmRelease
 metadata:
-  name: rook-ceph-cluster
+  name: "rook-ceph-cluster-${CLUSTER_NAME}"
   namespace: rook-ceph
 spec:
   interval: 5m
-  timeout: 15m
+  timeout: 1h
+  install:
+    createNamespace: true
+    remediation:
+      retries: 5
+  maxHistory: 5
+  uninstall:
+    deletionPropagation: background
+    keepHistory: false
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      retries: 5
   chart:
     spec:
       chart: rook-ceph-cluster
-      version: "${VERSION_ROOK:=v1.10.10}"
+      version: "v1.12.7"
       sourceRef:
         name: rook-ceph
         kind: HelmRepository
@@ -27,15 +39,25 @@ spec:
       osd_pool_default_min_size = 2
       mon_data_avail_warn = 10
     cephClusterSpec:
+      placement:
+        mon: &affinity
+          nodeAffinity:
+            requiredDuringSchedulingIgnoredDuringExecution:
+              nodeSelectorTerms:
+                - matchExpressions:
+                    - key: rook
+                      operator: In
+                      values: ["true"]
+        mgr: *affinity
       network:
-        provider: host
+        #provider: host
         connections:
           encryption:
-            enabled: false
+            enabled: true
           compression:
-            enabled: false
+            enabled: true
       crashCollector:
-        disable: false
+        disable: true
       dashboard:
         enabled: true
         urlPrefix: "/"
@@ -56,15 +78,13 @@ spec:
         useAllNodes: false
         useAllDevices: false
         config:
-          # encryptedDevice: "true"
+          encryptedDevice: "true"
           osdsPerDevice: "1"
         nodes:
-          - name: "humming"
-            devicePathFilter: "^/dev/disk/by-id/ata-INTEL_SSDSC2BB016T4.*"
-          - name: "blackfish"
-            devicePathFilter: "^/dev/disk/by-id/ata-INTEL_SSDSC2BB016T4.*"
-          # - name: "strato"
-          #   devicePathFilter: "^/dev/disk/by-id/ata-INTEL_SSDSC2BB016T4.*"
+          - name: "ange"
+            devicePathFilter: &osd-s3500-1-6tb "^/dev/disk/by-id/ata-INTEL_SSDSC2BB016T4.*"
+          - name: "charlotte"
+            devicePathFilter: *osd-s3500-1-6tb
       resources:
         mgr:
           requests:
@@ -129,19 +149,18 @@ spec:
             size: 2
           parameters:
             min_size: "2"
-            # compression_mode: "aggressive"
-            # compression_algorithm: "zstd"
+            compression_mode: "aggressive"
+            compression_algorithm: "zstd"
         storageClass: &rbd-sc
           enabled: true
           name: "block"
           isDefault: true
           reclaimPolicy: "Retain"
           allowVolumeExpansion: true
-          # mountOptions: ["discard"]
+          mountOptions: ["discard"]
           parameters:
             imageFormat: "2"
-            imageFeatures: "layering"
-            # imageFeatures: "layering,exclusive-lock,object-map,fast-diff,deep-flatten" # https://docs.ceph.com/en/quincy/rbd/rbd-config-ref/#image-features
+            imageFeatures: "layering,exclusive-lock,object-map,fast-diff,deep-flatten" # https://docs.ceph.com/en/quincy/rbd/rbd-config-ref/#image-features
             csi.storage.k8s.io/provisioner-secret-name: "rook-csi-rbd-provisioner"
             csi.storage.k8s.io/provisioner-secret-namespace: "rook-ceph"
             csi.storage.k8s.io/controller-expand-secret-name: "rook-csi-rbd-provisioner"
@@ -149,6 +168,35 @@ spec:
             csi.storage.k8s.io/node-stage-secret-name: "rook-csi-rbd-node"
             csi.storage.k8s.io/node-stage-secret-namespace: "rook-ceph"
             csi.storage.k8s.io/fstype: "ext4"
+      # - name: "pvc-migrate"
+      #   spec:
+      #     failureDomain: "osd"
+      #     deviceClass: "ssd"
+      #     replicated:
+      #       # size: 3
+      #       size: 2
+      #     parameters:
+      #       min_size: "2"
+      #       # compression_mode: "aggressive"
+      #       # compression_algorithm: "zstd"
+      #   storageClass: &rbd-sc
+      #     enabled: true
+      #     name: "pvc-migrate"
+      #     isDefault: false
+      #     reclaimPolicy: "Retain"
+      #     allowVolumeExpansion: true
+      #     mountOptions: ["discard"]
+      #     parameters:
+      #       imageFormat: "2"
+      #       imageFeatures: "layering"
+      #       # imageFeatures: "layering,exclusive-lock,object-map,fast-diff,deep-flatten" # https://docs.ceph.com/en/quincy/rbd/rbd-config-ref/#image-features
+      #       csi.storage.k8s.io/provisioner-secret-name: "rook-csi-rbd-provisioner"
+      #       csi.storage.k8s.io/provisioner-secret-namespace: "rook-ceph"
+      #       csi.storage.k8s.io/controller-expand-secret-name: "rook-csi-rbd-provisioner"
+      #       csi.storage.k8s.io/controller-expand-secret-namespace: "rook-ceph"
+      #       csi.storage.k8s.io/node-stage-secret-name: "rook-csi-rbd-node"
+      #       csi.storage.k8s.io/node-stage-secret-namespace: "rook-ceph"
+      #       csi.storage.k8s.io/fstype: "ext4"
       # - name: &rbd "${CLUSTER_NAME}-block-k8s-ssd-ec-2-1"
       #   spec:
       #     failureDomain: "osd"
       #     deviceClass: "ssd"
       #     erasureCoded:
       #       dataChunks: 2
       #       codingChunks: 1
       #     parameters:
       #       min_size: "2"
       #       compression_mode: "aggressive"
       #       compression_algorithm: "zstd"
@@ -177,9 +225,22 @@ spec:
       #   storageClass:
       #     <<: *rbd-sc
       #     name: "block-hdd-ec-2-2"
+      # - name: "games-ec-2-1"
+      #   spec:
+      #     failureDomain: "osd"
+      #     deviceClass: "ssd"
+      #     erasureCoded:
+      #       dataChunks: 2
+      #       codingChunks: 1
+      #     parameters:
+      #       min_size: "2"
+      #       compression_mode: "aggressive"
+      #       compression_algorithm: "lz4"
+      #   storageClass:
+      #     enabled: false # only my gaming PC will be accessing this
     cephBlockPoolsVolumeSnapshotClass:
-      enabled: false
-      name: "ceph-block"
+      enabled: true
+      name: "block"
       isDefault: true
       deletionPolicy: "Delete"
     cephFileSystems:
@@ -202,6 +263,7 @@ spec:
           metadataServer:
             activeCount: 1
             activeStandby: true
+            placement: *affinity
             resources:
               requests:
                 cpu: 1000m
@@ -214,7 +276,7 @@ spec:
           name: "file"
           pool: *fsdata0
           reclaimPolicy: "Retain"
-          # allowVolumeExpansion: true
+          allowVolumeExpansion: true
           # mountOptions: ["discard"]
           parameters:
             csi.storage.k8s.io/provisioner-secret-name: "rook-csi-cephfs-provisioner"
@@ -225,8 +287,8 @@ spec:
             csi.storage.k8s.io/node-stage-secret-namespace: "rook-ceph"
             csi.storage.k8s.io/fstype: "ext4"
     cephFileSystemVolumeSnapshotClass:
-      enabled: false
-      name: "ceph-fs"
+      enabled: true
+      name: "file"
       isDefault: false
       deletionPolicy: Delete
     cephObjectStores: