diff --git a/hack/e2e-apps/foundationdb.bats b/hack/e2e-apps/foundationdb.bats
index e39561c8..99937a42 100644
--- a/hack/e2e-apps/foundationdb.bats
+++ b/hack/e2e-apps/foundationdb.bats
@@ -20,10 +20,9 @@ spec:
     key: "foundationdb.org/none"
     valueFrom: "\$FDB_ZONE_ID"
   storage:
-    size: "8Gi"
+    size: "1Gi"
     storageClass: ""
-  resources:
-    preset: "nano"
+  resourcesPreset: "small"
   backup:
     enabled: false
     s3:
@@ -36,45 +35,76 @@ spec:
       retentionPolicy: "7d"
   monitoring:
     enabled: true
-  advanced:
-    customParameters:
-      - "knob_disable_posix_kernel_aio=1"
-    imageType: "split"
-    automaticReplacements: true
+  customParameters:
+    - "knob_disable_posix_kernel_aio=1"
+  imageType: "split"
+  automaticReplacements: true
 EOF
-  sleep 10
-
+  sleep 15
+
   # Wait for HelmRelease to be ready
-  kubectl -n tenant-test wait hr foundationdb-\$name --timeout=180s --for=condition=ready
-
-  # Wait for FoundationDBCluster to be created
-  timeout 120 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org \$name; do sleep 10; done"
-
-  # Wait for cluster to become available (this may take some time)
-  timeout 300 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org \$name -o jsonpath='{.status.databaseConfiguration.usable_regions}' | grep -q '1'; do sleep 15; done"
-
+  kubectl -n tenant-test wait hr foundationdb-$name --timeout=300s --for=condition=ready
+
+  # Wait for FoundationDBCluster to be created (name has foundationdb- prefix)
+  timeout 300 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name; do sleep 15; done"
+
+  # Wait for cluster to become available (initial reconciliation takes time - allow 5 minutes)
+  timeout 300 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.usable_regions}' | grep -q '1'; do sleep 30; done"
+
   # Check that storage processes are running
-  timeout 180 sh -ec "until [ \$(kubectl -n tenant-test get pods -l app=\$name,foundationdb.org/fdb-process-class=storage --field-selector=status.phase=Running --no-headers | wc -l) -eq 3 ]; do sleep 10; done"
-
-  # Check that stateless processes are running
-  timeout 180 sh -ec "until [ \$(kubectl -n tenant-test get pods -l app=\$name,foundationdb.org/fdb-process-class=stateless --field-selector=status.phase=Running --no-headers | wc -l) -ge 1 ]; do sleep 10; done"
-
+  timeout 300 sh -ec "until [ \$(kubectl -n tenant-test get pods -l foundationdb.org/fdb-cluster-name=foundationdb-$name,foundationdb.org/fdb-process-class=storage --field-selector=status.phase=Running --no-headers | wc -l) -eq 3 ]; do sleep 15; done"
+
+  # Check that log processes are running (these are the stateless processes)
+  timeout 300 sh -ec "until [ \$(kubectl -n tenant-test get pods -l foundationdb.org/fdb-cluster-name=foundationdb-$name,foundationdb.org/fdb-process-class=log --field-selector=status.phase=Running --no-headers | wc -l) -ge 1 ]; do sleep 15; done"
+
   # Check that cluster controller is running
-  timeout 180 sh -ec "until [ \$(kubectl -n tenant-test get pods -l app=\$name,foundationdb.org/fdb-process-class=cluster_controller --field-selector=status.phase=Running --no-headers | wc -l) -eq 1 ]; do sleep 10; done"
-
+  timeout 300 sh -ec "until [ \$(kubectl -n tenant-test get pods -l foundationdb.org/fdb-cluster-name=foundationdb-$name,foundationdb.org/fdb-process-class=cluster_controller --field-selector=status.phase=Running --no-headers | wc -l) -eq 1 ]; do sleep 15; done"
+
   # Check WorkloadMonitor is created and configured
-  kubectl -n tenant-test get workloadmonitor \$name
-  timeout 60 sh -ec "until kubectl -n tenant-test get workloadmonitor \$name -o jsonpath='{.spec.replicas}' | grep -q '3'; do sleep 5; done"
-
+  timeout 120 sh -ec "until kubectl -n tenant-test get workloadmonitor foundationdb-$name; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get workloadmonitor foundationdb-$name -o jsonpath='{.spec.replicas}' | grep -q '3'; do sleep 5; done"
+
   # Check dashboard resource map is created
-  kubectl -n tenant-test get configmap \$name-resourcemap
-
-  # Verify cluster is healthy (check cluster status)
-  timeout 120 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org \$name -o jsonpath='{.status.health.available}' | grep -q 'true'; do sleep 10; done"
-
+  kubectl -n tenant-test get configmap foundationdb-$name-resourcemap
+
+  # Verify cluster is healthy (check cluster status) - allow extra time for initial setup
+  timeout 300 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.health.available}' | grep -q 'true'; do sleep 20; done"
+
+  # Validate status.configured field
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.configured}' | grep -q 'true'; do sleep 10; done"
+
+  # Validate status.connectionString field exists and contains expected format
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.connectionString}' | grep -q '@.*\.svc\.cozy\.local'; do sleep 10; done"
+
+  # Validate comprehensive status.databaseConfiguration fields
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.logs}' | grep -q '3'; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.proxies}' | grep -q '3'; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.redundancy_mode}' | grep -q 'double'; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.resolvers}' | grep -q '1'; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.storage_engine}' | grep -q 'ssd-2'; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.databaseConfiguration.usable_regions}' | grep -q '1'; do sleep 10; done"
+
+  # Validate status.desiredProcessGroups field
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.desiredProcessGroups}' | grep -q '^[0-9][0-9]*$'; do sleep 10; done"
+
+  # Validate status.generations.reconciled field
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.generations.reconciled}' | grep -q '^[0-9][0-9]*$'; do sleep 10; done"
+
+  # Validate status.hasListenIPsForAllPods field
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.hasListenIPsForAllPods}' | grep -q 'true'; do sleep 10; done"
+
+  # Validate comprehensive status.health fields
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.health.fullReplication}' | grep -q 'true'; do sleep 10; done"
+  timeout 60 sh -ec "until kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name -o jsonpath='{.status.health.healthy}' | grep -q 'true'; do sleep 10; done"
+
+  # Verify security context is applied correctly (non-root user)
+  storage_pod=$(kubectl -n tenant-test get pods -l foundationdb.org/fdb-cluster-name=foundationdb-$name,foundationdb.org/fdb-process-class=storage --no-headers | head -n1 | awk '{print $1}')
+  kubectl -n tenant-test get pod "$storage_pod" -o jsonpath='{.spec.containers[0].securityContext.runAsUser}' | grep -q '4059'
+  kubectl -n tenant-test get pod "$storage_pod" -o jsonpath='{.spec.containers[0].securityContext.runAsGroup}' | grep -q '4059'
+
   # Clean up
-  kubectl -n tenant-test delete foundationdb \$name
-
+  kubectl -n tenant-test delete foundationdb $name
+
   # Wait for cleanup to complete
-  timeout 60 sh -ec "while kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org \$name 2>/dev/null; do sleep 5; done"
+  timeout 120 sh -ec "while kubectl -n tenant-test get foundationdbclusters.apps.foundationdb.org foundationdb-$name 2>/dev/null; do sleep 10; done"
 }
\ No newline at end of file
diff --git a/packages/apps/foundationdb/templates/cluster.yaml b/packages/apps/foundationdb/templates/cluster.yaml
index f342054d..209a78f9 100644
--- a/packages/apps/foundationdb/templates/cluster.yaml
+++ b/packages/apps/foundationdb/templates/cluster.yaml
@@ -1,3 +1,5 @@
+{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" | default (dict "data" (dict)) }}
+{{- $clusterDomain := index $cozyConfig.data "cluster-domain" | default "cozy.local" }}
 ---
 apiVersion: apps.foundationdb.org/v1beta2
 kind: FoundationDBCluster
@@ -41,7 +43,13 @@ spec:
       {{- end }}
       {{- end }}
      podTemplate:
+        metadata:
+          labels:
+            policy.cozystack.io/allow-to-apiserver: "true"
        spec:
+          serviceAccountName: {{ .Release.Name }}-foundationdb
+          securityContext:
+            fsGroup: {{ .Values.securityContext.runAsGroup }}
          containers:
            - name: foundationdb
              resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 16 }}
@@ -79,6 +87,7 @@ spec:
            storage: {{ .Values.storage.size }}
 
   routing:
+    dnsDomain: {{ $clusterDomain }}
     defineDNSLocalityFields: true
 
   sidecarContainer:
diff --git a/packages/apps/foundationdb/templates/role.yaml b/packages/apps/foundationdb/templates/role.yaml
new file mode 100644
index 00000000..a391a084
--- /dev/null
+++ b/packages/apps/foundationdb/templates/role.yaml
@@ -0,0 +1,22 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ .Release.Name }}-foundationdb
+  labels:
+    app.kubernetes.io/name: foundationdb
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
\ No newline at end of file
diff --git a/packages/apps/foundationdb/templates/rolebinding.yaml b/packages/apps/foundationdb/templates/rolebinding.yaml
new file mode 100644
index 00000000..45b3e123
--- /dev/null
+++ b/packages/apps/foundationdb/templates/rolebinding.yaml
@@ -0,0 +1,17 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Release.Name }}-foundationdb
+  labels:
+    app.kubernetes.io/name: foundationdb
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ .Release.Name }}-foundationdb
+subjects:
+- kind: ServiceAccount
+  name: {{ .Release.Name }}-foundationdb
+  namespace: {{ .Release.Namespace }}
\ No newline at end of file
diff --git a/packages/apps/foundationdb/templates/serviceaccount.yaml b/packages/apps/foundationdb/templates/serviceaccount.yaml
new file mode 100644
index 00000000..b53143de
--- /dev/null
+++ b/packages/apps/foundationdb/templates/serviceaccount.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Release.Name }}-foundationdb
+  labels:
+    app.kubernetes.io/name: foundationdb
+    app.kubernetes.io/instance: {{ .Release.Name }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
\ No newline at end of file