Compare commits

..

2 Commits

Author SHA1 Message Date
Jeff McCune
fd6fbe5598 (#57) Allow gha-rs scale set to fail on all but one clusters
The effect of this patch is limited to refreshing credentials only for
namespaces that exist in the local cluster.  There is structure in place
in the CUE code to allow for namespaces bound to specific clusters, but
this is used only by the optional Vault component.

This patch was an attempt to work around
https://github.com/actions/actions-runner-controller/issues/3351 by
deploying the runner scale sets into unique namespaces.

This effort was a waste of time; only one listener pod successfully
registered for a given scale set name / group combination.

Because we have only one group named Default we can only have one
listener pod globally for a given scale set name.

Because we want our workflows to execute regardless of the availability
of a single cluster, we're going to let this fail for now.  The pod
retries every 3 seconds.  When a cluster is destroyed, another cluster
will quickly register.

A follow up patch will look to expand this retry behavior.
2024-03-15 12:53:16 -07:00
Jeff McCune
67472e1e1c (#60) Disable flux reconciliation of deployment/zitadel on standby clusters 2024-03-14 21:58:32 -07:00
13 changed files with 125 additions and 48 deletions

View File

@@ -1,16 +1,19 @@
package holos
// Controls optional feature flags for services distributed across multiple holos components.
// For example, enable issuing certificates in the provisioner cluster when an optional service is
// enabled for a workload cluster.
package holos
import "list"
#OptionalService: {
name: string
enabled: true | *false
clusters: [Name=_]: #Platform.clusters[Name]
clusterNames: [for k, v in clusters {k}]
namespaces: [Name=_]: #ManagedNamespace & {
name: Name
clusterNames: [for c in clusters {c.name}]
managedNamespaces: [Name=_]: #ManagedNamespace & {
namespace: metadata: name: Name
clusterNames: ["provisioner", for c in clusters {c.name}]
}
// servers represents istio Gateway.spec.servers.hosts entries
// Refer to istio/gateway/gateway.cue
@@ -34,6 +37,10 @@ package holos
}
}
for k, v in #OptionalServices {
#ManagedNamespaces: v.namespaces
for svc in #OptionalServices {
for nsName, ns in svc.managedNamespaces {
if svc.enabled && list.Contains(ns.clusterNames, #ClusterName) {
#ManagedNamespaces: "\(nsName)": ns
}
}
}

View File

@@ -8,7 +8,9 @@ let TargetNamespace = "prod-core-vault"
enabled: true
clusters: core1: _
clusters: core2: _
namespaces: "prod-core-vault": labels: "istio-injection": "enabled"
managedNamespaces: "prod-core-vault": {
namespace: metadata: labels: "istio-injection": "enabled"
}
certs: "vault-core": #Certificate & {
metadata: name: "vault-core"
metadata: namespace: "istio-ingress"

View File

@@ -61,43 +61,70 @@ let DatabaseCACertPatch = [
},
]
#Kustomize: {
patches: [
{
let CAPatch = #Patch & {
target: {
group: "apps" | "batch"
version: "v1"
kind: "Job" | "Deployment"
name: string
}
patch: yaml.Marshal(DatabaseCACertPatch)
}
#KustomizePatches: {
mesh: {
target: {
group: "apps"
version: "v1"
kind: "Deployment"
name: Name
}
patch: yaml.Marshal(IstioInject)
}
deploymentCA: CAPatch & {
target: group: "apps"
target: kind: "Deployment"
target: name: Name
}
initJob: CAPatch & {
target: group: "batch"
target: kind: "Job"
target: name: "\(Name)-init"
}
setupJob: CAPatch & {
target: group: "batch"
target: kind: "Job"
target: name: "\(Name)-setup"
}
if #IsPrimaryCluster == false {
fluxDisable: {
target: {
group: "apps"
version: "v1"
kind: "Deployment"
name: Name
}
patch: yaml.Marshal(IstioInject)
},
{
target: {
group: "apps"
version: "v1"
kind: "Deployment"
name: Name
}
patch: yaml.Marshal(DatabaseCACertPatch)
},
{
patch: yaml.Marshal(DisableFluxPatch)
}
initDisable: {
target: {
group: "batch"
version: "v1"
kind: "Job"
name: "\(Name)-init"
}
patch: yaml.Marshal(DatabaseCACertPatch)
},
{
patch: yaml.Marshal(DisableFluxPatch)
}
setupDisable: {
target: {
group: "batch"
version: "v1"
kind: "Job"
name: "\(Name)-setup"
}
patch: yaml.Marshal(DatabaseCACertPatch)
},
]
patch: yaml.Marshal(DisableFluxPatch)
}
}
}
let DisableFluxPatch = [{op: "replace", path: "/metadata/annotations/kustomize.toolkit.fluxcd.io~1reconcile", value: "disabled"}]

View File

@@ -4,6 +4,6 @@ package holos
#InputKeys: project: "github"
#DependsOn: Namespaces: name: "prod-secrets-namespaces"
#TargetNamespace: #InputKeys.component
#ARCSystemNamespace: "arc-system"
#HelmChart: namespace: #TargetNamespace
#HelmChart: chart: version: "0.8.3"

View File

@@ -1,5 +1,6 @@
package holos
#TargetNamespace: "arc-runner"
#InputKeys: component: "arc-runner"
#Kustomization: spec: targetNamespace: #TargetNamespace

View File

@@ -1,6 +1,6 @@
package holos
#TargetNamespace: "arc-system"
#TargetNamespace: #ARCSystemNamespace
#InputKeys: component: "arc-system"
#HelmChart & {

View File

@@ -1,5 +1,7 @@
package holos
import "list"
#TargetNamespace: "default"
#InputKeys: {
@@ -11,7 +13,9 @@ package holos
apiObjects: {
// #ManagedNamespaces is the set of all namespaces across all clusters in the platform.
for k, ns in #ManagedNamespaces {
Namespace: "\(ns.name)": #Namespace & {metadata: ns}
if list.Contains(ns.clusterNames, #ClusterName) {
Namespace: "\(k)": #Namespace & ns.namespace
}
}
// #PlatformNamespaces is deprecated in favor of #ManagedNamespaces.

View File

@@ -93,7 +93,14 @@ provisioner get serviceaccount -A --selector=holos.run/job.name=\(NAME) --output
# Create the tokens
mkdir tokens
jq -r '.items[].metadata | "provisioner -n \\(.namespace) create token --duration=12h \\(.name) > tokens/\\(.namespace).\\(.name).jwt"' serviceaccounts.json | bash -x
kubectl get namespaces -o name > namespaces.txt
# Iterate over local namespaces
while IFS= read -r NAMESPACE; do
echo "Getting token for local cluster $NAMESPACE" >&2
jq -r '.items[] | select("namespace/"+.metadata.namespace == "'${NAMESPACE}'") | .metadata | "provisioner -n \\(.namespace) create token --duration=12h \\(.name) > tokens/\\(.namespace).\\(.name).jwt"' serviceaccounts.json | bash -x
done < namespaces.txt
# Create the secrets
mksecret tokens/*.jwt
@@ -124,6 +131,11 @@ kubectl apply --server-side=true -f secrets.yaml
resources: ["secrets"]
verbs: ["*"]
},
{
apiGroups: [""]
resources: ["namespaces"]
verbs: ["list"]
},
]
},
// Bind the Role to the ServiceAccount for the Job.

View File

@@ -1,5 +1,7 @@
package holos
import "list"
#DependsOn: _ESOCreds
#TargetNamespace: "default"
@@ -31,9 +33,11 @@ package holos
}
}
for k, ns in #ManagedNamespaces {
let obj = #SecretStore & {_namespace: ns.name}
SecretStore: "\(ns.name)/\(obj.metadata.name)": obj
for nsName, ns in #ManagedNamespaces {
if list.Contains(ns.clusterNames, #ClusterName) {
let obj = #SecretStore & {_namespace: nsName}
SecretStore: "\(nsName)/\(obj.metadata.name)": obj
}
}
}
}

View File

@@ -25,11 +25,11 @@ ksObjects: []
}
}
for k, ns in #ManagedNamespaces {
for obj in (#PlatformNamespaceObjects & {_ns: ns}).objects {
for nsName, ns in #ManagedNamespaces {
for obj in (#PlatformNamespaceObjects & {_ns: ns.namespace.metadata}).objects {
let Kind = obj.kind
let Name = obj.metadata.name
"\(Kind)": "\(ns.name)/\(Name)": obj
"\(Kind)": "\(nsName)/\(Name)": obj
}
}
}

View File

@@ -10,8 +10,8 @@ package holos
#KubernetesObjects & {
apiObjects: {
// #ManagedNamespaces is the set of all namespaces across all clusters in the platform.
for k, ns in #ManagedNamespaces {
Namespace: "\(ns.name)": #Namespace & {metadata: ns}
for nsName, ns in #ManagedNamespaces {
Namespace: "\(nsName)": #Namespace & ns.namespace
}
// #PlatformNamespaces is deprecated in favor of #ManagedNamespaces.

View File

@@ -152,7 +152,8 @@ _apiVersion: "holos.run/v1alpha1"
suspend?: bool
targetNamespace?: string
timeout: string | *"3m0s"
wait: bool | *true
// wait performs health checks for all reconciled resources. If set to true, .spec.healthChecks is ignored.
wait: bool | *true
dependsOn: [for k, v in #DependsOn {v}]
}
}
@@ -287,16 +288,21 @@ _apiVersion: "holos.run/v1alpha1"
// ManagedNamespace is a namespace to manage across all clusters in the holos platform.
#ManagedNamespace: {
// TODO metadata labels and annotations
name: string
labels: [string]: string
namespace: {
metadata: {
name: string
labels: [string]: string
}
}
// clusterNames represents the set of clusters the namespace is managed on. Usually all clusters.
clusterNames: [...string]
}
// #ManagedNamespaces is the union of all namespaces across all cluster types and optional services.
// Holos adopts the namespace sameness position of SIG Multicluster, refer to https://github.com/kubernetes/community/blob/dd4c8b704ef1c9c3bfd928c6fa9234276d61ad18/sig-multicluster/namespace-sameness-position-statement.md
#ManagedNamespaces: {
[Name=_]: {
name: Name
[Name=_]: #ManagedNamespace & {
namespace: metadata: name: Name
}
}
@@ -465,8 +471,18 @@ _apiVersion: "holos.run/v1alpha1"
kind: "Kustomization"
resources: [ResourcesFile]
...
if len(#KustomizePatches) > 0 {
patches: [for v in #KustomizePatches {v}]
}
}
#KustomizePatches: {
[_]: #Patch
}
// #Patch is a kustomize patch
#Patch: kc.#Patch
// #DefaultSecurityContext is the holos default security context to comply with the restricted namespace policy.
// Refer to https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
#DefaultSecurityContext: {
@@ -485,6 +501,10 @@ _apiVersion: "holos.run/v1alpha1"
spec: secretName: metadata.name
}
// #IsPrimaryCluster is true if the cluster being rendered is the primary cluster
// Used by the iam project to determine where https://login.example.com is active.
#IsPrimaryCluster: bool & #ClusterName == #Platform.primaryCluster.name
// By default, render kind: Skipped so holos knows to skip over intermediate cue files.
// This enables the use of holos render ./foo/bar/baz/... when bar contains intermediary constraints which are not complete components.
// Holos skips over these intermediary cue instances.

View File

@@ -1 +1 @@
0
1