Compare commits

...

6 Commits

Author SHA1 Message Date
Jeff McCune
2b3b5a4887 (#36) Issue login and httpbin certs
This patch uses cert manager in the provisioner cluster to provision tls
certs for https://login.example.com and https://httpbin.k2.example.com

The certs are not yet synced to the clusters.  Next step is to replace
the Certificate resources with ExternalSecret resources, then remove
cert manager from the workload clusters.
2024-03-05 14:27:37 -08:00
Jeff McCune
7426e8f867 (#36) Move cert-manager to the provisioner cluster
This patch moves certificate management to the provisioner cluster to
centralize all secrets into the highly secured cluster.  This change
also simplifies the architecture in a number of ways:

1. Certificate lives are now completely independent of cluster
   lifecycle.
2. Remove the need for bi-directional sync to save cert secrets.
3. Workload clusters no longer need access to DNS.
2024-03-05 12:51:58 -08:00
Jeff McCune
cf0c455aa2 (#34) Add test for print secret data 2024-03-05 11:14:37 -08:00
Jeff McCune
752a3f912d (#34) Remove debug info logs 2024-03-05 11:05:51 -08:00
Jeff McCune
7d5852d675 (#34) Print secret data as json
Closes: #34
2024-03-05 11:03:47 -08:00
Jeff McCune
66b4ca0e6c (#31) Fix helm missing in actions workflow
Causing test failures that should pass.
2024-03-05 10:11:43 -08:00
14 changed files with 284 additions and 123 deletions

View File

@@ -23,5 +23,10 @@ jobs:
with:
go-version: stable
- name: Set up Helm
uses: azure/setup-helm@v4.1.0
with:
version: 'latest'
- name: Test
run: ./scripts/test

View File

@@ -0,0 +1,56 @@
package holos

// #PlatformCerts enumerates every TLS certificate provisioned for the
// platform.  Each entry unifies with #PlatformCert (platform-wide certs)
// or #ClusterCert (certs scoped to one workload cluster) defined below.
#PlatformCerts: {
// Login service for IAM.
login: #PlatformCert & {
_name: "login"
// Wildcard adds a *.<commonName> SAN so subdomains are also covered.
_wildcard: true
_description: "Cert for Zitadel the platform oidc identity provider for iam"
}
// Cert for the httpbin test endpoint on the k2 cluster.
"k2-httpbin": #ClusterCert & {
_name: "httpbin"
_cluster: "k2"
_description: "Test endpoint to verify the service mesh ingress gateway"
}
}

// #PlatformCert provisions a cert in the provisioner cluster. Workload clusters use ExternalSecret resources to fetch the Secret tls key and cert from the provisioner cluster.
#PlatformCert: #Certificate & {
// _name is the short service name; it is also the default metadata.name.
_name: string
// _wildcard defaults to false; when true a "*.<commonName>" SAN is added.
_wildcard: true | *false
metadata: name: string | *_name
metadata: namespace: string | *"istio-ingress"
spec: {
// Default CN is <name>.<org domain>, e.g. login.example.com.
commonName: string | *"\(_name).\(#Platform.org.domain)"
if _wildcard {
dnsNames: [commonName, "*.\(commonName)"]
}
if !_wildcard {
dnsNames: [commonName]
}
// Issued key pair is stored in a Secret named after the Certificate.
secretName: metadata.name
issuerRef: kind: "ClusterIssuer"
issuerRef: name: string | *"letsencrypt"
}
}

// #ClusterCert provisions a cluster specific certificate.
#ClusterCert: #Certificate & {
// _name is the short service name; combined with _cluster for metadata.name.
_name: string
// _cluster is the workload cluster name, e.g. "k2".
_cluster: string
// _wildcard defaults to false; when true a "*.<commonName>" SAN is added.
_wildcard: true | *false
metadata: name: string | *"\(_cluster)-\(_name)"
metadata: namespace: string | *"istio-ingress"
spec: {
// Default CN is <name>.<cluster>.<org domain>, e.g. httpbin.k2.example.com.
commonName: string | *"\(_name).\(_cluster).\(#Platform.org.domain)"
if _wildcard {
dnsNames: [commonName, "*.\(commonName)"]
}
if !_wildcard {
dnsNames: [commonName]
}
// Issued key pair is stored in a Secret named after the Certificate.
secretName: metadata.name
issuerRef: kind: "ClusterIssuer"
issuerRef: name: string | *"letsencrypt"
}
}

View File

@@ -0,0 +1,20 @@
package holos

// Component that renders every certificate in #PlatformCerts as Kubernetes
// api objects for the provisioner cluster.
// Provision all platform certificates.
#InputKeys: component: "certificates"
// Certificates usually go into the istio-system namespace, but they may go anywhere.
// Each Certificate carries its own metadata.namespace, so the target
// namespace here is only a fallback.
#TargetNamespace: "default"
// Depends on issuers
#DependsOn: _LetsEncrypt
#KubernetesObjects & {
apiObjects: {
// Key objects by kind, then by "<namespace>/<name>" so entries with the
// same kind unify into one struct without colliding.
for k, obj in #PlatformCerts {
"\(obj.kind)": {
"\(obj.metadata.namespace)/\(obj.metadata.name)": obj
}
}
}
}

View File

@@ -0,0 +1,43 @@
package holos

// Component that installs cert-manager via the upstream Helm chart.
// https://cert-manager.io/docs/
#TargetNamespace: "cert-manager"
#InputKeys: {
component: "certmanager"
service: "cert-manager"
}

#HelmChart & {
values: #Values & {
// Install the cert-manager CRDs together with the chart.
installCRDs: true
// Disable the post-install startup api check Job.
startupapicheck: enabled: false
// Must not use kube-system on gke autopilot. GKE Warden authz blocks access.
global: leaderElection: namespace: #TargetNamespace
}
namespace: #TargetNamespace
chart: {
name: "cert-manager"
version: "1.14.3"
repository: {
name: "jetstack"
url: "https://charts.jetstack.io"
}
}
}

// #PodResources are the default container resource requests, sized to the
// GKE Autopilot minimums linked below.
// https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-resource-requests#min-max-requests
#PodResources: {
requests: {
cpu: string | *"250m"
memory: string | *"512Mi"
"ephemeral-storage": string | *"100Mi"
}
}

// #NodeSelector schedules pods onto Linux spot nodes.
// https://cloud.google.com/kubernetes-engine/docs/how-to/autopilot-spot-pods
#NodeSelector: {
"kubernetes.io/os": "linux"
"cloud.google.com/gke-spot": "true"
}

View File

@@ -1,6 +1,6 @@
package holos
#UpstreamValues: {
#Values: {
// +docs:section=Global
// Default values for cert-manager.
@@ -51,7 +51,7 @@ package holos
leaderElection: {
// Override the namespace used for the leader election lease
namespace: "kube-system"
namespace: string | *"kube-system"
}
}
@@ -246,7 +246,7 @@ package holos
// memory: 32Mi
//
// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
resources: #PodResources
// Pod Security Context
// ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
@@ -310,9 +310,7 @@ package holos
// This default ensures that Pods are only scheduled to Linux nodes.
// It prevents Pods being scheduled to Windows nodes in a mixed OS cluster.
// +docs:property
nodeSelector: {
"kubernetes.io/os": "linux"
}
nodeSelector: #NodeSelector
// +docs:ignore
ingressShim: {}
@@ -408,7 +406,7 @@ package holos
enabled: true
servicemonitor: {
// Create a ServiceMonitor to add cert-manager to Prometheus
enabled: false
enabled: true | *false
// Specifies the `prometheus` label on the created ServiceMonitor, this is
// used when different Prometheus instances have label selectors matching
@@ -652,7 +650,7 @@ package holos
// memory: 32Mi
//
// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
resources: #PodResources
// Liveness probe values
// ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
@@ -685,9 +683,7 @@ package holos
// This default ensures that Pods are only scheduled to Linux nodes.
// It prevents Pods being scheduled to Windows nodes in a mixed OS cluster.
// +docs:property
nodeSelector: {
"kubernetes.io/os": "linux"
}
nodeSelector: #NodeSelector
// A Kubernetes Affinity, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core
//
@@ -959,7 +955,7 @@ package holos
// memory: 32Mi
//
// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
resources: #PodResources
// The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with
// matching labels.
@@ -968,9 +964,7 @@ package holos
// This default ensures that Pods are only scheduled to Linux nodes.
// It prevents Pods being scheduled to Windows nodes in a mixed OS cluster.
// +docs:property
nodeSelector: {
"kubernetes.io/os": "linux"
}
nodeSelector: #NodeSelector
// A Kubernetes Affinity, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core
//
@@ -1098,7 +1092,7 @@ package holos
startupapicheck: {
// Enables the startup api check
enabled: true
enabled: *true | false
// Pod Security Context to be set on the startupapicheck component Pod
// ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
@@ -1151,7 +1145,7 @@ package holos
// memory: 32Mi
//
// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
resources: #PodResources
// The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with
// matching labels.
@@ -1160,9 +1154,7 @@ package holos
// This default ensures that Pods are only scheduled to Linux nodes.
// It prevents Pods being scheduled to Windows nodes in a mixed OS cluster.
// +docs:property
nodeSelector: {
"kubernetes.io/os": "linux"
}
nodeSelector: #NodeSelector
// A Kubernetes Affinity, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core
//

View File

@@ -0,0 +1,78 @@
package holos

// Component that provisions the Lets Encrypt ClusterIssuers used by all
// platform certificates.  DNS-01 via Cloudflare is the only enabled solver.
// Lets Encrypt certificate issuers for public tls certs
#InputKeys: component: "letsencrypt"
#TargetNamespace: "cert-manager"
// Name is the base name shared by the production and staging issuers.
let Name = "letsencrypt"
// The cloudflare api token is platform scoped, not cluster scoped.
#SecretName: "cloudflare-api-token-secret"
// Depends on cert manager
#DependsOn: _CertManager
#KubernetesObjects & {
apiObjects: {
ClusterIssuer: {
// Production ACME endpoint; issues trusted certs subject to rate limits.
letsencrypt: #ClusterIssuer & {
metadata: name: Name
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-v02.api.letsencrypt.org/directory"
// ACME account private key Secret, distinct from issued cert Secrets.
privateKeySecretRef: name: Name
solvers: [{
dns01: cloudflare: {
email: #Platform.org.cloudflare.email
apiTokenSecretRef: name: #SecretName
apiTokenSecretRef: key: "api_token"
}}]
}
}
}
// Staging ACME endpoint; untrusted certs, relaxed rate limits for testing.
letsencryptStaging: #ClusterIssuer & {
metadata: name: Name + "-staging"
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-staging-v02.api.letsencrypt.org/directory"
privateKeySecretRef: name: Name + "-staging"
solvers: [{
dns01: cloudflare: {
email: #Platform.org.cloudflare.email
apiTokenSecretRef: name: #SecretName
apiTokenSecretRef: key: "api_token"
}}]
}
}
}
}
}
}

// _HTTPSolvers are disabled in the provisioner cluster, dns is the method supported by holos.
// NOTE(review): letsencryptHTTP reuses the same privateKeySecretRef name as
// the dns01 production issuer above — confirm that is intended before
// enabling these.
_HTTPSolvers: {
letsencryptHTTP: #ClusterIssuer & {
metadata: name: Name + "-http"
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-v02.api.letsencrypt.org/directory"
privateKeySecretRef: name: Name
solvers: [{http01: ingress: class: "istio"}]
}
}
}
letsencryptHTTPStaging: #ClusterIssuer & {
metadata: name: Name + "-http-staging"
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-staging-v02.api.letsencrypt.org/directory"
privateKeySecretRef: name: Name + "-staging"
solvers: [{http01: ingress: class: "istio"}]
}
}
}
}

View File

@@ -0,0 +1,13 @@
package holos

// Components under this directory are part of this collection
#InputKeys: project: "mesh"
// Shared dependencies for all components in this collection.
#DependsOn: _Namespaces
// Common Dependencies
// Hidden fields naming the dependency handles referenced by sibling
// components via #DependsOn (e.g. the letsencrypt component depends on
// _CertManager; the certificates component depends on _LetsEncrypt).
_Namespaces: Namespaces: name: "\(#StageName)-secrets-namespaces"
_CertManager: CertManager: name: "\(#InstancePrefix)-certmanager"
_LetsEncrypt: LetsEncrypt: name: "\(#InstancePrefix)-letsencrypt"
_Certificates: Certificates: name: "\(#InstancePrefix)-certificates"

View File

@@ -1,61 +0,0 @@
package holos
// Lets Encrypt certificate issuers for public tls certs
#InputKeys: component: "certissuers"
#TargetNamespace: "cert-manager"
let Name = "letsencrypt"
// The cloudflare api token is platform scoped, not cluster scoped.
#SecretName: "cloudflare-api-token-secret"
// Depends on cert manager
#DependsOn: _CertManager
#KubernetesObjects & {
apiObjects: {
ClusterIssuer: {
letsencrypt: #ClusterIssuer & {
metadata: name: Name
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-v02.api.letsencrypt.org/directory"
privateKeySecretRef: name: Name + "-istio"
solvers: [{http01: ingress: class: "istio"}]
}
}
}
letsencryptStaging: #ClusterIssuer & {
metadata: name: Name + "-staging"
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-staging-v02.api.letsencrypt.org/directory"
privateKeySecretRef: name: Name + "-staging-istio"
solvers: [{http01: ingress: class: "istio"}]
}
}
}
letsencryptDns: #ClusterIssuer & {
metadata: name: Name + "-dns"
spec: {
acme: {
email: #Platform.org.contact.email
server: "https://acme-v02.api.letsencrypt.org/directory"
privateKeySecretRef: name: Name + "-istio"
solvers: [{
dns01: cloudflare: {
email: #Platform.org.cloudflare.email
apiTokenSecretRef: name: #SecretName
apiTokenSecretRef: key: "api_token"
}}]
}
}
}
}
ExternalSecret: "\(#SecretName)": #ExternalSecret & {
_name: #SecretName
}
}
}

View File

@@ -1,25 +0,0 @@
package holos
// https://cert-manager.io/docs/
#TargetNamespace: "cert-manager"
#InputKeys: {
component: "certmanager"
service: "cert-manager"
}
#HelmChart & {
values: #UpstreamValues & {
installCRDs: true
}
namespace: #TargetNamespace
chart: {
name: "cert-manager"
version: "1.14.3"
repository: {
name: "jetstack"
url: "https://charts.jetstack.io"
}
}
}

View File

@@ -2,10 +2,12 @@ package secret
import (
"context"
"encoding/json"
"fmt"
"github.com/holos-run/holos/pkg/cli/command"
"github.com/holos-run/holos/pkg/holos"
"github.com/holos-run/holos/pkg/logger"
"github.com/holos-run/holos/pkg/util"
"github.com/holos-run/holos/pkg/wrapper"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -90,17 +92,24 @@ func makeGetRunFunc(hc *holos.Config, cfg *config) command.RunFunc {
printFile := *cfg.printFile
if len(toExtract) == 0 {
if printFile == "" {
printFile = secretName
}
}
if printFile != "" {
if data, found := secret.Data[printFile]; found {
hc.Write(data)
} else {
err := fmt.Errorf("cannot print: want %s have %v: did you mean --extract-all or --%s=name", printFile, keys, printFlagName)
return wrapper.Wrap(err)
if printFile == "" { // print all data keys
data := make(map[string]string)
for k, v := range secret.Data {
data[k] = string(v)
}
b, err := json.MarshalIndent(data, "", " ")
if err != nil {
return wrapper.Wrap(err)
}
b = util.EnsureNewline(b)
hc.Write(b)
} else { // print the named data key
if data, found := secret.Data[printFile]; found {
hc.Write(data)
} else {
err := fmt.Errorf("cannot print: want %s have %v: did you mean --extract-all or --%s=name", printFile, keys, printFlagName)
return wrapper.Wrap(err)
}
}
}

View File

@@ -36,6 +36,30 @@ var secret = v1.Secret{
Type: "Opaque",
}
var loginSecret = v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "zitadel-admin-d7fgbgbfbt",
Namespace: "secrets",
Labels: map[string]string{
"holos.run/owner.name": "jeff",
"holos.run/secret.name": "zitadel-admin",
},
CreationTimestamp: metav1.Time{
Time: time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC),
},
},
Data: map[string][]byte{
"url": []byte("https://login.example.com"),
"username": []byte("zitadel-admin@zitadel.login.example.com"),
"password": []byte("Password1!"),
},
Type: "Opaque",
}
// cmdHolos executes the holos root command with a kubernetes.Interface that
// persists for the duration of the testscript. holos is NOT executed in a
// subprocess, the current working directory is not and should not be changed.
@@ -72,7 +96,7 @@ func TestSecrets(t *testing.T) {
testscript.Run(t, testscript.Params{
Dir: "testdata",
Setup: func(env *testscript.Env) error {
env.Values[clientsetKey] = fake.NewSimpleClientset(&secret)
env.Values[clientsetKey] = fake.NewSimpleClientset(&secret, &loginSecret)
return nil
},
Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){

View File

@@ -0,0 +1,7 @@
# Print all data keys as indented JSON by default (no --print-file given)
holos get secret zitadel-admin
stdout '^{$'
stdout '^  "url": "https://login.example.com"'
stdout '^  "username": "zitadel-admin@zitadel.login.example.com"'
stdout '^  "password": "Password1!"'
stdout '^}$'

View File

@@ -164,10 +164,10 @@ func (r *Result) addOverlayObjects(log *slog.Logger) {
for _, name := range names {
yamlString := v[name]
log.Debug(fmt.Sprintf("%s/%s", kind, name), "kind", kind, "name", name)
util.EnsureNewline(b)
b = util.EnsureNewline(b)
header := fmt.Sprintf("---\n# Source: CUE apiObjects.%s.%s\n", kind, name)
b = append(b, []byte(header+yamlString)...)
util.EnsureNewline(b)
b = util.EnsureNewline(b)
}
}
r.accumulatedOutput = string(b)
@@ -193,7 +193,7 @@ func (r *Result) kustomize(ctx context.Context) error {
// Write the main api object resources file for kustomize.
target := filepath.Join(tempDir, r.ResourcesFile)
b := []byte(r.AccumulatedOutput())
util.EnsureNewline(b)
b = util.EnsureNewline(b)
if err := os.WriteFile(target, b, 0644); err != nil {
return wrapper.Wrap(fmt.Errorf("could not write resources: %w", err))
}
@@ -206,7 +206,7 @@ func (r *Result) kustomize(ctx context.Context) error {
return wrapper.Wrap(err)
}
b := []byte(content)
util.EnsureNewline(b)
b = util.EnsureNewline(b)
if err := os.WriteFile(target, b, 0644); err != nil {
return wrapper.Wrap(fmt.Errorf("could not write: %w", err))
}

View File

@@ -1 +1 @@
51
52