Compare commits


14 Commits

Author SHA1 Message Date
Jeff McCune
18cbff0c13 (#31) Add tls cert for zitadel to connect to cockroach db
Cockroach DB uses tls certs for client authentication.  Issue one for
Zitadel.

With this patch Zitadel starts up but is not yet exposed with a
VirtualService.

Refer to https://zitadel.com/docs/self-hosting/manage/configure
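A minimal sketch of the issued certificate, using the `#Certificate` shape from the zitadel component further down this diff; CockroachDB maps the certificate commonName to the SQL user:

```cue
// Sketch, mirroring the zitadel component below: a client certificate
// signed by the crdb CA Issuer. The commonName selects the SQL user.
Certificate: zitadel: #Certificate & {
  metadata: name:      "crdb-zitadel-client"
  metadata: namespace: #TargetNamespace
  spec: {
    commonName: "zitadel"
    issuerRef: {group: "cert-manager.io", kind: "Issuer", name: "crdb-ca-issuer"}
    secretName: "cockroachdb-zitadel"
    usages: ["digital signature", "key encipherment", "client auth"]
  }
}
```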
2024-03-04 14:46:49 -08:00
Jeff McCune
b4fca0929c (#31) ExternalSecret for zitadel-masterkey 2024-03-04 14:31:27 -08:00
Jeff McCune
911d65bdc6 (#31) Setup login.ois.run with basic istio default Gateway
The istio default Gateway is the basis for what will become a dynamic
set of server entries specified from cue project data integrated with
extauthz.

For now we simply need to get the identity provider up and running as
the first step toward identity and access management.
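A sketch of one server entry on the default Gateway, taken from the gateway component below; the `prod-iam-zitadel/` host prefix restricts which namespaces may bind VirtualServices to it:

```cue
// Sketch: a single HTTPS server on the default Gateway. Later, these
// entries are meant to be generated from cue project data.
spec: servers: [{
  hosts: ["prod-iam-zitadel/\(LoginCert.spec.commonName)"]
  port: {name: "https-prod-iam-zitadel", number: 443, protocol: "HTTPS"}
  tls: {credentialName: LoginCert.spec.secretName, mode: "SIMPLE"}
}]
```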
2024-03-04 13:59:17 -08:00
Jeff McCune
2a5eccf0c1 (#33) Helm stderr logging
Log error messages from helm when building and rendering holos
components.

Closes: #33
2024-03-04 13:16:51 -08:00
Jeff McCune
9db4873205 (#31) Add Cockroach DB for Zitadel
Following https://github.com/zitadel/zitadel-charts/blob/main/examples/4-cockroach-secure/README.md
2024-03-04 10:31:39 -08:00
Jeff McCune
f90e83e142 (#30) Add httpbin Gateway and VirtualService
There isn't a default Gateway yet, so use a specific `httpbin` gateway
to test istio instead.
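The component (shown later in this diff) pairs a dedicated Gateway with a VirtualService; a condensed sketch:

```cue
// Sketch: an httpbin-specific Gateway plus a VirtualService bound to it,
// standing in for the not-yet-existing default Gateway.
Gateway: httpbin: #Gateway & {
  spec: selector: istio: "ingressgateway"
  spec: servers: [{
    hosts: ["\(#TargetNamespace)/\(Cert.Host)"]
    port: {name: "https-\(#InstanceName)", number: 443, protocol: "HTTPS"}
    tls: {credentialName: Cert.SecretName, mode: "SIMPLE"}
  }]
}
VirtualService: httpbin: #VirtualService & {
  spec: hosts: [Cert.Host]
  spec: gateways: ["\(#TargetNamespace)/httpbin"]
  spec: http: [{route: [{destination: host: "httpbin"}]}]
}
```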
2024-03-02 21:12:03 -08:00
Jeff McCune
bdd2964edb (#30) Add httpbin Service for ns istio-ingress 2024-03-02 20:39:55 -08:00
Jeff McCune
56375b82d8 (#30) Fix httpbin Deployment selector match labels
Without this patch the deployment fails with:

```
Deployment/istio-ingress/httpbin dry-run failed, reason: Invalid:
Deployment.apps "httpbin" is invalid: spec.template.metadata.labels:
Invalid value:
map[string]string{"app.kubernetes.io/component":"httpbin",
"app.kubernetes.io/instance":"prod-mesh-httpbin",
"app.kubernetes.io/name":"mesh", "app .kubernetes.io/part-of":"prod",
"holos.run/component.name":"httpbin", "holos.run/project.name":"mesh",
"holos.run/stage.name":"prod", "sidecar.istio.io/inject":"true"}:
`selector` does not match template `labels`
```
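The fix, visible in the httpbin component below, is to derive the selector and the pod template labels from a single value so they cannot drift; a minimal sketch:

```cue
// Sketch: one MatchLabels value feeds both the selector and the template.
// Extra labels (common labels, sidecar injection) unify on top of it.
let MatchLabels = {app: "httpbin"} & #SelectorLabels
spec: selector: matchLabels: MatchLabels
spec: template: metadata: labels: MatchLabels
```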
2024-03-02 20:23:23 -08:00
Jeff McCune
dc27489249 (#30) Add httpbin Deployment in istio-ingress namespace
This patch gets the Deployment running with a restricted seccomp
profile.
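A sketch of the restricted settings as they appear in the httpbin component below:

```cue
// Sketch: RuntimeDefault seccomp at the pod level, with the container
// dropping the privileges the restricted profile forbids.
spec: template: spec: securityContext: seccompProfile: type: "RuntimeDefault"
spec: template: spec: containers: [{
  securityContext: {
    allowPrivilegeEscalation: false
    runAsNonRoot:             true
    capabilities: drop: ["ALL"]
  }
}]
```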
2024-03-02 20:17:16 -08:00
Jeff McCune
7d8a618e25 (#30) Add httpbin Certificate to verify the mesh
Also fix certmanager which was not installing role bindings correctly
because the flux kustomization was writing over the metadata namespace
field.
2024-03-02 17:16:42 -08:00
Jeff McCune
646f6fcdb0 (#30) Add https redirect overlay resources
This patch migrates the https redirect and the
istio-ingressgateway-loopback Service from
`holos-infra/components/core/istio/ingress/templates/deployment`
2024-03-02 15:01:58 -08:00
Jeff McCune
4ce39db745 (#30) Enforce restricted pod security profile on istio-ingress namespace
This patch enforces the restricted pod security profile on the istio
ingress namespace. The istio cni moves the traffic redirection from the
init container to a cni daemon set pod (see the sketch after the
references).

Refer to:

 - https://istio.io/latest/docs/setup/additional-setup/pod-security-admission/
 - https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
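A sketch of the enforcement mechanism; the label values are an assumption, since the namespace change itself is not shown in this diff:

```cue
// Sketch (assumed labels, not part of this diff): Pod Security Admission
// enforces the restricted profile via namespace labels.
#PlatformNamespace & {
  name: "istio-ingress"
  labels: "pod-security.kubernetes.io/enforce":         "restricted"
  labels: "pod-security.kubernetes.io/enforce-version": "latest"
}
```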
2024-03-02 11:16:55 -08:00
Jeff McCune
eba58d1639 (#30) Add ingress component and istio-ingressgateway Deployment
Migrated from holos-infra/components/core/istio/ingress
2024-03-02 10:22:21 -08:00
Jeff McCune
765832d90d (#30) Trim istiod 2024-03-01 16:27:49 -08:00
33 changed files with 3307 additions and 37 deletions

View File

@@ -0,0 +1,280 @@
# Want helm errors to show up
! exec holos build .
stderr 'Error: execution error at \(zitadel/templates/secret_zitadel-masterkey.yaml:2:4\): Either set .Values.zitadel.masterkey xor .Values.zitadel.masterkeySecretName'
-- cue.mod --
package holos
-- zitadel.cue --
package holos

cluster: string @tag(cluster, string)

apiVersion: "holos.run/v1alpha1"
kind:       "HelmChart"
metadata: name: "zitadel"
namespace: "zitadel"
chart: {
  name:    "zitadel"
  version: "7.9.0"
  repository: {
    name: "zitadel"
    url:  "https://charts.zitadel.com"
  }
}
-- vendor/zitadel/templates/secret_zitadel-masterkey.yaml --
{{- if (or (and .Values.zitadel.masterkey .Values.zitadel.masterkeySecretName) (and (not .Values.zitadel.masterkey) (not .Values.zitadel.masterkeySecretName)) ) }}
{{- fail "Either set .Values.zitadel.masterkey xor .Values.zitadel.masterkeySecretName" }}
{{- end }}
{{- if .Values.zitadel.masterkey -}}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: zitadel-masterkey
  {{- with .Values.zitadel.masterkeyAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    {{- include "zitadel.labels" . | nindent 4 }}
stringData:
  masterkey: {{ .Values.zitadel.masterkey }}
{{- end -}}
-- vendor/zitadel/Chart.yaml --
apiVersion: v2
appVersion: v2.46.0
description: A Helm chart for ZITADEL
icon: https://zitadel.com/zitadel-logo-dark.svg
kubeVersion: '>= 1.21.0-0'
maintainers:
  - email: support@zitadel.com
    name: zitadel
    url: https://zitadel.com
name: zitadel
type: application
version: 7.9.0
-- vendor/zitadel/values.yaml --
# Default values for zitadel.

zitadel:
  # The ZITADEL config under configmapConfig is written to a Kubernetes ConfigMap
  # See all defaults here:
  # https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
  configmapConfig:
    ExternalSecure: true
    Machine:
      Identification:
        Hostname:
          Enabled: true
        Webhook:
          Enabled: false

  # The ZITADEL config under secretConfig is written to a Kubernetes Secret
  # See all defaults here:
  # https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
  secretConfig:

  # Annotations set on secretConfig secret
  secretConfigAnnotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation
    helm.sh/hook-weight: "0"

  # Reference the name of a secret that contains ZITADEL configuration.
  configSecretName:
  # The key under which the ZITADEL configuration is located in the secret.
  configSecretKey: config-yaml

  # ZITADEL uses the masterkey for symmetric encryption.
  # You can generate it for example with tr -dc A-Za-z0-9 </dev/urandom | head -c 32
  masterkey: ""

  # Reference the name of the secret that contains the masterkey. The key should be named "masterkey".
  # Note: Either zitadel.masterkey or zitadel.masterkeySecretName must be set
  masterkeySecretName: ""

  # Annotations set on masterkey secret
  masterkeyAnnotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation
    helm.sh/hook-weight: "0"

  # The CA Certificate needed for establishing secure database connections
  dbSslCaCrt: ""
  # The Secret containing the CA certificate at key ca.crt needed for establishing secure database connections
  dbSslCaCrtSecret: ""
  # The db admins secret containing the client certificate and key at tls.crt and tls.key needed for establishing secure database connections
  dbSslAdminCrtSecret: ""
  # The db users secret containing the client certificate and key at tls.crt and tls.key needed for establishing secure database connections
  dbSslUserCrtSecret: ""

  # Generate a self-signed certificate using an init container
  # This will also mount the generated files to /etc/tls/ so that you can reference them in the pod.
  # E.G. KeyPath: /etc/tls/tls.key CertPath: /etc/tls/tls.crt
  # By default, the SAN DNS names include localhost, the POD IP address and the POD name. You may include one more by using additionalDnsName, like "my.zitadel.fqdn".
  selfSignedCert:
    enabled: false
    additionalDnsName:

replicaCount: 3

image:
  repository: ghcr.io/zitadel/zitadel
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

chownImage:
  repository: alpine
  pullPolicy: IfNotPresent
  tag: "3.19"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

# Annotations to add to the deployment
annotations: {}

# Annotations to add to the configMap
configMap:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation
    helm.sh/hook-weight: "0"

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation
    helm.sh/hook-weight: "0"
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}
podAdditionalLabels: {}

podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000

securityContext: {}

# Additional environment variables
env: []
# - name: ZITADEL_DATABASE_POSTGRES_HOST
#   valueFrom:
#     secretKeyRef:
#       name: postgres-pguser-postgres
#       key: host

service:
  type: ClusterIP
  # If service type is "ClusterIP", this can optionally be set to a fixed IP address.
  clusterIP: ""
  port: 8080
  protocol: http2
  annotations: {}
  scheme: HTTP

ingress:
  enabled: false
  className: ""
  annotations: {}
  hosts:
    - host: localhost
      paths:
        - path: /
          pathType: Prefix
  tls: []

resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
topologySpreadConstraints: []

initJob:
  # Once ZITADEL is installed, the initJob can be disabled.
  enabled: true
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation
    helm.sh/hook-weight: "1"
  resources: {}
  backoffLimit: 5
  activeDeadlineSeconds: 300
  extraContainers: []
  podAnnotations: {}
  # Available init commands:
  # "": initialize ZITADEL instance (without skipping anything)
  # database: initialize only the database
  # grant: set ALL grant to user
  # user: initialize only the database user
  # zitadel: initialize ZITADEL internals (skip "create user" and "create database")
  command: ""

setupJob:
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation
    helm.sh/hook-weight: "2"
  resources: {}
  activeDeadlineSeconds: 300
  extraContainers: []
  podAnnotations: {}
  additionalArgs:
    - "--init-projections=true"
  machinekeyWriter:
    image:
      repository: bitnami/kubectl
      tag: ""
    resources: {}

readinessProbe:
  enabled: true
  initialDelaySeconds: 0
  periodSeconds: 5
  failureThreshold: 3

livenessProbe:
  enabled: true
  initialDelaySeconds: 0
  periodSeconds: 5
  failureThreshold: 3

startupProbe:
  enabled: true
  periodSeconds: 1
  failureThreshold: 30

metrics:
  enabled: false
  serviceMonitor:
    # If true, the chart creates a ServiceMonitor that is compatible with Prometheus Operator
    # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.ServiceMonitor.
    # The Prometheus community Helm chart installs this operator
    # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#kube-prometheus-stack
    enabled: false
    honorLabels: false
    honorTimestamps: true

pdb:
  enabled: false
  # these values are used for the PDB and are mutually exclusive
  minAvailable: 1
  # maxUnavailable: 1
  annotations: {}

View File

@@ -3066,7 +3066,7 @@ import (
// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
securityContext?: null | #SecurityContext @go(SecurityContext,*SecurityContext) @protobuf(15,bytes,opt)
securityContext?: #SecurityContext @go(SecurityContext,*SecurityContext) @protobuf(15,bytes,opt)
// Whether this container should allocate a buffer for stdin in the container runtime. If this
// is not set, reads from stdin in the container will always result in EOF.
@@ -3982,7 +3982,7 @@ import (
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
securityContext?: null | #PodSecurityContext @go(SecurityContext,*PodSecurityContext) @protobuf(14,bytes,opt)
securityContext?: #PodSecurityContext @go(SecurityContext,*PodSecurityContext) @protobuf(14,bytes,opt)
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
// If specified, these secrets will be passed to individual puller implementations for them to use.

View File

@@ -324,13 +324,7 @@ import "strings"
// withoutHeader has the same syntax with the header, but has
// opposite meaning.
withoutHeaders?: {
[string]: ({} | {
exact: _
} | {
prefix: _
} | {
regex: _
}) & {
[string]: {
exact?: string
prefix?: string
@@ -383,11 +377,7 @@ import "strings"
// A HTTP rule can either return a direct_response, redirect or
// forward (default) traffic.
redirect?: ({} | {
port: _
} | {
derivePort: _
}) & {
redirect?: {
// On a redirect, overwrite the Authority/Host portion of the URL
// with this value.
authority?: string

View File

@@ -0,0 +1,6 @@
package v1

#Deployment: {
  apiVersion: "apps/v1"
  kind:       "Deployment"
}

View File

@@ -19,3 +19,8 @@ package v1
apiVersion: "v1"
kind: "Pod"
}
#Service: {
apiVersion: "v1"
kind: "Service"
}

View File

@@ -4,6 +4,7 @@ package holos
#PlatformNamespace: {
  name: string
  labels?: {[string]: string}
  annotations?: {[string]: string}
}
// #PlatformNamespaces is a list of namespaces to manage across the platform.

View File

@@ -0,0 +1,10 @@
package holos

// Components under this directory are part of this collection
#InputKeys: project: "iam"

// Shared dependencies for all components in this collection.
#DependsOn: _Namespaces

// Common Dependencies
_Namespaces: Namespaces: name: "\(#StageName)-secrets-namespaces"

View File

@@ -0,0 +1,17 @@
# IAM

The IAM service provides identity and access management for a holos managed platform. Zitadel is the identity provider which integrates tightly with:

1. AuthorizationPolicy at the level of the service mesh.
2. Application level oidc login (ArgoCD, Grafana, etc...)
3. Cloud provider IAM via oidc.

## Preflight

The zitadel master key needs to have a data key named `masterkey` with a Secret name of `zitadel-masterkey`.

```bash
holos create secret zitadel-masterkey --namespace prod-iam-zitadel --append-hash=false --data-stdin <<EOF
{"masterkey":"$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32)"}
EOF
```
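The zitadel component further down this diff consumes the pre-created Secret through an ExternalSecret; a minimal sketch:

```cue
// Sketch: sync the manually created zitadel-masterkey Secret into the
// cluster. #ExternalSecret is the holos definition used by the component.
ExternalSecret: masterkey: #ExternalSecret & {
  _name: "zitadel-masterkey"
}
```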

View File

@@ -0,0 +1,26 @@
package holos

#InputKeys: component: "crdb"

#HelmChart & {
  namespace: #TargetNamespace
  chart: {
    name:    "cockroachdb"
    version: "11.2.3"
    repository: {
      name: "cockroachdb"
      url:  "https://charts.cockroachdb.com/"
    }
  }
  values: #Values
  apiObjects: {
    Issuer: {
      // https://github.com/cockroachdb/helm-charts/blob/3dcf96726ebcfe3784afb526ddcf4095a1684aea/README.md?plain=1#L196-L201
      cockroachdb: #Issuer & {
        metadata: name:      #ComponentName
        metadata: namespace: #TargetNamespace
        spec: selfSigned: {}
      }
    }
  }
}

View File

@@ -0,0 +1,606 @@
package holos
#Values: {
// Generated file, DO NOT EDIT. Source: build/templates/values.yaml
// Overrides the chart name against the label "app.kubernetes.io/name: " placed on every resource this chart creates.
nameOverride: ""
// Override the resource names created by this chart which originally is generated using release and chart name.
fullnameOverride: string | *""
image: {
repository: string | *"cockroachdb/cockroach"
tag: "v23.1.13"
pullPolicy: "IfNotPresent"
credentials: {}
}
// registry: docker.io
// username: john_doe
// password: changeme
// Additional labels to apply to all Kubernetes resources created by this chart.
labels: {}
// app.kubernetes.io/part-of: my-app
// Cluster's default DNS domain.
// You should overwrite it if you're using a different one,
// otherwise CockroachDB nodes discovery won't work.
clusterDomain: "cluster.local"
conf: {
// An ordered list of CockroachDB node attributes.
// Attributes are arbitrary strings specifying machine capabilities.
// Machine capabilities might include specialized hardware or number of cores
// (e.g. "gpu", "x16c").
attrs: []
// - x16c
// - gpu
// Total size in bytes for caches, shared evenly if there are multiple
// storage devices. Size suffixes are supported (e.g. `1GB` and `1GiB`).
// A percentage of physical memory can also be specified (e.g. `.25`).
cache: "25%"
// Sets a name to verify the identity of a cluster.
// The value must match between all nodes specified via `conf.join`.
// This can be used as an additional verification when either the node or
// cluster, or both, have not yet been initialized and do not yet know their
// cluster ID.
// To introduce a cluster name into an already-initialized cluster, pair this
// option with `conf.disable-cluster-name-verification: yes`.
"cluster-name": ""
// Tell the server to ignore `conf.cluster-name` mismatches.
// This is meant for use when opting an existing cluster into starting to use
// cluster name verification, or when changing the cluster name.
// The cluster should be restarted once with `conf.cluster-name` and
// `conf.disable-cluster-name-verification: yes` combined, and once all nodes
// have been updated to know the new cluster name, the cluster can be restarted
// again with `conf.disable-cluster-name-verification: no`.
// This option has no effect if `conf.cluster-name` is not specified.
"disable-cluster-name-verification": false
// The addresses for connecting CockroachDB nodes to an existing cluster.
// If you are deploying a second CockroachDB instance that should join a first
// one, use the below list to join to the existing instance.
// Each item in the array should be a FQDN (and port if needed) resolvable by
// new Pods.
join: []
// New logging configuration.
log: {
enabled: false
// https://www.cockroachlabs.com/docs/v21.1/configure-logs
config: {}
}
// file-defaults:
// dir: /custom/dir/path/
// fluent-defaults:
// format: json-fluent
// sinks:
// stderr:
// channels: [DEV]
// Logs at or above this threshold to STDERR. Ignored when "log" is enabled
logtostderr: "INFO"
// Maximum storage capacity available to store temporary disk-based data for
// SQL queries that exceed the memory budget (e.g. join, sorts, etc are
// sometimes able to spill intermediate results to disk).
// Accepts numbers interpreted as bytes, size suffixes (e.g. `32GB` and
// `32GiB`) or a percentage of disk size (e.g. `10%`).
// The location of the temporary files is within the first store dir.
// If expressed as a percentage, `max-disk-temp-storage` is interpreted
// relative to the size of the storage device on which the first store is
// placed. The temp space usage is never counted towards any store usage
// (although it does share the device with the first store) so, when
// configuring this, make sure that the size of this temp storage plus the size
// of the first store don't exceed the capacity of the storage device.
// If the first store is an in-memory one (i.e. `type=mem`), then this
// temporary "disk" data is also kept in-memory.
// A percentage value is interpreted as a percentage of the available internal
// memory.
// max-disk-temp-storage: 0GB
// Maximum allowed clock offset for the cluster. If observed clock offsets
// exceed this limit, servers will crash to minimize the likelihood of
// reading inconsistent data. Increasing this value will increase the time
// to recovery of failures as well as the frequency of uncertainty-based
// read restarts.
// Note, that this value must be the same on all nodes in the cluster.
// In order to change it, all nodes in the cluster must be stopped
// simultaneously and restarted with the new value.
// max-offset: 500ms
// Maximum memory capacity available to store temporary data for SQL clients,
// including prepared queries and intermediate data rows during query
// execution. Accepts numbers interpreted as bytes, size suffixes
// (e.g. `1GB` and `1GiB`) or a percentage of physical memory (e.g. `.25`).
"max-sql-memory": "25%"
// An ordered, comma-separated list of key-value pairs that describe the
// topography of the machine. Topography might include country, datacenter
// or rack designations. Data is automatically replicated to maximize
// diversities of each tier. The order of tiers is used to determine
// the priority of the diversity, so the more inclusive localities like
// country should come before less inclusive localities like datacenter.
// The tiers and order must be the same on all nodes. Including more tiers
// is better than including fewer. For example:
// locality: country=us,region=us-west,datacenter=us-west-1b,rack=12
// locality: country=ca,region=ca-east,datacenter=ca-east-2,rack=4
// locality: planet=earth,province=manitoba,colo=secondary,power=3
locality: ""
// Run CockroachDB instances in standalone mode with replication disabled
// (replication factor = 1).
// Enabling this option makes the following values to be ignored:
// - `conf.cluster-name`
// - `conf.disable-cluster-name-verification`
// - `conf.join`
//
// WARNING: Enabling this option makes each deployed Pod as a STANDALONE
// CockroachDB instance, so the StatefulSet does NOT FORM A CLUSTER.
// Don't use this option for production deployments unless you clearly
// understand what you're doing.
// Usually, this option is intended to be used in conjunction with
// `statefulset.replicas: 1` for temporary one-time deployments (like
// running E2E tests, for example).
"single-node": false
// If non-empty, create a SQL audit log in the specified directory.
"sql-audit-dir": ""
// CockroachDB's port to listen to inter-communications and client connections.
port: 26257
// CockroachDB's port to listen to HTTP requests.
"http-port": 8080
// CockroachDB's data mount path.
path: "cockroach-data"
// CockroachDB's storage configuration https://www.cockroachlabs.com/docs/v21.1/cockroach-start.html#storage
// Uses --store flag
store: {
enabled: false
// Should be empty or 'mem'
type: null
// Required for type=mem. If type and size is empty - storage.persistentVolume.size is used
size: null
// Arbitrary strings, separated by colons, specifying disk type or capability
attrs: null
}
}
statefulset: {
replicas: 3
updateStrategy: type: "RollingUpdate"
podManagementPolicy: "Parallel"
budget: maxUnavailable: 1
// List of additional command-line arguments you want to pass to the
// `cockroach start` command.
args: []
// - --disable-cluster-name-verification
// List of extra environment variables to pass into container
env: []
// - name: COCKROACH_ENGINE_MAX_SYNC_DURATION
// value: "24h"
// List of Secrets names in the same Namespace as the CockroachDB cluster,
// which shall be mounted into `/etc/cockroach/secrets/` for every cluster
// member.
secretMounts: []
// Additional labels to apply to this StatefulSet and all its Pods.
labels: {
"app.kubernetes.io/component": "cockroachdb"
}
// Additional annotations to apply to the Pods of this StatefulSet.
annotations: {}
// Affinity rules for scheduling Pods of this StatefulSet on Nodes.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity
nodeAffinity: {}
// Inter-Pod Affinity rules for scheduling Pods of this StatefulSet.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity
podAffinity: {}
// Anti-affinity rules for scheduling Pods of this StatefulSet.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity
// You may either toggle options below for default anti-affinity rules,
// or specify the whole set of anti-affinity rules instead of them.
podAntiAffinity: {
// The topologyKey to be used.
// Can be used to spread across different nodes, AZs, regions etc.
topologyKey: "kubernetes.io/hostname"
// Type of anti-affinity rules: either `soft`, `hard` or empty value (which
// disables anti-affinity rules).
type: "soft"
// Weight for `soft` anti-affinity rules.
// Does not apply for other anti-affinity types.
weight: 100
}
// Node selection constraints for scheduling Pods of this StatefulSet.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
// PriorityClassName given to Pods of this StatefulSet
// https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
// Taints to be tolerated by Pods of this StatefulSet.
// https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
// https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: {
maxSkew: 1
topologyKey: "topology.kubernetes.io/zone"
whenUnsatisfiable: "ScheduleAnyway"
}
// Uncomment the following resources definitions or pass them from
// command line to control the CPU and memory resources allocated
// by Pods of this StatefulSet.
resources: {}
// limits:
// cpu: 100m
// memory: 512Mi
// requests:
// cpu: 100m
// memory: 512Mi
// Custom Liveness probe
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-http-request
customLivenessProbe: {}
// httpGet:
// path: /health
// port: http
// scheme: HTTPS
// initialDelaySeconds: 30
// periodSeconds: 5
// Custom Readiness probe
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes
customReadinessProbe: {}
// httpGet:
// path: /health
// port: http
// scheme: HTTPS
// initialDelaySeconds: 30
// periodSeconds: 5
securityContext: {
enabled: true
}
serviceAccount: {
// Specifies whether this ServiceAccount should be created.
create: true
// The name of this ServiceAccount to use.
// If not set and `create` is `true`, then service account is auto-generated.
// If not set and `create` is `false`, then it uses default service account.
name: ""
// Additional serviceAccount annotations (e.g. for attaching AWS IAM roles to pods)
annotations: {}
}
}
service: {
ports: {
// You can set a different external and internal gRPC ports and their name.
grpc: {
external: {
port: 26257
name: "grpc"
}
// If the port number is different than `external.port`, then it will be
// named as `internal.name` in Service.
internal: {
port: 26257
// If using Istio set it to `cockroach`.
name: "grpc-internal"
}
}
http: {
port: 8080
name: "http"
}
}
// This Service is meant to be used by clients of the database.
// It exposes a ClusterIP that will automatically load balance connections
// to the different database Pods.
public: {
type: "ClusterIP"
// Additional labels to apply to this Service.
labels: {
"app.kubernetes.io/component": "cockroachdb"
}
// Additional annotations to apply to this Service.
annotations: {}
}
// This service only exists to create DNS entries for each pod in
// the StatefulSet such that they can resolve each other's IP addresses.
// It does not create a load-balanced ClusterIP and should not be used directly
// by clients in most circumstances.
discovery: {
// Additional labels to apply to this Service.
labels: {
"app.kubernetes.io/component": "cockroachdb"
}
// Additional annotations to apply to this Service.
annotations: {}
}
}
// CockroachDB's ingress for web ui.
ingress: {
enabled: false
labels: {}
annotations: {}
// kubernetes.io/ingress.class: nginx
// cert-manager.io/cluster-issuer: letsencrypt
paths: ["/"]
hosts: []
// - cockroachlabs.com
tls: []
}
// - hosts: [cockroachlabs.com]
// secretName: cockroachlabs-tls
prometheus: {
enabled: true
}
securityContext: enabled: true
// CockroachDB's Prometheus operator ServiceMonitor support
serviceMonitor: {
enabled: false
labels: {}
annotations: {}
interval: "10s"
// scrapeTimeout: 10s
// Limits the ServiceMonitor to the current namespace if set to `true`.
namespaced: false
// tlsConfig: TLS configuration to use when scraping the endpoint.
// Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
tlsConfig: {}
}
// CockroachDB's data persistence.
// If neither `persistentVolume` nor `hostPath` is used, then data will be
// persisted in ad-hoc `emptyDir`.
storage: {
// Absolute path on host to store CockroachDB's data.
// If not specified, then `emptyDir` will be used instead.
// If specified, but `persistentVolume.enabled` is `true`, then has no effect.
hostPath: ""
// If `enabled` is `true` then a PersistentVolumeClaim will be created and
// used to store CockroachDB's data, otherwise `hostPath` is used.
persistentVolume: {
enabled: true
size: string | *"100Gi"
// If defined, then `storageClassName: <storageClass>`.
// If set to "-", then `storageClassName: ""`, which disables dynamic
// provisioning.
// If undefined or empty (default), then no `storageClassName` spec is set,
// so the default provisioner will be chosen (gp2 on AWS, standard on
// GKE, AWS & OpenStack).
storageClass: ""
// Additional labels to apply to the created PersistentVolumeClaims.
labels: {}
// Additional annotations to apply to the created PersistentVolumeClaims.
annotations: {}
}
}
// Kubernetes Job which initializes multi-node CockroachDB cluster.
// It's not created if `statefulset.replicas` is `1`.
init: {
// Additional labels to apply to this Job and its Pod.
labels: {
"app.kubernetes.io/component": "init"
}
// Additional annotations to apply to this Job.
jobAnnotations: {}
// Additional annotations to apply to the Pod of this Job.
annotations: {}
// Affinity rules for scheduling the Pod of this Job.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity
affinity: {}
// Node selection constraints for scheduling the Pod of this Job.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
// Taints to be tolerated by the Pod of this Job.
// https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
// The init Pod runs at cluster creation to initialize CockroachDB. It finishes
// quickly and doesn't continue to consume resources in the Kubernetes
// cluster. Normally, you should leave this section commented out, but if your
// Kubernetes cluster uses Resource Quotas and requires all pods to specify
// resource requests or limits, you can set those here.
resources: {}
// requests:
// cpu: "10m"
// memory: "128Mi"
// limits:
// cpu: "10m"
// memory: "128Mi"
securityContext: {
enabled: true
}
provisioning: {
enabled: false
// https://www.cockroachlabs.com/docs/stable/cluster-settings.html
clusterSettings: null
// cluster.organization: "'FooCorp - Local Testing'"
// enterprise.license: "'xxxxx'"
users: []
// - name:
// password:
// # https://www.cockroachlabs.com/docs/stable/create-user.html#parameters
// options: [LOGIN]
databases: []
}
}
// - name:
// # https://www.cockroachlabs.com/docs/stable/create-database.html#parameters
// options: [encoding='utf-8']
// owners: []
// # https://www.cockroachlabs.com/docs/stable/grant.html#parameters
// owners_with_grant_option: []
// # Backup schedules are not idempotent for now and will fail on next run
// # https://github.com/cockroachdb/cockroach/issues/57892
// backup:
// into: s3://
// # Enterprise-only option (revision_history)
// # https://www.cockroachlabs.com/docs/stable/create-schedule-for-backup.html#backup-options
// options: [revision_history]
// recurring: '@always'
// # Enterprise-only feature. Remove this value to use `FULL BACKUP ALWAYS`
// fullBackup: '@daily'
// schedule:
// # https://www.cockroachlabs.com/docs/stable/create-schedule-for-backup.html#schedule-options
// options: [first_run = 'now']
// Whether to run securely using TLS certificates.
tls: {
enabled: true
copyCerts: image: "busybox"
certs: {
// Bring your own certs scenario. If provided, tls.init section will be ignored.
provided: false
// Secret name for the client root cert.
clientRootSecret: "cockroachdb-root"
// Secret name for node cert.
nodeSecret: "cockroachdb-node"
// Secret name for CA cert
caSecret: "cockroach-ca"
// Enable if the secret is a dedicated TLS.
// TLS secrets are created by cert-manager, for example.
tlsSecret: false
// Enable if you want CockroachDB to create its own certificates
selfSigner: {
// If set, the cockroach db will generate its own certificates
enabled: false | *true
// Run selfSigner as non-root
securityContext: {
enabled: true
}
// If set, the user should provide the CA certificate to sign other certificates.
caProvided: false
// It holds the name of the secret with caCerts. If caProvided is set, this can not be empty.
caSecret: ""
// Minimum Certificate duration for all the certificates, all certs duration will be validated against this.
minimumCertDuration: "624h"
// Duration of CA certificates in hour
caCertDuration: "43800h"
// Expiry window of CA certificates means a window before actual expiry in which CA certs should be rotated.
caCertExpiryWindow: "648h"
// Duration of Client certificates in hour
clientCertDuration: "672h"
// Expiry window of client certificates means a window before actual expiry in which client certs should be rotated.
clientCertExpiryWindow: "48h"
// Duration of node certificates in hour
nodeCertDuration: "8760h"
// Expiry window of node certificates means a window before actual expiry in which node certs should be rotated.
nodeCertExpiryWindow: "168h"
// If set, the cockroachdb cert selfSigner will rotate the certificates before expiry.
rotateCerts: true
// Wait time for each cockroachdb replica to become ready once it comes in running state. Only considered when rotateCerts is set to true
readinessWait: "30s"
// Wait time for each cockroachdb replica to get to running state. Only considered when rotateCerts is set to true
podUpdateTimeout: "2m"
// ServiceAccount annotations for selfSigner jobs (e.g. for attaching AWS IAM roles to pods)
svcAccountAnnotations: {}
}
// Use cert-manager to issue certificates for mTLS.
certManager: true | *false
// Specify an Issuer or a ClusterIssuer to use, when issuing
// node and client certificates. The values correspond to the
// issuerRef specified in the certificate.
certManagerIssuer: {
group: "cert-manager.io"
kind: "Issuer"
name: string | *"cockroachdb"
// Make it false when you are providing your own CA issuer
isSelfSignedIssuer: true
// Duration of Client certificates in hours
clientCertDuration: "672h"
// Expiry window of client certificates means a window before actual expiry in which client certs should be rotated.
clientCertExpiryWindow: "48h"
// Duration of node certificates in hours
nodeCertDuration: "8760h"
// Expiry window of node certificates means a window before actual expiry in which node certs should be rotated.
nodeCertExpiryWindow: "168h"
}
}
selfSigner: {
// Additional annotations to apply to the Pod of this Job.
annotations: {}
// Affinity rules for scheduling the Pod of this Job.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity
affinity: {}
// Node selection constraints for scheduling the Pod of this Job.
// https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
// Taints to be tolerated by the Pod of this Job.
// https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
// Image Placeholder for the selfSigner utility. This will be changed once the CI workflows for the image is in place.
image: {
repository: "cockroachlabs-helm-charts/cockroach-self-signer-cert"
tag: "1.5"
pullPolicy: "IfNotPresent"
credentials: {}
registry: "gcr.io"
}
}
}
// username: john_doe
// password: changeme
networkPolicy: {
enabled: false
ingress: {
// List of sources which should be able to access the CockroachDB Pods via
// gRPC port. Items in this list are combined using a logical OR operation.
// Rules for allowing inter-communication are applied automatically.
// If empty, then connections from any Pod is allowed.
grpc: []
// - podSelector:
// matchLabels:
// app.kubernetes.io/name: my-app-django
// app.kubernetes.io/instance: my-app
// List of sources which should be able to access the CockroachDB Pods via
// HTTP port. Items in this list are combined using a logical OR operation.
// If empty, then connections from any Pod is allowed.
http: []
}
}
// - namespaceSelector:
// matchLabels:
// project: my-project
// To put the admin interface behind Identity Aware Proxy (IAP) on Google Cloud Platform
// make sure to set ingress.paths: ['/*']
iap: {
enabled: false
}
}

View File

@@ -0,0 +1,25 @@
package holos

#Values: {
  image: repository: "quay.io/holos/cockroachdb/cockroach"
  fullnameOverride: #ComponentName
  tls: {
    enabled: true
    certs: {
      // https://github.com/cockroachdb/helm-charts/blob/3dcf96726ebcfe3784afb526ddcf4095a1684aea/README.md?plain=1#L204-L215
      selfSigner: enabled: false
      certManager: true
      certManagerIssuer: {
        kind: "Issuer"
        name: #ComponentName
      }
    }
  }
  storage: persistentVolume: {
    enabled: true
    size:    "1Gi"
  }
}

View File

@@ -0,0 +1,10 @@
package holos

#TargetNamespace: #InstancePrefix + "-zitadel"

#DB: {
  Host: "crdb-public"
}

// The canonical login domain for the entire platform. Zitadel will be active
// on a single cluster at a time, but always accessible from this hostname.
#ExternalDomain: "login.\(#Platform.org.domain)"

View File

@@ -0,0 +1,251 @@
package holos
#Values: {
// Default values for zitadel.
zitadel: {
// The ZITADEL config under configmapConfig is written to a Kubernetes ConfigMap
// See all defaults here:
// https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
configmapConfig: {
ExternalSecure: true
Machine: Identification: {
Hostname: Enabled: true
Webhook: Enabled: false
}
}
// The ZITADEL config under secretConfig is written to a Kubernetes Secret
// See all defaults here:
// https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
secretConfig: null
// Annotations set on secretConfig secret
secretConfigAnnotations: {
"helm.sh/hook": "pre-install,pre-upgrade"
"helm.sh/hook-delete-policy": "before-hook-creation"
"helm.sh/hook-weight": "0"
}
// Reference the name of a secret that contains ZITADEL configuration.
configSecretName: null
// The key under which the ZITADEL configuration is located in the secret.
configSecretKey: "config-yaml"
// ZITADEL uses the masterkey for symmetric encryption.
// You can generate it for example with tr -dc A-Za-z0-9 </dev/urandom | head -c 32
masterkey: ""
// Reference the name of the secret that contains the masterkey. The key should be named "masterkey".
// Note: Either zitadel.masterkey or zitadel.masterkeySecretName must be set
masterkeySecretName: string | *""
// Annotations set on masterkey secret
masterkeyAnnotations: {
"helm.sh/hook": "pre-install,pre-upgrade"
"helm.sh/hook-delete-policy": "before-hook-creation"
"helm.sh/hook-weight": "0"
}
// The CA Certificate needed for establishing secure database connections
dbSslCaCrt: ""
// The Secret containing the CA certificate at key ca.crt needed for establishing secure database connections
dbSslCaCrtSecret: string | *""
// The db admins secret containing the client certificate and key at tls.crt and tls.key needed for establishing secure database connections
dbSslAdminCrtSecret: string | *""
// The db users secret containing the client certificate and key at tls.crt and tls.key needed for establishing secure database connections
dbSslUserCrtSecret: string | *""
// Generate a self-signed certificate using an init container
// This will also mount the generated files to /etc/tls/ so that you can reference them in the pod.
// E.G. KeyPath: /etc/tls/tls.key CertPath: /etc/tls/tls.crt
// By default, the SAN DNS names include localhost, the POD IP address and the POD name. You may include one more by using additionalDnsName, like "my.zitadel.fqdn".
selfSignedCert: {
enabled: false
additionalDnsName: null
}
}
replicaCount: 3
image: {
repository: "ghcr.io/zitadel/zitadel"
pullPolicy: "IfNotPresent"
// Overrides the image tag whose default is the chart appVersion.
tag: ""
}
chownImage: {
repository: "alpine"
pullPolicy: "IfNotPresent"
tag: "3.19"
}
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
// Annotations to add to the deployment
annotations: {}
// Annotations to add to the configMap
configMap: {
annotations: {
"helm.sh/hook": "pre-install,pre-upgrade"
"helm.sh/hook-delete-policy": "before-hook-creation"
"helm.sh/hook-weight": "0"
}
}
serviceAccount: {
// Specifies whether a service account should be created
create: true
// Annotations to add to the service account
annotations: {
"helm.sh/hook": "pre-install,pre-upgrade"
"helm.sh/hook-delete-policy": "before-hook-creation"
"helm.sh/hook-weight": "0"
}
// The name of the service account to use.
// If not set and create is true, a name is generated using the fullname template
name: ""
}
podAnnotations: {}
podAdditionalLabels: {}
podSecurityContext: {
runAsNonRoot: true
runAsUser: 1000
}
securityContext: {}
// Additional environment variables
env: []
// - name: ZITADEL_DATABASE_POSTGRES_HOST
// valueFrom:
// secretKeyRef:
// name: postgres-pguser-postgres
// key: host
service: {
type: "ClusterIP"
// If service type is "ClusterIP", this can optionally be set to a fixed IP address.
clusterIP: ""
port: 8080
protocol: "http2"
annotations: {}
scheme: "HTTP"
}
ingress: {
enabled: false
className: ""
annotations: {}
hosts: [{
host: "localhost"
paths: [{
path: "/"
pathType: "Prefix"
}]
}]
tls: []
}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
topologySpreadConstraints: []
initJob: {
// Once ZITADEL is installed, the initJob can be disabled.
enabled: true
annotations: {
"helm.sh/hook": "pre-install,pre-upgrade"
"helm.sh/hook-delete-policy": "before-hook-creation"
"helm.sh/hook-weight": "1"
}
resources: {}
backoffLimit: 5
activeDeadlineSeconds: 300
extraContainers: []
podAnnotations: {}
// Available init commands :
// "": initialize ZITADEL instance (without skip anything)
// database: initialize only the database
// grant: set ALL grant to user
// user: initialize only the database user
// zitadel: initialize ZITADEL internals (skip "create user" and "create database")
command: ""
}
setupJob: {
annotations: {
"helm.sh/hook": "pre-install,pre-upgrade"
"helm.sh/hook-delete-policy": "before-hook-creation"
"helm.sh/hook-weight": "2"
}
resources: {}
activeDeadlineSeconds: 300
extraContainers: []
podAnnotations: {}
additionalArgs: ["--init-projections=true"]
machinekeyWriter: {
image: {
repository: "bitnami/kubectl"
tag: ""
}
resources: {}
}
}
readinessProbe: {
enabled: true
initialDelaySeconds: 0
periodSeconds: 5
failureThreshold: 3
}
livenessProbe: {
enabled: true
initialDelaySeconds: 0
periodSeconds: 5
failureThreshold: 3
}
startupProbe: {
enabled: true
periodSeconds: 1
failureThreshold: 30
}
metrics: {
enabled: false
serviceMonitor: {
// If true, the chart creates a ServiceMonitor that is compatible with Prometheus Operator
// https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.ServiceMonitor.
// The Prometheus community Helm chart installs this operator
// https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#kube-prometheus-stack
enabled: false
honorLabels: false
honorTimestamps: true
}
}
pdb: {
enabled: false
// these values are used for the PDB and are mutually exclusive
minAvailable: 1
// maxUnavailable: 1
annotations: {}
}
}

View File

@@ -0,0 +1,34 @@
package holos

#Values: {
  // https://raw.githubusercontent.com/zitadel/zitadel-charts/main/examples/4-cockroach-secure/zitadel-values.yaml
  zitadel: {
    masterkeySecretName: "zitadel-masterkey"
    // https://github.com/zitadel/zitadel-charts/blob/zitadel-7.4.0/charts/zitadel/templates/configmap.yaml#L13
    configmapConfig: {
      // NOTE: You can change the ExternalDomain, ExternalPort and ExternalSecure
      // configuration options at any time. However, for ZITADEL to be able to
      // pick up the changes, you need to rerun ZITADEL's setup phase. Do so with
      // kubectl delete job zitadel-setup, then re-apply the new config.
      //
      // https://zitadel.com/docs/self-hosting/manage/custom-domain
      ExternalDomain: #ExternalDomain
      ExternalPort:   443
      ExternalSecure: true
      TLS: Enabled: false
      Database: Cockroach: {
        Host: #DB.Host
        User: SSL: Mode:  "verify-full"
        Admin: SSL: Mode: "verify-full"
      }
    }
    // Managed by crdb component
    dbSslCaCrtSecret:    "cockroach-ca"
    dbSslAdminCrtSecret: "cockroachdb-root"
    // Managed by this component
    dbSslUserCrtSecret: "cockroachdb-zitadel"
  }
}

View File

@@ -0,0 +1,43 @@
package holos

#InputKeys: component: "zitadel"

// Upstream helm chart doesn't specify the namespace field for all resources.
#Kustomization: spec: targetNamespace: #TargetNamespace

#HelmChart & {
  namespace: #TargetNamespace
  chart: {
    name:    "zitadel"
    version: "7.9.0"
    repository: {
      name: "zitadel"
      url:  "https://charts.zitadel.com"
    }
  }
  values: #Values
  apiObjects: {
    ExternalSecret: masterkey: #ExternalSecret & {
      _name: "zitadel-masterkey"
    }
    Certificate: zitadel: #Certificate & {
      metadata: name:      "crdb-zitadel-client"
      metadata: namespace: #TargetNamespace
      spec: {
        commonName: "zitadel"
        issuerRef: {
          group: "cert-manager.io"
          kind:  "Issuer"
          name:  "crdb-ca-issuer"
        }
        privateKey: algorithm: "RSA"
        privateKey: size:      2048
        renewBefore: "48h0m0s"
        secretName:  "cockroachdb-zitadel"
        subject: organizations: ["Cockroach"]
        usages: ["digital signature", "key encipherment", "client auth"]
      }
    }
  }
}

View File

@@ -10,7 +10,9 @@ package holos
}
#HelmChart & {
values: installCRDs: true
values: #UpstreamValues & {
installCRDs: true
}
namespace: #TargetNamespace
chart: {
name: "cert-manager"

View File

@@ -0,0 +1,10 @@
package holos

#InputKeys: component: "cni"
#TargetNamespace: "kube-system"

#HelmChart & {
  namespace: #TargetNamespace
  chart: name: "cni"
  values: #IstioValues
}

View File

@@ -0,0 +1,46 @@
package holos

// The primary istio Gateway, named default
let Name = "gateway"

#InputKeys: component: Name
#TargetNamespace: "istio-ingress"
#DependsOn: _IngressGateway

// TODO: We need to generalize this for multiple services hanging off the default gateway.
let LoginCert = #Certificate & {
  metadata: {
    name:      "login"
    namespace: #TargetNamespace
  }
  spec: {
    commonName: "login.\(#Platform.org.domain)"
    dnsNames: [commonName]
    secretName: metadata.name
    issuerRef: kind: "ClusterIssuer"
    issuerRef: name: "letsencrypt"
  }
}

#KubernetesObjects & {
  apiObjects: {
    Certificate: login: LoginCert
    Gateway: default: #Gateway & {
      metadata: name:      "default"
      metadata: namespace: #TargetNamespace
      spec: selector: istio: "ingressgateway"
      spec: servers: [
        {
          hosts: ["prod-iam-zitadel/\(LoginCert.spec.commonName)"]
          port: name:     "https-prod-iam-zitadel"
          port: number:   443
          port: protocol: "HTTPS"
          tls: credentialName: LoginCert.spec.secretName
          tls: mode:           "SIMPLE"
        },
      ]
    }
  }
}

View File

@@ -0,0 +1,75 @@
package holos

let Name = "httpbin"
let SecretName = #InputKeys.cluster + "-" + Name
let MatchLabels = {app: Name} & #SelectorLabels

let Metadata = {
  name:      Name
  namespace: #TargetNamespace
  labels: app: Name
}

#InputKeys: component: Name
#TargetNamespace: "istio-ingress"
#DependsOn: _IngressGateway

let Cert = #HTTP01Cert & {
  _name:   Name
  _secret: SecretName
}

#KubernetesObjects & {
  apiObjects: {
    Certificate: httpbin: Cert.object
    Deployment: httpbin: #Deployment & {
      metadata: Metadata
      spec: selector: matchLabels: MatchLabels
      spec: template: {
        metadata: labels: MatchLabels
        metadata: labels: #CommonLabels
        metadata: labels: #IstioSidecar
        spec: securityContext: seccompProfile: type: "RuntimeDefault"
        spec: containers: [{
          name:  Name
          image: "quay.io/holos/mccutchen/go-httpbin"
          ports: [{containerPort: 8080}]
          securityContext: {
            seccompProfile: type:     "RuntimeDefault"
            allowPrivilegeEscalation: false
            runAsNonRoot:             true
            runAsUser:                1337
            runAsGroup:               1337
            capabilities: drop: ["ALL"]
          }
        }]
      }
    }
    Service: httpbin: #Service & {
      metadata: Metadata
      spec: selector: MatchLabels
      spec: ports: [
        {port: 80, targetPort: 8080, protocol: "TCP", name: "http"},
      ]
    }
    Gateway: httpbin: #Gateway & {
      metadata: Metadata
      spec: selector: istio: "ingressgateway"
      spec: servers: [
        {
          hosts: ["\(#TargetNamespace)/\(Cert.Host)"]
          port: name:     "https-\(#InstanceName)"
          port: number:   443
          port: protocol: "HTTPS"
          tls: credentialName: Cert.SecretName
          tls: mode:           "SIMPLE"
        },
      ]
    }
    VirtualService: httpbin: #VirtualService & {
      metadata: Metadata
      spec: hosts: [Cert.Host]
      spec: gateways: ["\(#TargetNamespace)/\(Name)"]
      spec: http: [{route: [{destination: host: Name}]}]
    }
  }
}

View File

@@ -0,0 +1,155 @@
package holos

import "encoding/json"

#InputKeys: component: "ingress"
#TargetNamespace: "istio-ingress"
#DependsOn: _IstioD

#HelmChart & {
  chart: name: "gateway"
  namespace: #TargetNamespace
  values: #GatewayValues & {
    // This component expects the load balancer to send the PROXY protocol header.
    // Refer to: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/annotations/#proxy-protocol-v2
    podAnnotations: "proxy.istio.io/config": json.Marshal(_ProxyProtocol)
    // TODO This configuration is specific to the OIS Metal NLB, refactor it out to the metal collection.
    service: {
      type: "NodePort"
      annotations: "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol": "*"
      externalTrafficPolicy: "Local"
      // Add 30000 to the port to get the NodePort.
      ports: [
        {
          name:       "status-port"
          port:       15021
          protocol:   "TCP"
          targetPort: 15021
          nodePort:   30021
        },
        {
          name:       "http2"
          port:       80
          protocol:   "TCP"
          targetPort: 80
          nodePort:   30080
        },
        {
          name:       "https"
          port:       443
          protocol:   "TCP"
          targetPort: 443
          nodePort:   30443
        },
      ]
    }
  }
  apiObjects: _APIObjects
}

_ProxyProtocol: gatewayTopology: proxyProtocol: {}

// Additional holos specific API Objects
let Name = #GatewayValues.name

let GatewayLabels = {
  app:   Name
  istio: "ingressgateway"
}

let RedirectMetaName = {
  name:      Name + "-https-redirect"
  namespace: #TargetNamespace
}

// https-redirect
_APIObjects: {
  Gateway: {
    httpsRedirect: #Gateway & {
      metadata: RedirectMetaName
      spec: selector: GatewayLabels
      spec: servers: [{
        port: {
          number:   80
          name:     "http2"
          protocol: "HTTP2"
        }
        hosts: ["*"]
        // handled by the VirtualService
        tls: httpsRedirect: false
      }]
    }
  }
  VirtualService: {
    httpsRedirect: #VirtualService & {
      metadata: RedirectMetaName
      spec: hosts: ["*"]
      spec: gateways: [RedirectMetaName.name]
      spec: http: [{
        match: [{withoutHeaders: ":path": prefix: "/.well-known/acme-challenge/"}]
        redirect: {
          scheme:       "https"
          redirectCode: 302
        }
      }]
    }
  }
}

let LoopbackName = Name + "-loopback"
let LoopbackDescription = "Allows in-cluster traffic to stay in cluster via traffic routing"

let LoopbackLabels = {
  app:   LoopbackName
  istio: "ingressgateway"
}

let LoopbackMetaName = {
  name:      LoopbackName
  namespace: #TargetNamespace
}

// istio-ingressgateway-loopback
_APIObjects: {
  Deployment: {
    loopback: #Deployment & {
      _description: LoopbackDescription
      metadata: LoopbackMetaName
      spec: {
        selector: matchLabels: LoopbackLabels
        template: {
          metadata: {
            annotations: "inject.istio.io/templates": "gateway"
            annotations: #Description & {
              _Description: LoopbackDescription
            }
            labels: LoopbackLabels & {"sidecar.istio.io/inject": "true"}
          }
          spec: {
            serviceAccountName: "istio-ingressgateway"
            // Allow binding to all ports (such as 80 and 443)
            securityContext: {
              runAsNonRoot: true
              seccompProfile: type: "RuntimeDefault"
              sysctls: [{name: "net.ipv4.ip_unprivileged_port_start", value: "0"}]
            }
            containers: [{
              name:  "istio-proxy"
              image: "auto" // Managed by istiod
              securityContext: {
                allowPrivilegeEscalation: false
                capabilities: drop: ["ALL"]
                runAsUser:  1337
                runAsGroup: 1337
              }
            }]
          }
        }
      }
    }
  }
  Service: {
    loopback: #Service & {
      _description: LoopbackDescription
      metadata: LoopbackMetaName
      spec: selector: LoopbackLabels
      spec: ports: [{port: 80, name: "http"}, {port: 443, name: "https"}]
    }
  }
}

View File

@@ -1,3 +1,13 @@
package holos

#DependsOn: _IstioBase

#HelmChart: {
  chart: {
    version: "1.20.3"
    repository: {
      name: "istio"
      url:  "https://istio-release.storage.googleapis.com/charts"
    }
  }
}

View File

@@ -8,12 +8,7 @@ import "encoding/yaml"
#HelmChart & {
namespace: #TargetNamespace
chart: {
name: "istiod"
version: "1.20.3"
repository: {
name: "istio"
url: "https://istio-release.storage.googleapis.com/charts"
}
name: "istiod"
}
values: #IstioValues & {
pilot: {

View File

@@ -44,7 +44,7 @@ _MeshConfig: {
"cookie",
"x-forwarded-for",
]
port: 4180
port: 4180
service: "oauth2-proxy.istio-ingress.svc.cluster.local"
}
}, {

View File

@@ -0,0 +1,161 @@
package holos
// Default values.yaml imported from the cni chart
#CNIValues: {
cni: {
hub: ""
tag: ""
variant: ""
image: "install-cni"
pullPolicy: ""
// Refer to https://istio.io/latest/docs/setup/additional-setup/cni/#installing-with-helm
enabled: #IstioValues.istio_cni.enabled
// Configuration log level of the istio-cni binary.
// By default istio-cni sends all logs to the UDS server;
// if you want to see them you need to change global.logging.level with cni:debug.
logLevel: "debug"
// Configuration file to insert istio-cni plugin configuration
// by default this will be the first file found in the cni-conf-dir
// Example
// cniConfFileName: 10-calico.conflist
// CNI bin and conf dir override settings
// defaults:
cniBinDir: "" // Auto-detected based on version; defaults to /opt/cni/bin.
cniConfDir: "/etc/cni/net.d"
cniConfFileName: ""
// This directory must exist on the node, if it does not, consult your container runtime
// documentation for the appropriate path.
cniNetnsDir: null // Defaults to '/var/run/netns', in minikube/docker/others can be '/var/run/docker/netns'.
excludeNamespaces: [
"istio-system",
"kube-system",
]
// Allows user to set custom affinity for the DaemonSet
affinity: {}
// Custom annotations on pod level, if you need them
podAnnotations: {}
// If this value is set a RoleBinding will be created
// in the same namespace as the istio-cni DaemonSet is created.
// This can be used to bind a preexisting ClusterRole to the istio/cni ServiceAccount
// e.g. if you use PodSecurityPolicies
psp_cluster_role: ""
// Deploy the config files as plugin chain (value "true") or as standalone files in the conf dir (value "false")?
// Some k8s flavors (e.g. OpenShift) do not support the chain approach, set to false if this is the case
chained: #IstioValues.istio_cni.chained
// Allow the istio-cni container to run in privileged mode, needed for some platforms (e.g. OpenShift) or features (repairPods)
privileged: false
// Custom configuration happens based on the CNI provider.
// Possible values: "default", "multus"
provider: "default"
// Configure ambient settings
ambient: {
// If enabled, ambient redirection will be enabled
enabled: false
// Set ambient redirection mode: "iptables" or "ebpf"
redirectMode: "iptables"
// Set ambient config dir path: defaults to /etc/ambient-config
configDir: ""
}
repair: {
enabled: true
hub: ""
tag: ""
// Repair controller has 3 modes. Pick which one meets your use cases. Note only one may be used.
// This defines the action the controller will take when a pod is detected as broken.
// labelPods will label all pods with <brokenPodLabelKey>=<brokenPodLabelValue>.
// This is only capable of identifying broken pods; the user is responsible for fixing them (generally, by deleting them).
labelPods: false
// deletePods will delete any broken pod. These will then be rescheduled, hopefully onto a node that is fully ready.
deletePods: true
// repairPods will dynamically repair any broken pod by setting up the pod networking configuration even after it has started.
// Note the pod will be crashlooping, so this may take a few minutes to become fully functional based on when the retry occurs.
// This requires no RBAC privilege, but does require `securityContext.privileged`.
repairPods: false
initContainerName: "istio-validation"
brokenPodLabelKey: "cni.istio.io/uninitialized"
brokenPodLabelValue: "true"
}
// Set to `type: RuntimeDefault` to use the default profile if available.
seccompProfile: {}
resources: requests: {
cpu: "100m"
memory: "100Mi"
}
resourceQuotas: {
enabled: false
pods: 5000
}
// The number of pods that can be unavailable during rolling update (see
// `updateStrategy.rollingUpdate.maxUnavailable` here:
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec).
// May be specified as a number of pods or as a percent of the total number
// of pods at the start of the update.
rollingMaxUnavailable: 1
}
// Revision is set as 'version' label and part of the resource names when installing multiple control planes.
revision: ""
// For Helm compatibility.
ownerName: ""
global: {
// Default hub for Istio images.
// Releases are published to docker hub under 'istio' project.
// Dev builds from prow are on gcr.io
hub: "docker.io/istio"
// Default tag for Istio images.
tag: "1.20.3"
// Variant of the image to use.
// Currently supported are: [debug, distroless]
variant: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent.
imagePullPolicy: ""
// change cni scope level to control logging out of istio-cni-node DaemonSet
logging: {
level: "default:info,cni:info"
}
logAsJson: false
// ImagePullSecrets for all ServiceAccounts: a list of secrets in the same namespace
// to use for pulling any images in pods that reference the ServiceAccount.
// For components that don't use ServiceAccounts (e.g. grafana, servicegraph, tracing),
// ImagePullSecrets will be added to the corresponding Deployment (or StatefulSet) objects.
// Must be set for any cluster configured with a private docker registry.
imagePullSecrets: []
// - private-registry-key
// Default resources allocated
defaultResources: {
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}
}

View File

@@ -0,0 +1,170 @@
package holos
// Gateway default values.yaml imported from the gateway chart.
#GatewayValues: {
// Name allows overriding the release name. Generally this should not be set
name: "istio-ingressgateway"
// revision declares which revision this gateway is a part of
revision: ""
// Controls the spec.replicas setting for the Gateway deployment if set.
// Otherwise defaults to Kubernetes Deployment default (1).
replicaCount: null
kind: "Deployment"
rbac: {
// If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
// when using http://gateway-api.org/.
enabled: true
}
serviceAccount: {
// If set, a service account will be created. Otherwise, the default is used
create: true
// Annotations to add to the service account
annotations: {}
// The name of the service account to use.
// If not set, the release name is used
name: ""
}
podAnnotations: {
"prometheus.io/port": "15020"
"prometheus.io/scrape": "true"
"prometheus.io/path": "/stats/prometheus"
"inject.istio.io/templates": "gateway"
"sidecar.istio.io/inject": "true"
...
}
// Define the security context for the pod.
// If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
// On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: {
seccompProfile: type: "RuntimeDefault"
sysctls: [{name: "net.ipv4.ip_unprivileged_port_start", value: "0"}]
}
containerSecurityContext: null
service: {
// Type of service. Set to "None" to disable the service entirely
type: string | *"LoadBalancer"
ports: [...] | *[{
name: "status-port"
port: 15021
protocol: "TCP"
targetPort: 15021
}, {
name: "http2"
port: 80
protocol: "TCP"
targetPort: 80
}, {
name: "https"
port: 443
protocol: "TCP"
targetPort: 443
}]
annotations: {...}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: string | *""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
}
resources: {
requests: {
cpu: "100m"
memory: "128Mi"
}
limits: {
cpu: "2000m"
memory: "1024Mi"
}
}
autoscaling: {
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
autoscaleBehavior: {}
}
// Pod environment variables
env: {}
// Labels to apply to all resources
labels: {}
// Annotations to apply to all resources
annotations: {}
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
// If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent
imagePullPolicy: ""
imagePullSecrets: []
// This value configures a Kubernetes PodDisruptionBudget for the gateway.
//
// By default `podDisruptionBudget` is an empty object `{}`, which disables it:
// no PodDisruptionBudget resource is created.
//
// To enable it, set `minAvailable` or `maxUnavailable`. For example, to keep
// at least one replica available:
//
// podDisruptionBudget:
//   minAvailable: 1
//
// Or, to allow at most one unavailable replica:
//
// podDisruptionBudget:
//   maxUnavailable: 1
//
// The optional `unhealthyPodEvictionPolicy` field accepts `IfHealthyBudget`
// or `AlwaysAllow`:
//
// podDisruptionBudget:
//   minAvailable: 1
//   unhealthyPodEvictionPolicy: AlwaysAllow
//
podDisruptionBudget: {}
terminationGracePeriodSeconds: 30
// A list of `Volumes` added into the Gateway Pods. See
// https://kubernetes.io/docs/concepts/storage/volumes/.
volumes: []
// A list of `VolumeMounts` added into the Gateway Pods. See
// https://kubernetes.io/docs/concepts/storage/volumes/.
volumeMounts: []
// Configure this to a higher priority class to make sure your Istio gateway pods
// will not be evicted because of a low priority class.
// Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
// for more detail.
priorityClassName: ""
}
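
Because `service.type` and `service.ports` are declared as defaulted disjunctions (`string | *"LoadBalancer"`, `[...] | *[...]`), a cluster-specific file can replace them wholesale while the remaining defaults stay intact. A hedged sketch, assuming #GatewayValues is unified into the gateway chart's helm values; the `values` field name is illustrative:

```
values: #GatewayValues & {
	service: {
		// Override the *"LoadBalancer" default, e.g. when an external
		// load balancer already fronts the gateway.
		type: "ClusterIP"
		ports: [{
			name:       "https"
			port:       443
			protocol:   "TCP"
			targetPort: 443
		}]
	}
}
```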

View File

@@ -4,11 +4,11 @@ package holos
#InputKeys: project: "mesh"
// Shared dependencies for all components in this collection.
#Kustomization: spec: targetNamespace: #TargetNamespace
#DependsOn: _Namespaces
// Common Dependencies
_CertManager: CertManager: name: "\(#InstancePrefix)-certmanager"
_Namespaces: Namespaces: name: "\(#StageName)-secrets-namespaces"
_IstioBase: IstioBase: name: "\(#InstancePrefix)-istio-base"
_IstioPilot: IstioPilot: name: "\(#InstancePrefix)-istiod"
_CertManager: CertManager: name: "\(#InstancePrefix)-certmanager"
_Namespaces: Namespaces: name: "\(#StageName)-secrets-namespaces"
_IstioBase: IstioBase: name: "\(#InstancePrefix)-istio-base"
_IstioD: IstioD: name: "\(#InstancePrefix)-istiod"
_IngressGateway: IngressGateway: name: "\(#InstancePrefix)-ingress"
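
Each `_Xyz` value above is a single-entry struct mapping a dependency label to its Flux Kustomization name, so a component can declare its ordering by plain unification. A speculative sketch; whether holos accepts multiple entries this way is an assumption:

```
// Hypothetical component file: reconcile the ingress gateway only
// after the namespaces, istio-base, and istiod Kustomizations.
#DependsOn: _Namespaces & _IstioBase & _IstioD
```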

View File

@@ -538,7 +538,10 @@ package holos
// keep in sync with settings used when installing the Istio CNI chart
istio_cni: {
enabled: false
// Refer to https://istio.io/latest/docs/setup/additional-setup/cni/#installing-with-helm
// values.istio_cni.enabled should be set to the same value as values.cni.enabled.
// values.istio_cni.chained should be set to the same value as values.cni.chained.
enabled: true
chained: true
}
}
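
The comments above encode a cross-chart invariant: `cni.enabled`/`cni.chained` in the CNI chart must equal `istio_cni.enabled`/`istio_cni.chained` here. The CNI values earlier already do this for `chained` by referencing `#IstioValues.istio_cni.chained`; a sketch of the full pattern, with the #CniValues name assumed for illustration:

```
// Single source of truth shared by the istiod and istio-cni charts.
#IstioValues: istio_cni: {
	enabled: true
	chained: true
}

// The CNI chart's values reference the same fields, so the two
// charts cannot drift apart.
#CniValues: cni: {
	enabled: #IstioValues.istio_cni.enabled
	chained: #IstioValues.istio_cni.chained
}
```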

View File

@@ -1,6 +1,14 @@
package holos
let Privileged = {labels: "pod-security.kubernetes.io/enforce": "privileged"}
// Refer to https://kubernetes.io/docs/concepts/security/pod-security-standards/
let Restricted = {
labels: "pod-security.kubernetes.io/enforce": "restricted"
labels: "pod-security.kubernetes.io/enforce-version": "latest"
}
let Privileged = {
labels: "pod-security.kubernetes.io/enforce": "privileged"
labels: "pod-security.kubernetes.io/enforce-version": "latest"
}
// #PlatformNamespaces is the union of all namespaces across all cluster types. Namespaces are created
// in all clusters regardless of whether they're used within the cluster. This is important for security
// and consistency with IAM, RBAC, and Secrets sync between clusters.
@@ -10,7 +18,8 @@ let Privileged = {labels: "pod-security.kubernetes.io/enforce": "privileged"}
{name: "flux-system"},
{name: "ceph-system"} & Privileged,
{name: "istio-system"} & Privileged,
{name: "istio-ingress"} & Privileged,
{name: "istio-ingress"} & Restricted,
{name: "cert-manager"},
{name: "argocd"},
{name: "prod-iam-zitadel"},
]
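
For reference, `{name: "istio-ingress"} & Restricted` unifies the two `labels` fields into one struct, so the rendered Namespace should carry both pod security labels:

```
// Result of the unification above (illustrative):
{
	name: "istio-ingress"
	labels: {
		"pod-security.kubernetes.io/enforce":         "restricted"
		"pod-security.kubernetes.io/enforce-version": "latest"
	}
}
```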

View File

@@ -3,12 +3,17 @@ package holos
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ksv1 "kustomize.toolkit.fluxcd.io/kustomization/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
batchv1 "k8s.io/api/batch/v1"
es "external-secrets.io/externalsecret/v1beta1"
ss "external-secrets.io/secretstore/v1beta1"
cm "cert-manager.io/clusterissuer/v1"
is "cert-manager.io/issuer/v1"
ci "cert-manager.io/clusterissuer/v1"
crt "cert-manager.io/certificate/v1"
gw "networking.istio.io/gateway/v1beta1"
vs "networking.istio.io/virtualservice/v1beta1"
"encoding/yaml"
)
@@ -36,11 +41,17 @@ _apiVersion: "holos.run/v1alpha1"
// #TargetNamespace is the target namespace for a holos component.
#TargetNamespace: string
// #SelectorLabels are mixed into selectors.
#SelectorLabels: {
"holos.run/stage.name": #StageName
"holos.run/project.name": #CollectionName
"holos.run/component.name": #ComponentName
...
}
// #CommonLabels are mixed into every kubernetes api object.
#CommonLabels: {
"holos.run/stage.name": #StageName
"holos.run/project.name": #CollectionName
"holos.run/component.name": #ComponentName
#SelectorLabels
"app.kubernetes.io/part-of": #StageName
"app.kubernetes.io/name": #CollectionName
"app.kubernetes.io/component": #ComponentName
@@ -49,14 +60,26 @@ _apiVersion: "holos.run/v1alpha1"
}
#ClusterObject: {
_description: string | *""
metadata: metav1.#ObjectMeta & {
labels: #CommonLabels
annotations: #Description & {
_Description: _description
...
}
}
...
}
#Description: {
_Description: string | *""
"holos.run/description": _Description
...
}
#NamespaceObject: #ClusterObject & {
metadata: namespace: string
...
}
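
A hypothetical instance to show what the wrappers contribute: anything unified with #ClusterObject picks up #CommonLabels and a `holos.run/description` annotation derived from `_description`, and #NamespaceObject additionally requires `metadata.namespace`:

```
// Illustrative only; the example field name is not part of the schema.
example: #NamespaceObject & {
	_description: "demo object"
	apiVersion: "v1"
	kind:       "ConfigMap"
	metadata: {
		name:      "demo"
		namespace: "default"
	}
}
```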
// Kubernetes API Objects
@@ -68,14 +91,42 @@ _apiVersion: "holos.run/v1alpha1"
}
#ClusterRole: #ClusterObject & rbacv1.#ClusterRole
#ClusterRoleBinding: #ClusterObject & rbacv1.#ClusterRoleBinding
#ClusterIssuer: #ClusterObject & cm.#ClusterIssuer & {...}
#ClusterIssuer: #ClusterObject & ci.#ClusterIssuer & {...}
#Issuer: #NamespaceObject & is.#Issuer
#Role: #NamespaceObject & rbacv1.#Role
#RoleBinding: #NamespaceObject & rbacv1.#RoleBinding
#ConfigMap: #NamespaceObject & corev1.#ConfigMap
#ServiceAccount: #NamespaceObject & corev1.#ServiceAccount
#Pod: #NamespaceObject & corev1.#Pod
#Service: #NamespaceObject & corev1.#Service
#Job: #NamespaceObject & batchv1.#Job
#CronJob: #NamespaceObject & batchv1.#CronJob
#Deployment: #NamespaceObject & appsv1.#Deployment
#Gateway: #NamespaceObject & gw.#Gateway
#VirtualService: #NamespaceObject & vs.#VirtualService
#Certificate: #NamespaceObject & crt.#Certificate
// #HTTP01Cert defines an http01 certificate.
#HTTP01Cert: {
_name: string
_secret: string | *_name
SecretName: _secret
Host: _name + "." + #ClusterDomain
object: #Certificate & {
metadata: {
name: _secret
namespace: string | *#TargetNamespace
}
spec: {
commonName: Host
dnsNames: [Host]
secretName: _secret
issuerRef: kind: "ClusterIssuer"
issuerRef: name: "letsencrypt"
}
}
}
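
A hedged usage sketch: setting only `_name` yields a Certificate whose host is derived from #ClusterDomain and whose secret name defaults to `_name`; the LoginCert field name is hypothetical:

```
// Hypothetical: request a cert for login.<cluster domain> via the
// letsencrypt ClusterIssuer.
LoginCert: #HTTP01Cert & {_name: "login"}
// LoginCert.object is the #Certificate to include in the rendered output.
```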
// Flux Kustomization CRDs
#Kustomization: #NamespaceObject & ksv1.#Kustomization & {
@@ -300,6 +351,15 @@ _apiVersion: "holos.run/v1alpha1"
// #SecretName is the name of a Secret, usually coupling a Deployment to an ExternalSecret
#SecretName: string
// #ClusterDomain is the cluster-specific domain
#ClusterDomain: #InputKeys.cluster + "." + #Platform.org.domain
// #IstioSidecar represents the istio sidecar inject label
#IstioSidecar: {
"sidecar.istio.io/inject": "true"
...
}
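
Because #IstioSidecar is an open struct, it unifies cleanly into pod template labels alongside #SelectorLabels. A minimal sketch; the example field and workload are illustrative:

```
example: #Deployment & {
	metadata: {name: "httpbin", namespace: "istio-ingress"}
	spec: template: metadata: labels: #IstioSidecar
}
```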
// By default, render kind: Skipped so holos knows to skip over intermediate cue files.
// This enables `holos render ./foo/bar/baz/...` when bar contains intermediate constraints
// which are not complete components; holos skips over these intermediate cue instances.

View File

@@ -17,6 +17,7 @@ import (
"os/exec"
"path/filepath"
"slices"
"strings"
"cuelang.org/go/cue/cuecontext"
"cuelang.org/go/cue/load"
@@ -381,6 +382,13 @@ func runHelm(ctx context.Context, hc *HelmChart, r *Result, path holos.PathCompo
chart := hc.Chart
helmOut, err := runCmd(ctx, "helm", "template", "--values", valuesPath, "--namespace", hc.Namespace, "--kubeconfig", "/dev/null", "--version", chart.Version, chart.Name, cachedChartPath)
if err != nil {
stderr := helmOut.stderr.String()
lines := strings.Split(stderr, "\n")
// Wrap each "Error:" line from helm's stderr so the root cause is
// visible in the returned error instead of just the exit status.
for _, line := range lines {
if strings.HasPrefix(line, "Error:") {
err = fmt.Errorf("%s: %w", line, err)
}
}
return wrapper.Wrap(fmt.Errorf("could not run helm template: %w", err))
}

View File

@@ -1 +1 @@
49
50

View File

@@ -1 +1 @@
1
2