Compare commits


1 Commit

Author: Jeff McCune
SHA1: 27e5846a4c
Message: Changes we discuessed at Zipper on 7/17
Date: 2024-07-18 07:57:47 -07:00
1395 changed files with 161430 additions and 214857 deletions

View File

@@ -5,324 +5,48 @@
"mdx"
],
"words": [
"acmesolver",
"acraccesstokens",
"admissionregistration",
"alertmanager",
"alertmanagers",
"anthos",
"apiextensions",
"apimachinery",
"apiobjects",
"apiservers",
"applicationset",
"applicationsets",
"appproject",
"appprojects",
"argoproj",
"argumentless",
"authcode",
"authorizationpolicies",
"authorizationpolicy",
"authpolicy",
"authproxy",
"authroutes",
"automount",
"automounting",
"autoscaler",
"balancereader",
"blackbox",
"buildplan",
"builtinpluginloadingoptions",
"cachedir",
"cadvisor",
"cainjector",
"CAROOT",
"certificaterequest",
"certificaterequests",
"certificatesigningrequests",
"clientset",
"clsx",
"clusterexternalsecret",
"clusterexternalsecrets",
"clusterissuer",
"clusterissuers",
"clusterrole",
"clusterrolebinding",
"clustersecretstore",
"clustersecretstores",
"clusterwide",
"Cmds",
"CNCF",
"CODEOWNERS",
"configdir",
"configmap",
"configmapargs",
"connectrpc",
"cookiesecret",
"coredns",
"corev",
"CRD's",
"crds",
"creds",
"crossplane",
"crunchydata",
"cuecontext",
"cuelang",
"customresourcedefinition",
"daemonset",
"deploymentruntimeconfig",
"destinationrule",
"destinationrules",
"devicecode",
"dnsmasq",
"dscacheutil",
"ecrauthorizationtokens",
"edns",
"endpointslices",
"entgo",
"envoyfilter",
"envoyfilters",
"errdetails",
"errgroup",
"etcdsnapshotfiles",
"externalsecret",
"externalsecrets",
"fctr",
"fieldmaskpb",
"fieldspec",
"flushcache",
"fullname",
"gatewayclass",
"gatewayclasses",
"gcraccesstokens",
"gendoc",
"generationbehavior",
"generatorargs",
"generatoroptions",
"genproto",
"ggnpl",
"ghaction",
"githubaccesstokens",
"gitops",
"GOBIN",
"godoc",
"golangci",
"gomarkdoc",
"googleapis",
"goreleaser",
"gotypesalias",
"grpcreflect",
"grpcroute",
"grpcroutes",
"grpcurl",
"healthchecks",
"healthz",
"helmchartargs",
"helmchartconfigs",
"helmcharts",
"Hiera",
"holos",
"holoslogger",
"horizontalpodautoscaler",
"horizontalpodautoscalers",
"Hostaliases",
"Hostnames",
"htpasswd",
"httpbin",
"httproute",
"httproutes",
"iampolicygenerator",
"Infima",
"intstr",
"isatty",
"istiod",
"jbrx",
"jeffmccune",
"jetstack",
"jiralert",
"Jsonnet",
"kfbh",
"killall",
"kubeadm",
"kubeconfig",
"kubelet",
"kubelogin",
"kubernetesobjects",
"Kustomization",
"Kustomizations",
"kustomize",
"kustomizebuild",
"kvpairsources",
"labeldrop",
"labelmap",
"ldflags",
"leaderelection",
"ledgerwriter",
"libnss",
"limitranges",
"livez",
"loadbalancer",
"loadrestrictions",
"logfmt",
"mattn",
"mccutchen",
"metav",
"mindmap",
"mktemp",
"msqbn",
"mtls",
"Multicluster",
"mutatingwebhookconfiguration",
"mutatingwebhookconfigurations",
"mvdan",
"mxcl",
"myhostname",
"myRegistrKeySecretName",
"mysecret",
"nameofclusterrole",
"nameserver",
"namespacedname",
"ndots",
"networkpolicies",
"nodename",
"nolint",
"oauthproxy",
"objectmap",
"objectmeta",
"organizationconnect",
"orgid",
"otelconnect",
"outfile",
"overriden",
"Parentspanid",
"patchstrategicmerge",
"pcjc",
"peerauthentication",
"peerauthentications",
"persistentvolumeclaim",
"persistentvolumeclaims",
"persistentvolumes",
"pflag",
"pgadmin",
"pgupgrade",
"pipefail",
"PKCE",
"platformconnect",
"pluginconfig",
"pluginrestrictions",
"podcli",
"poddisruptionbudget",
"poddisruptionbudgets",
"podinfo",
"podmonitor",
"portmapping",
"postgrescluster",
"privs",
"prometheuses",
"promhttp",
"protobuf",
"protojson",
"providerconfig",
"proxyconfig",
"proxyconfigs",
"Pulumi",
"pushgateway",
"pushsecret",
"pushsecrets",
"putenv",
"qjbp",
"quickstart",
"QVRFLS",
"readyz",
"referencegrant",
"referencegrants",
"Registr",
"replacementfield",
"replicasets",
"replicationcontrollers",
"requestauthentication",
"requestauthentications",
"resourcequotas",
"retryable",
"rogpeppe",
"rolebinding",
"rootfs",
"ropc",
"seccomp",
"secretargs",
"SECRETKEY",
"secretstore",
"secretstores",
"serverlb",
"serverside",
"serviceaccount",
"servicebindings",
"serviceentries",
"serviceentry",
"servicemonitor",
"somevalue",
"SOMEVAR",
"sortoptions",
"spanid",
"spiffe",
"stackdriver",
"startupapicheck",
"statefulset",
"statefulsets",
"stefanprodan",
"storageclasses",
"streamwatcher",
"struct",
"structpb",
"subcharts",
"subjectaccessreviews",
"svclb",
"sysfs",
"systemconnect",
"tablewriter",
"templatable",
"testscript",
"thanos",
"Tiltfile",
"timestamppb",
"Timoni",
"tlsclientconfig",
"tokencache",
"Tokener",
"tolerations",
"Traceid",
"traefik",
"transactionhistory",
"tsdb",
"typemeta",
"udev",
"uibutton",
"unstage",
"untar",
"upbound",
"Upsert",
"urandom",
"usecases",
"userconnect",
"userdata",
"userservice",
"validatingwebhookconfiguration",
"validatingwebhookconfigurations",
"vaultdynamicsecrets",
"virtualservice",
"virtualservices",
"volumeattachments",
"wasmplugin",
"wasmplugins",
"workdir",
"workloadentries",
"workloadentry",
"workloadgroup",
"workloadgroups",
"yournamespace",
"zerolog",
"zitadel",
"ztunnel"
"zitadel"
]
}

View File

@@ -13,7 +13,7 @@ permissions:
jobs:
test:
runs-on: ubuntu-latest
runs-on: gha-rs
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -35,7 +35,7 @@ jobs:
uses: azure/setup-helm@v4
- name: Set up Kubectl
uses: azure/setup-kubectl@v4
uses: azure/setup-kubectl@v3
- name: Install Tools
run: |

View File

@@ -1,57 +0,0 @@
name: Dev Deploy
on:
push:
branches: ['main', 'dev-deploy']
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
## Not needed on ubuntu-latest
# - name: Provide GPG and Git
# run: sudo apt update && sudo apt -qq -y install gnupg git curl zip unzip tar bzip2 make jq
## Not needed on ubuntu-latest
# - name: Provide Holos Dependencies
# run: |
# sudo mkdir -p -m 755 /etc/apt/keyrings
# curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
# sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list
# sudo apt update
# sudo apt install -qq -y kubectl
# curl -fsSL -o- https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Must come after git executable is provided
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: '1.22.x'
- uses: ko-build/setup-ko@v0.7
env:
KO_DOCKER_REPO: quay.io/holos-run/holos
- name: Setup SSH
run: |
mkdir -p ~/.ssh
echo "${{ secrets.DEPLOY_SSH_PRIVATE_KEY }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
ssh-keyscan github.com >> ~/.ssh/known_hosts
git config --global user.name "github-actions[bot]"
git config --global user.email "github-actions[bot]@users.noreply.github.com"
- name: make dev-deploy
env:
auth_user: holos-run+pusher
auth_token: ${{ secrets.QUAY_TOKEN }}
run: |
echo "${auth_token}" | ko login quay.io --username "${auth_user}" --password-stdin
make dev-deploy

View File

@@ -1,30 +0,0 @@
name: golangci-lint
on:
push:
branches:
- main
- test
pull_request:
types: [opened, synchronize]
permissions:
# Required: allow read access to the content for analysis.
contents: read
# Optional: allow read access to pull request. Use with `only-new-issues` option.
pull-requests: read
# Optional: allow write access to checks to allow the action to annotate code in the PR.
checks: write
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: stable
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: v1.60

View File

@@ -13,9 +13,9 @@ permissions:
contents: read
jobs:
lint:
golangci:
name: lint
runs-on: ubuntu-latest
runs-on: gha-rs
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -30,13 +30,16 @@ jobs:
with:
go-version: stable
## Not needed on ubuntu-latest
# - name: Install Packages
# run: sudo apt update && sudo apt -qq -y install git curl zip unzip tar bzip2 make
- name: Install Packages
run: sudo apt update && sudo apt -qq -y install git curl zip unzip tar bzip2 make
- name: Install Tools
run: make tools
run: |
set -x
make tools
- name: Lint
# golangci-lint runs in a separate workflow.
run: make lint -o golangci-lint
- name: golangci-lint
uses: golangci/golangci-lint-action@v4
with:
version: latest
skip-pkg-cache: true

View File

@@ -12,12 +12,11 @@ permissions:
jobs:
goreleaser:
runs-on: ubuntu-latest
runs-on: gha-rs
steps:
## Not needed on ubuntu-latest
# Must come before Checkout, otherwise goreleaser fails
# - name: Provide GPG and Git
# run: sudo apt update && sudo apt -qq -y install gnupg git curl zip unzip tar bzip2 make
- name: Provide GPG and Git
run: sudo apt update && sudo apt -qq -y install gnupg git curl zip unzip tar bzip2 make
# Must come after git executable is provided
- name: Checkout

.gitignore (vendored), 2 changes
View File

@@ -1,4 +1,5 @@
/bin/
vendor/
.idea/
coverage.out
/dist/
@@ -6,7 +7,6 @@ coverage.out
/deploy/
.vscode/
tmp/
.DS_*
# In case we run through the tutorial in this directory.
/holos-k3d/

View File

@@ -1,13 +0,0 @@
# Refer to https://ko.build/configuration/#overriding-go-build-settings
builds:
- id: holos
dir: .
main: ./cmd/holos
env:
- GOPRIVATE=github.com/holos-run/*
ldflags:
- -s
- -w
- -X
# Makefile provides GIT_DETAIL and GIT_SUFFIX.
- github.com/holos-run/holos/version.GitDescribe={{.Env.GIT_DETAIL}}{{.Env.GIT_SUFFIX}}

View File

@@ -48,16 +48,13 @@ bumpmajor: ## Bump the major version.
show-version: ## Print the full version.
@echo $(VERSION)
.PHONY: tag
tag: ## Tag a release
git tag v$(VERSION)
.PHONY: tidy
tidy: ## Tidy go module.
go mod tidy
.PHONY: fmt
fmt: ## Format code.
cd docs/examples && cue fmt ./...
cd internal/generate/platforms && cue fmt ./...
go fmt ./...
@@ -92,14 +89,11 @@ clean: ## Clean executables.
test: ## Run tests.
scripts/test
.PHONY: golangci-lint
golangci-lint:
golangci-lint run
.PHONY: lint
lint: golangci-lint ## Run linters.
lint: ## Run linters.
buf lint
cd internal/frontend/holos && ng lint
golangci-lint run
./hack/cspell
.PHONY: coverage
@@ -123,37 +117,30 @@ go-deps: ## tool versions pinned in tools.go
go install honnef.co/go/tools/cmd/staticcheck
go install golang.org/x/tools/cmd/godoc
go install github.com/princjef/gomarkdoc/cmd/gomarkdoc
go install github.com/google/ko
# curl https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash
.PHONY: frontend-deps
frontend-deps: ## Install Angular deps for go generate
cd internal/frontend/holos && npm install
cd internal/frontend/holos && npm install --save-dev @bufbuild/buf @connectrpc/protoc-gen-connect-es
cd internal/frontend/holos && npm install @connectrpc/connect @connectrpc/connect-web @bufbuild/protobuf
# https://github.com/connectrpc/connect-query-es/blob/1350b6f07b6aead81793917954bdb1cc3ce09df9/packages/protoc-gen-connect-query/README.md?plain=1#L23
cd internal/frontend/holos && npm install --save-dev @connectrpc/protoc-gen-connect-query @bufbuild/protoc-gen-es
cd internal/frontend/holos && npm install @connectrpc/connect-query @bufbuild/protobuf
.PHONY: website-deps
website-deps: ## Install Docusaurus deps for go generate
cd doc/website && npm install
.PHONY: image # refer to .ko.yaml as well
image: ## Container image build for workflows/publish.yaml
KO_DOCKER_REPO=$(DOCKER_REPO) GIT_DETAIL=$(GIT_DETAIL) GIT_SUFFIX=$(GIT_SUFFIX) ko build --platform=all --bare ./cmd/holos --tags $(GIT_DETAIL)$(GIT_SUFFIX) --tags latest
.PHONY: prod-deploy
prod-deploy: install image ## deploy to PROD
GIT_DETAIL=$(GIT_DETAIL) GIT_SUFFIX=$(GIT_SUFFIX) bash ./hack/deploy
.PHONY: dev-deploy
dev-deploy: install image ## deploy to dev
GIT_DETAIL=$(GIT_DETAIL) GIT_SUFFIX=$(GIT_SUFFIX) bash ./hack/deploy-dev
.PHONY: image
image: build ## Docker image build
docker build . -t ${DOCKER_REPO}:v$(shell ./bin/holos --version)
docker push ${DOCKER_REPO}:v$(shell ./bin/holos --version)
.PHONY: website
website: ## Build website
./hack/build-website
.PHONY: unity
unity: ## https://cuelabs.dev/unity/
./scripts/unity
.PHONY: help
help: ## Display this help menu.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

View File

@@ -1,4 +1,4 @@
## Holos - A Holistic Development Platform
## Holos - A Holostic Development Platform
<img width="50%"
align="right"
@@ -9,27 +9,24 @@ Building and maintaining a software development platform is a complex and time
consuming endeavour. Organizations often dedicate a team of 3-4 who need 6-12
months to build the platform.
Holos is a tool and a reference platform to reduce the complexity and speed up
Holos is a tool and a reference platform to reduce the compexity and speed up
the process of building a modern, cloud native software development platform.
- **Accelerate new projects** - Reduce time to market and operational complexity by starting your new project on top of the Holos reference platform.
- **Modernize existing projects** - Incrementally incorporate your existing platform services into Holos for simpler integration.
- **Unified configuration model** - Increase safety and reduce the risk of config changes with CUE.
- **First class Helm and Kustomize support** - Leverage and reuse your existing investment in existing configuration tools such as Helm and Kustomize.
- **Modern Authentication and Authorization** - Holos seamlessly integrates platform identity and access management with zero-trust beyond corp style authorization policy.
- **Accelerate new projects** - Reduce time to market and operational complexity by starting your new project on top of the Holos reference platform.
- **Modernize existing projects** - Incrementally incorporate your existing platform services into Holos for simpler integration.
- **Unified configuration model** - Increase safety and reduce the risk of config changes with CUE.
- **First class Helm and Kustomize support** - Leverage and reuse your existing investment in existing configuration tools such as Helm and Kustomize.
- **Modern Authentication and Authorization** - Holos seamlessly integrates platform identity and access mangement with zero-trust beyond corp style authorization policy.
## Quick Installation
```console
go install github.com/holos-run/holos/cmd/holos@latest
```
## Docs and Support
The documentation for developing and using Holos is available at: https://holos.run
The documentation for developing and using Holos is avaialble at: https://holos.run
For discussion and support, [open a discussion](https://github.com/orgs/holos-run/discussions/new/choose).
## License
Holos is licensed under Apache 2.0 as found in the [LICENSE file](LICENSE).

View File

@@ -9,7 +9,7 @@ if os.getenv('TILT_WRAPPER') != '1':
fail("could not run, ./hack/tilt/bin/tilt was not used to start tilt")
# Resource ids
holos_backend = 'Holos Server'
holos_backend = 'Holos Backend'
compile_id = 'Go Build'
# Default Registry.
@@ -61,10 +61,8 @@ docker_build_with_restart(
entrypoint=[
'/app/bin/holos.linux',
'server',
'--log-format=text',
'--oidc-issuer=https://login.holos.run',
'--oidc-audience=275804490387516853@holos_quickstart', # auth proxy
'--oidc-audience=270319630705329162@holos_platform', # holos cli
'--oidc-audience=275571128859132936',
],
dockerfile='./Dockerfile',
only=['./bin'],

View File

@@ -1,226 +0,0 @@
// Package v1alpha3 contains CUE definitions intended as convenience wrappers
// around the core data types defined in package core. The purpose of these
// wrappers is to make life easier for platform engineers by reducing boiler
// plate code and generating component build plans in a consistent manner.
package v1alpha3
import (
core "github.com/holos-run/holos/api/core/v1alpha3"
"google.golang.org/protobuf/types/known/structpb"
)
//go:generate ../../../hack/gendoc
// Component represents the fields common the different kinds of component. All
// components have a name, support mixing in resources, and produce a BuildPlan.
type ComponentFields struct {
// Name represents the Component name.
Name string
// Resources are kubernetes api objects to mix into the output.
Resources map[string]any
// ArgoConfig represents the ArgoCD GitOps configuration for this Component.
ArgoConfig ArgoConfig
// BuildPlan represents the derived BuildPlan for the Holos cli to render.
BuildPlan core.BuildPlan
}
// Helm provides a BuildPlan via the Output field which contains one HelmChart
// from package core. Useful as a convenience wrapper to render a HelmChart
// with optional mix-in resources and Kustomization post-processing.
type Helm struct {
ComponentFields `json:",inline"`
// Version represents the chart version.
Version string
// Namespace represents the helm namespace option when rendering the chart.
Namespace string
// Repo represents the chart repository
Repo struct {
Name string `json:"name"`
URL string `json:"url"`
}
// Values represents data to marshal into a values.yaml for helm.
Values interface{} `cue:"{...}"`
// Chart represents the derived HelmChart for inclusion in the BuildPlan
// Output field value. The default HelmChart field values are derived from
// other Helm field values and should be sufficient for most use cases.
Chart core.HelmChart
// EnableKustomizePostProcessor processes helm output with kustomize if true.
EnableKustomizePostProcessor bool `cue:"true | *false"`
// KustomizeFiles represents additional files to include in a Kustomization
// resources list. Useful to patch helm output. The implementation is a
// struct with filename keys and structs as values. Holos encodes the struct
// value to yaml then writes the result to the filename key. Component
// authors may then reference the filename in the kustomization.yaml resources
// or patches lists.
// Requires EnableKustomizePostProcessor: true.
KustomizeFiles map[string]any `cue:"{[string]: {...}}"`
// KustomizePatches represents patches to apply to the helm output. Requires
// EnableKustomizePostProcessor: true.
KustomizePatches map[core.InternalLabel]any `cue:"{[string]: {...}}"`
// KustomizeResources represents additional resources files to include in the
// kustomize resources list.
KustomizeResources map[string]any `cue:"{[string]: {...}}"`
}
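As an illustration of the wrapper above, here is a minimal CUE sketch of a Helm component, assuming these Go types are exported to CUE as definitions such as `#Helm`; the chart name, version, repository URL, and patch content are placeholders.

```cue
// Hypothetical component using the v1alpha3 Helm wrapper defined above.
_httpbin: #Helm & {
	Name:      "httpbin"
	Namespace: "httpbin"
	Version:   "0.1.0" // placeholder chart version
	Repo: {
		name: "httpbin"
		url:  "https://example.com/charts" // placeholder repository URL
	}
	// Marshalled into values.yaml for `helm template`.
	Values: replicaCount: 2

	// Post-process the helm output with kustomize.
	EnableKustomizePostProcessor: true
	KustomizePatches: addTeamLabel: {
		apiVersion: "apps/v1"
		kind:       "Deployment"
		metadata: {
			name: "httpbin"
			labels: "example.com/team": "platform" // placeholder label
		}
	}
}
```

The derived Chart and BuildPlan fields are computed from these inputs by the accompanying CUE definitions, so a component author typically sets only the fields shown.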
// Kustomize provides a BuildPlan via the Output field which contains one
// KustomizeBuild from package core.
type Kustomize struct {
ComponentFields `json:",inline"`
// Kustomization represents the kustomize build plan for holos to render.
Kustomization core.KustomizeBuild
}
// Kubernetes provides a BuildPlan via the Output field which contains inline
// API Objects provided directly from CUE.
type Kubernetes struct {
ComponentFields `json:",inline"`
// Objects represents the kubernetes api objects for the Component.
Objects core.KubernetesObjects
}
// ArgoConfig represents the ArgoCD GitOps configuration for a Component.
// Useful to define once at the root of the Platform configuration and reuse
// across all Components.
type ArgoConfig struct {
// Enabled causes holos to render an ArgoCD Application resource for GitOps if true.
Enabled bool `cue:"true | *false"`
// ClusterName represents the cluster within the platform the Application
// resource is intended for.
ClusterName string
// DeployRoot represents the path from the git repository root to the `deploy`
// rendering output directory. Used as a prefix for the
// Application.spec.source.path field.
DeployRoot string `cue:"string | *\".\""`
// RepoURL represents the value passed to the Application.spec.source.repoURL
// field.
RepoURL string
// TargetRevision represents the value passed to the
// Application.spec.source.targetRevision field. Defaults to the branch named
// main.
TargetRevision string `cue:"string | *\"main\""`
// AppProject represents the ArgoCD Project to associate the Application with.
AppProject string `cue:"string | *\"default\""`
}
// Cluster represents a cluster managed by the Platform.
type Cluster struct {
// Name represents the cluster name, for example "east1", "west1", or
// "management".
Name string `json:"name"`
// Primary represents if the cluster is marked as the primary among a set of
// candidate clusters. Useful for promotion of database leaders.
Primary bool `json:"primary" cue:"true | *false"`
}
// Fleet represents a named collection of similarly configured Clusters. Useful
// to segregate workload clusters from their management cluster.
type Fleet struct {
Name string `json:"name"`
// Clusters represents a mapping of Clusters by their name.
Clusters map[string]Cluster `json:"clusters" cue:"{[Name=_]: name: Name}"`
}
// StandardFleets represents the standard set of Clusters in a Platform
// segmented into Fleets by their purpose. The management Fleet contains a
// single Cluster, for example a GKE autopilot cluster with no workloads
// deployed for reliability and cost efficiency. The workload Fleet contains
// all other Clusters which contain workloads and sync Secrets from the
// management cluster.
type StandardFleets struct {
// Workload represents a Fleet of zero or more workload Clusters.
Workload Fleet `json:"workload" cue:"{name: \"workload\"}"`
// Management represents a Fleet with one Cluster named management.
Management Fleet `json:"management" cue:"{name: \"management\"}"`
}
// Platform is a convenience structure to produce a core Platform specification
// value in the Output field. Useful to collect components at the root of the
// Platform configuration tree as a struct, which are automatically converted
// into a list for the core Platform spec output.
type Platform struct {
// Name represents the Platform name.
Name string `cue:"string | *\"holos\""`
// Components is a structured map of components to manage by their name.
Components map[string]core.PlatformSpecComponent
// Model represents the Platform model holos gets from from the
// PlatformService.GetPlatform rpc method and provides to CUE using a tag.
Model structpb.Struct `cue:"{...}"`
// Output represents the core Platform spec for the holos cli to iterate over
// and render each listed Component, injecting the Model.
Output core.Platform
// Domain represents the primary domain the Platform operates in. This field
// is intended as a sensible default for component authors to reference and
// platform operators to define.
Domain string `cue:"string | *\"holos.localhost\""`
}
// Organization represents organizational metadata useful across the platform.
type Organization struct {
Name string
DisplayName string
Domain string
}
// OrganizationStrict represents organizational metadata useful across the
// platform. This is an example of using CUE regular expressions to constrain
// and validate configuration.
type OrganizationStrict struct {
Organization `json:",inline"`
// Name represents the organization name as a resource name. Must be 63
// characters or less. Must start with a letter. May contain non-repeating
// hyphens, letters, and numbers. Must end with a letter or number.
Name string `cue:"=~ \"^[a-z][0-9a-z-]{1,61}[0-9a-z]$\" & !~ \"--\""`
// DisplayName represents the human readable organization name.
DisplayName string `cue:"=~ \"^[0-9A-Za-z][0-9A-Za-z ]{2,61}[0-9A-Za-z]$\" & !~ \" \""`
}
// Projects represents projects managed by the platform team for use by other
// teams using the platform.
type Projects map[core.NameLabel]Project
// Project represents logical grouping of components owned by one or more teams.
// Useful for the platform team to manage resources for project teams to use.
type Project struct {
// Name represents project name.
Name string
// Owner represents the team who own this project.
Owner Owner
// Namespaces represents the namespaces assigned to this project.
Namespaces map[core.NameLabel]Namespace
// Hostnames represents the host names to expose for this project.
Hostnames map[core.NameLabel]Hostname
}
// Owner represents the owner of a resource. For example, the name and email
// address of an engineering team.
type Owner struct {
Name string
Email string
}
// Namespace represents a Kubernetes namespace.
type Namespace struct {
Name string
}
// Hostname represents the left most dns label of a domain name.
type Hostname struct {
// Name represents the subdomain to expose, e.g. "www"
Name string
// Namespace represents the namespace metadata.name field of backend object
// reference.
Namespace string
// Service represents the Service metadata.name field of backend object
// reference.
Service string
// Port represents the Service port of the backend object reference.
Port int
}
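To ground the Project, Namespace, and Hostname types above, a minimal sketch assuming a `#Projects` definition generated from these Go types; the team, email, and hostname values are placeholders.

```cue
// Hypothetical projects map keyed by core.NameLabel.
_projects: #Projects & {
	httpbin: {
		Name: "httpbin"
		Owner: {
			Name:  "platform-team"        // placeholder team name
			Email: "platform@example.com" // placeholder email
		}
		Namespaces: httpbin: Name: "httpbin"
		// Expose www.<platform domain> backed by the httpbin Service on port 80.
		Hostnames: www: {
			Name:      "www"
			Namespace: "httpbin"
			Service:   "httpbin"
			Port:      80
		}
	}
}
```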

View File

@@ -1,4 +0,0 @@
---
description: Simplified abstraction to generate core v1alpha3 components.
sidebar_position: 997
---

View File

@@ -1,290 +0,0 @@
// # Author API
//
// Package v1alpha4 contains ergonomic CUE definitions for Holos component
// authors. These definitions serve as adapters to produce [Core API] resources
// for the holos command line tool.
//
// [Core API]: https://holos.run/docs/api/core/v1alpha4/
package v1alpha4
import core "github.com/holos-run/holos/api/core/v1alpha4"
//go:generate ../../../hack/gendoc
// Platform assembles a Core API [Platform] in the Resource field for the holos
// render platform command. Use the Components field to register components
// with the platform using a struct. This struct is converted into a list for
// final output to holos.
//
// See related:
//
// - [Component] collection of components composing the platform.
// - [Platform] resource assembled for holos to process.
//
// [Platform]: https://holos.run/docs/api/core/v1alpha4/#Platform
// [Component]: https://holos.run/docs/api/core/v1alpha4/#Component
type Platform struct {
Name string
Components map[NameLabel]core.Component
Resource core.Platform
}
// Cluster represents a cluster managed by the Platform.
type Cluster struct {
// Name represents the cluster name, for example "east1", "west1", or
// "management".
Name string `json:"name"`
// Primary represents if the cluster is marked as the primary among a set of
// candidate clusters. Useful for promotion of database leaders.
Primary bool `json:"primary" cue:"true | *false"`
}
// Fleet represents a named collection of similarly configured Clusters. Useful
// to segregate workload clusters from their management cluster.
type Fleet struct {
Name string `json:"name"`
// Clusters represents a mapping of Clusters by their name.
Clusters map[string]Cluster `json:"clusters" cue:"{[Name=_]: name: Name}"`
}
// StandardFleets represents the standard set of Clusters in a Platform
// segmented into Fleets by their purpose. The management Fleet contains a
// single Cluster, for example a GKE autopilot cluster with no workloads
// deployed for reliability and cost efficiency. The workload Fleet contains
// all other Clusters which contain workloads and sync Secrets from the
// management cluster.
type StandardFleets struct {
// Workload represents a Fleet of zero or more workload Clusters.
Workload Fleet `json:"workload" cue:"{name: \"workload\"}"`
// Management represents a Fleet with one Cluster named management.
Management Fleet `json:"management" cue:"{name: \"management\"}"`
}
// ArgoConfig represents the ArgoCD GitOps configuration associated with a
// [BuildPlan]. Useful to define once at the root of the Platform configuration
// and reuse across all components.
//
// [BuildPlan]: https://holos.run/docs/api/core/v1alpha4/#buildplan
type ArgoConfig struct {
// Enabled causes holos to render an Application resource when true.
Enabled bool `cue:"true | *false"`
// RepoURL represents the value passed to the Application.spec.source.repoURL
// field.
RepoURL string
// Root represents the path from the git repository root to the WriteTo output
// directory, the behavior of the holos render component --write-to flag and
// the Core API Component WriteTo field. Used as a prefix for the
// Application.spec.source.path field.
Root string `cue:"string | *\"deploy\""`
// TargetRevision represents the value passed to the
// Application.spec.source.targetRevision field. Defaults to the branch named
// main.
TargetRevision string `cue:"string | *\"main\""`
// AppProject represents the ArgoCD Project to associate the Application with.
AppProject string `cue:"string | *\"default\""`
}
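As a sketch of the reuse pattern described above, the following assumes an `#ArgoConfig` definition generated from this type; the repository URL is a placeholder.

```cue
// Defined once near the platform root, then unified into every component.
_argoConfig: #ArgoConfig & {
	Enabled: true
	RepoURL: "https://github.com/example/infra.git" // placeholder repository
	// Root, TargetRevision, and AppProject keep their documented defaults:
	// "deploy", "main", and "default".
}
```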
// Organization represents organizational metadata useful across the platform.
type Organization struct {
Name string
DisplayName string
Domain string
}
// OrganizationStrict represents organizational metadata useful across the
// platform. This is an example of using CUE regular expressions to constrain
// and validate configuration.
type OrganizationStrict struct {
Organization `json:",inline"`
// Name represents the organization name as a resource name. Must be 63
// characters or less. Must start with a letter. May contain non-repeating
// hyphens, letters, and numbers. Must end with a letter or number.
Name string `cue:"=~ \"^[a-z][0-9a-z-]{1,61}[0-9a-z]$\" & !~ \"--\""`
// DisplayName represents the human readable organization name.
DisplayName string `cue:"=~ \"^[0-9A-Za-z][0-9A-Za-z ]{2,61}[0-9A-Za-z]$\" & !~ \" \""`
}
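To illustrate the regular-expression constraints, a small sketch assuming an `#OrganizationStrict` definition generated from the type above; the organization values are placeholders.

```cue
// Satisfies both constraints: lower case resource name, readable display name.
organization: #OrganizationStrict & {
	Name:        "example-org"
	DisplayName: "Example Org"
	Domain:      "example.com"
}

// Values such as Name: "Example" (upper case first letter) or
// Name: "example--org" (repeated hyphen) fail `cue vet` against the
// =~ and !~ constraints in the struct tags above.
```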
// Kubernetes provides a [BuildPlan] via the Output field which contains inline
// API Objects provided directly from CUE in the Resources field of
// [ComponentConfig].
//
// See related:
//
// - [ComponentConfig]
// - [BuildPlan]
//
// [BuildPlan]: https://holos.run/docs/api/core/v1alpha4/#BuildPlan
type Kubernetes struct {
ComponentConfig `json:",inline"`
// BuildPlan represents the derived BuildPlan produced for the holos render
// component command.
BuildPlan core.BuildPlan
}
// Helm provides a [BuildPlan] via the Output field which generates manifests
// from a helm chart with optional mix-in resources provided directly from CUE
// in the Resources field.
//
// This definition is a convenient way to produce a [BuildPlan] composed of
// three [Resources] generators with one [Kustomize] transformer.
//
// See related:
//
// - [ComponentConfig]
// - [Chart]
// - [Values]
// - [BuildPlan]
//
// [BuildPlan]: https://holos.run/docs/api/core/v1alpha4/#BuildPlan
// [Chart]: https://holos.run/docs/api/core/v1alpha4/#Chart
// [Values]: https://holos.run/docs/api/core/v1alpha4/#Values
type Helm struct {
ComponentConfig `json:",inline"`
// Chart represents a Helm chart.
Chart core.Chart
// Values represents data to marshal into a values.yaml for helm.
Values core.Values
// EnableHooks enables helm hooks when executing the `helm template` command.
EnableHooks bool
// BuildPlan represents the derived BuildPlan produced for the holos render
// component command.
BuildPlan core.BuildPlan
}
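A sketch of a component built on this definition, assuming the author package exposes it as `#Helm`; the chart coordinates, repository URL, and values are placeholders.

```cue
_podinfo: #Helm & {
	Name:      "podinfo"
	Namespace: "podinfo"
	Chart: {
		name:    "podinfo"
		version: "6.0.0" // placeholder version
		release: "podinfo"
		repository: {
			name: "podinfo"
			url:  "https://example.com/charts" // placeholder repository URL
		}
	}
	// Marshalled into values.yaml when holos runs `helm template`.
	Values: replicaCount: 2
	// Also emit an ArgoCD Application artifact for GitOps.
	ArgoConfig: Enabled: true
}
```

The derived BuildPlan field is produced from these inputs by the accompanying CUE definitions for the holos render component command.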
// Kustomize provides a [BuildPlan] via the Output field which generates
// manifests from a kustomize kustomization with optional mix-in resources
// provided directly from CUE in the Resources field.
//
// See related:
//
// - [ComponentConfig]
// - [BuildPlan]
//
// [BuildPlan]: https://holos.run/docs/api/core/v1alpha4/#buildplan
type Kustomize struct {
ComponentConfig `json:",inline"`
// BuildPlan represents the derived BuildPlan produced for the holos render
// component command.
BuildPlan core.BuildPlan
}
// ComponentConfig represents the configuration common to all kinds of
// component.
//
// - [Helm] charts.
// - [Kubernetes] resources generated from CUE.
// - [Kustomize] bases.
//
// See the following resources for additional details:
//
// - [Resources]
// - [ArgoConfig]
// - [KustomizeConfig]
// - [BuildPlan]
//
// [BuildPlan]: https://holos.run/docs/api/core/v1alpha4/#BuildPlan
// [Resources]: https://holos.run/docs/api/core/v1alpha4/#Resources
type ComponentConfig struct {
// Name represents the BuildPlan metadata.name field. Used to construct the
// fully rendered manifest file path.
Name string
// Component represents the path to the component producing the BuildPlan.
Component string
// Cluster represents the name of the cluster this BuildPlan is for.
Cluster string
// Resources represents kubernetes resources mixed into the rendered manifest.
Resources core.Resources
// ArgoConfig represents the ArgoCD GitOps configuration for this BuildPlan.
ArgoConfig ArgoConfig
// CommonLabels represents common labels to manage on all rendered manifests.
CommonLabels map[string]string
// Namespace manages the metadata.namespace field on all resources except the
// ArgoCD Application.
Namespace string `json:",omitempty"`
// KustomizeConfig represents the configuration for kustomize.
KustomizeConfig KustomizeConfig
}
// KustomizeConfig represents the configuration for kustomize post processing.
// The Files field is used to mixing in static manifest files from the component
// directory. The Resources field is used for mixing in manifests from network
// locations urls.
//
// See related:
//
// - [ComponentConfig]
// - [Kustomization]
//
// [Kustomization]: https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/
type KustomizeConfig struct {
// Kustomization represents the kustomization used to transform resources.
// Note the resources field is internally managed from the Files and Resources fields.
Kustomization map[string]any `json:",omitempty"`
// Files represents files to copy from the component directory for kustomization.
Files map[string]struct{ Source string } `cue:"{[NAME=_]: Source: NAME}"`
// Resources represents additional entries to included in the resources list.
Resources map[string]struct{ Source string } `cue:"{[NAME=_]: Source: NAME}"`
}
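A sketch of the Files and Resources mix-ins described above, assuming a `#Kustomize` component wrapping this KustomizeConfig; the file name and URL are placeholders.

```cue
_gateway: #Kustomize & {
	Name: "gateway-api"
	KustomizeConfig: {
		// Copy a static manifest from the component directory; Source defaults
		// to the field name per the struct tag above.
		Files: "gateway.yaml": _
		// Add a manifest fetched from a URL to the resources list.
		Resources: "https://example.com/manifests/crds.yaml": _ // placeholder URL
		Kustomization: commonLabels: "holos.run/component.name": "gateway-api"
	}
}
```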
// Projects represents projects managed by the platform team for use by other
// teams using the platform.
type Projects map[NameLabel]Project
// Project represents logical grouping of components owned by one or more teams.
// Useful for the platform team to manage resources for project teams to use.
type Project struct {
// Name represents project name.
Name string
// Owner represents the team who own this project.
Owner Owner
// Namespaces represents the namespaces assigned to this project.
Namespaces map[NameLabel]Namespace
// Hostnames represents the host names to expose for this project.
Hostnames map[NameLabel]Hostname
// CommonLabels represents common labels to manage on all rendered manifests.
CommonLabels map[string]string
}
// Owner represents the owner of a resource. For example, the name and email
// address of an engineering team.
type Owner struct {
Name string
Email string
}
// Namespace represents a Kubernetes namespace.
type Namespace struct {
Name string
}
// Hostname represents the left most dns label of a domain name.
type Hostname struct {
// Name represents the subdomain to expose, e.g. "www"
Name string
// Namespace represents the namespace metadata.name field of backend object
// reference.
Namespace string
// Service represents the Service metadata.name field of backend object
// reference.
Service string
// Port represents the Service port of the backend object reference.
Port int
}
// NameLabel signals the common use case of converting a struct to a list where
// the name field of each value unifies with the field name of the outer struct.
//
// For example:
//
// S: [NameLabel=string]: name: NameLabel
// S: jeff: _
// S: gary: _
// S: nate: _
// L: [for x in S {x}]
// // L is [{name: "jeff"}, {name: "gary"}, {name: "nate"}]
type NameLabel string

View File

@@ -1,4 +0,0 @@
---
description: Simplified abstraction to generate core v1alpha4 build plans.
sidebar_position: 996
---

View File

@@ -1,4 +0,0 @@
---
description: Core v1alpha4 schema for advanced use cases.
sidebar_position: 998
---

View File

@@ -1,53 +0,0 @@
package v1alpha3
import "google.golang.org/protobuf/types/known/structpb"
// InternalLabel is an arbitrary unique identifier internal to holos itself.
// The holos cli is expected to never write a InternalLabel value to rendered
// output files, therefore use a [InternalLabel] when the identifier must be
// unique and internal. Defined as a type for clarity and type checking.
//
// A InternalLabel is useful to convert a CUE struct to a list, for example
// producing a list of [APIObject] resources from an [APIObjectMap]. A CUE
// struct using InternalLabel keys is guaranteed to not lose data when rendering
// output because a InternalLabel is expected to never be written to the final
// output.
type InternalLabel string
// NameLabel is a unique identifier useful to convert a CUE struct to a list
// when the values have a Name field with a default value. This type is
// intended to indicate the common use case of converting a struct to a list
// where the Name field of the value aligns with the struct field name.
type NameLabel string
// Kind is a kubernetes api object kind. Defined as a type for clarity and type
// checking.
type Kind string
// APIObject represents the most basic generic form of a single kubernetes api
// object. Represented as a JSON object internally for compatibility between
// tools, for example loading from CUE.
type APIObject structpb.Struct
// APIObjectMap represents the marshalled yaml representation of kubernetes api
// objects. Do not produce an APIObjectMap directly, instead use [APIObjects]
// to produce the marshalled yaml representation from CUE data, then provide the
// result to [Component].
type APIObjectMap map[Kind]map[InternalLabel]string
// APIObjects represents Kubernetes API objects defined directly from CUE code.
// Useful to mix in resources to any kind of [Component], for example
// adding an ExternalSecret resource to a [HelmChart].
//
// [Kind] must be the resource kind, e.g. Deployment or Service.
//
// [InternalLabel] is an arbitrary internal identifier to uniquely identify the resource
// within the context of a `holos` command. Holos will never write the
// intermediate label to rendered output.
//
// Refer to [Component] which accepts an [APIObjectMap] field provided by
// [APIObjects].
type APIObjects struct {
APIObjects map[Kind]map[InternalLabel]APIObject `json:"apiObjects"`
APIObjectMap APIObjectMap `json:"apiObjectMap"`
}
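A minimal sketch of the two-level mapping described above, assuming an `#APIObjects` definition generated from this type; the ExternalSecret mix-in is a placeholder.

```cue
_mixins: #APIObjects & {
	apiObjects: {
		// Kind -> InternalLabel -> api object.
		ExternalSecret: app: {
			apiVersion: "external-secrets.io/v1beta1" // placeholder apiVersion
			kind:       "ExternalSecret"
			metadata: name: "app-credentials"
		}
	}
	// apiObjectMap holds the marshalled yaml form; the accompanying CUE
	// definition derives it from apiObjects rather than authors writing it.
}
```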

View File

@@ -1,52 +0,0 @@
package v1alpha3
// FilePath represents a file path.
type FilePath string
// FileContent represents file contents.
type FileContent string
// FileContentMap represents a mapping of file paths to file contents.
type FileContentMap map[FilePath]FileContent
// BuildPlan represents a build plan for the holos cli to execute. The purpose
// of a BuildPlan is to define one or more [Component] kinds. For example a
// [HelmChart], [KustomizeBuild], or [KubernetesObjects].
//
// A BuildPlan usually has an additional empty [KubernetesObjects] for the
// purpose of using the [Component] DeployFiles field to deploy an ArgoCD
// or Flux gitops resource for the holos component.
type BuildPlan struct {
Kind string `json:"kind" cue:"\"BuildPlan\""`
APIVersion string `json:"apiVersion" cue:"string | *\"v1alpha3\""`
Spec BuildPlanSpec `json:"spec"`
}
// BuildPlanSpec represents the specification of the build plan.
type BuildPlanSpec struct {
// Disabled causes the holos cli to take no action over the [BuildPlan].
Disabled bool `json:"disabled,omitempty"`
// Components represents multiple [HolosComponent] kinds to manage.
Components BuildPlanComponents `json:"components,omitempty"`
}
type BuildPlanComponents struct {
Resources map[InternalLabel]KubernetesObjects `json:"resources,omitempty"`
KubernetesObjectsList []KubernetesObjects `json:"kubernetesObjectsList,omitempty"`
HelmChartList []HelmChart `json:"helmChartList,omitempty"`
KustomizeBuildList []KustomizeBuild `json:"kustomizeBuildList,omitempty"`
}
// Kustomize represents resources necessary to execute a kustomize build.
// Intended for at least two use cases:
//
// 1. Process a [KustomizeBuild] [Component] which represents raw yaml
// file resources in a holos component directory.
// 2. Post process a [HelmChart] [Component] to inject istio, patch jobs,
// add custom labels, etc...
type Kustomize struct {
// KustomizeFiles holds file contents for kustomize, e.g. patch files.
KustomizeFiles FileContentMap `json:"kustomizeFiles,omitempty"`
// ResourcesFile is the file name used for api objects in kustomization.yaml
ResourcesFile string `json:"resourcesFile,omitempty"`
}
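For orientation, a minimal BuildPlan value consistent with the types in this file, assuming a `#BuildPlan` definition; the component name is a placeholder.

```cue
buildPlan: #BuildPlan & {
	kind:       "BuildPlan"
	apiVersion: "v1alpha3"
	spec: components: kubernetesObjectsList: [{
		kind:       "KubernetesObjects"
		apiVersion: "v1alpha3"
		metadata: name: "namespaces" // placeholder component name
	}]
}
```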

View File

@@ -1,43 +0,0 @@
package v1alpha3
// Component defines the fields common to all holos component kinds. Every
// holos component kind should embed Component.
type Component struct {
// Kind is a string value representing the resource this object represents.
Kind string `json:"kind"`
// APIVersion represents the versioned schema of this representation of an object.
APIVersion string `json:"apiVersion" cue:"\"v1alpha3\""`
// Metadata represents data about the holos component such as the Name.
Metadata Metadata `json:"metadata"`
// APIObjectMap holds the marshalled representation of api objects. Useful to
// mix in resources to each Component type, for example adding an
// ExternalSecret to a [HelmChart] Component. Refer to [APIObjects].
APIObjectMap APIObjectMap `json:"apiObjectMap,omitempty"`
// DeployFiles represents file paths relative to the cluster deploy directory
// with the value representing the file content. Intended for defining the
// ArgoCD Application resource or Flux Kustomization resource from within CUE,
// but may be used to render any file related to the build plan from CUE.
DeployFiles FileContentMap `json:"deployFiles,omitempty"`
// Kustomize represents a kubectl kustomize build post-processing step.
Kustomize `json:"kustomize,omitempty"`
// Skip causes holos to take no action regarding this component.
Skip bool `json:"skip" cue:"bool | *false"`
}
// Metadata represents data about the object such as the Name.
type Metadata struct {
// Name represents the name of the holos component.
Name string `json:"name"`
// Namespace is the primary namespace of the holos component. A holos
// component may manage resources in multiple namespaces, in this case
// consider setting the component namespace to default.
//
// This field is optional because not all resources require a namespace,
// particularly CRDs and DeployFiles functionality.
// +optional
Namespace string `json:"namespace,omitempty"`
}

View File

@@ -1,11 +0,0 @@
package v1alpha3
const (
APIVersion = "v1alpha3"
BuildPlanKind = "BuildPlan"
HelmChartKind = "HelmChart"
// ChartDir is the directory name created in the holos component directory to cache a chart.
ChartDir = "vendor"
// ResourcesFile is the file name used to store component output when post-processing with kustomize.
ResourcesFile = "resources.yaml"
)

View File

@@ -1,26 +0,0 @@
// Package v1alpha3 contains the core API contract between the holos cli and CUE
// configuration code. Platform designers, operators, and software developers
// use this API to write configuration in CUE which `holos` loads. The overall
// shape of the API defines imperative actions `holos` should carry out to
// render the complete yaml that represents a Platform.
//
// [Platform] defines the complete configuration of a platform. With the holos
// reference platform this takes the shape of one management cluster and at
// least two workload cluster. Each cluster has multiple [Component]
// resources applied to it.
//
// Each holos component path, e.g. `components/namespaces` produces exactly one
// [BuildPlan] which in turn contains a set of [Component] kinds.
//
// The primary kinds of [Component] are:
//
// 1. [HelmChart] to render config from a helm chart.
// 2. [KustomizeBuild] to render config from [Kustomize]
// 3. [KubernetesObjects] to render [APIObjects] defined directly in CUE
// configuration.
//
// Note that Holos operates as a data pipeline, so the output of a [HelmChart]
// may be provided to [Kustomize] for post-processing.
package v1alpha3
//go:generate ../../../hack/gendoc

View File

@@ -1,4 +0,0 @@
---
description: Core v1alpha3 schema for advanced use cases.
sidebar_position: 997
---

View File

@@ -1,38 +0,0 @@
package v1alpha3
// HelmChart represents a holos component which wraps around an upstream helm
// chart. Holos orchestrates helm by providing values obtained from CUE,
// renders the output using `helm template`, then post-processes the helm output
// yaml using the general functionality provided by [Component], for
// example [Kustomize] post-rendering and mixing in additional kubernetes api
// objects.
type HelmChart struct {
Component `json:",inline"`
Kind string `json:"kind" cue:"\"HelmChart\""`
// Chart represents a helm chart to manage.
Chart Chart `json:"chart"`
// ValuesContent represents the values.yaml file holos passes to the `helm
// template` command.
ValuesContent string `json:"valuesContent"`
// EnableHooks enables helm hooks when executing the `helm template` command.
EnableHooks bool `json:"enableHooks" cue:"bool | *false"`
}
// Chart represents a helm chart.
type Chart struct {
// Name represents the chart name.
Name string `json:"name"`
// Version represents the chart version.
Version string `json:"version"`
// Release represents the chart release when executing helm template.
Release string `json:"release"`
// Repository represents the repository to fetch the chart from.
Repository Repository `json:"repository,omitempty"`
}
// Repository represents a helm chart repository.
type Repository struct {
Name string `json:"name"`
URL string `json:"url"`
}
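A sketch of the HelmChart shape defined above, assuming a `#HelmChart` definition; note that ValuesContent carries the already-marshalled values.yaml text. The chart coordinates and repository URL are placeholders.

```cue
helmChart: #HelmChart & {
	kind:       "HelmChart"
	apiVersion: "v1alpha3"
	metadata: name: "httpbin"
	chart: {
		name:    "httpbin"
		version: "0.1.0" // placeholder chart version
		release: "httpbin"
		repository: {
			name: "httpbin"
			url:  "https://example.com/charts" // placeholder repository URL
		}
	}
	// Plain yaml string passed to `helm template` as values.yaml.
	valuesContent: """
		replicaCount: 2
		"""
}
```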

View File

@@ -1,10 +0,0 @@
package v1alpha3
const KubernetesObjectsKind = "KubernetesObjects"
// KubernetesObjects represents a [Component] composed of Kubernetes API
// objects provided directly from CUE using [APIObjects].
type KubernetesObjects struct {
Component `json:",inline"`
Kind string `json:"kind" cue:"\"KubernetesObjects\""`
}

View File

@@ -1,8 +0,0 @@
package v1alpha3
// KustomizeBuild represents a [Component] that renders plain yaml files in
// the holos component directory using `kubectl kustomize build`.
type KustomizeBuild struct {
Component `json:",inline"`
Kind string `json:"kind" cue:"\"KustomizeBuild\""`
}

View File

@@ -1,44 +0,0 @@
package v1alpha3
import "google.golang.org/protobuf/types/known/structpb"
// Platform represents a platform to manage. A Platform resource informs holos
// which components to build. The platform resource also acts as a container
// for the platform model form values provided by the PlatformService. The
// primary use case is to collect the cluster names, cluster types, platform
// model, and holos components to build into one resource.
type Platform struct {
// Kind is a string value representing the resource this object represents.
Kind string `json:"kind" cue:"\"Platform\""`
// APIVersion represents the versioned schema of this representation of an object.
APIVersion string `json:"apiVersion" cue:"string | *\"v1alpha3\""`
// Metadata represents data about the object such as the Name.
Metadata PlatformMetadata `json:"metadata"`
// Spec represents the specification.
Spec PlatformSpec `json:"spec"`
}
type PlatformMetadata struct {
// Name represents the Platform name.
Name string `json:"name"`
}
// PlatformSpec represents the specification of a Platform. Think of a platform
// specification as a list of platform components to apply to a list of
// kubernetes clusters combined with the user-specified Platform Model.
type PlatformSpec struct {
// Model represents the platform model holos gets from from the
// PlatformService.GetPlatform rpc method and provides to CUE using a tag.
Model structpb.Struct `json:"model" cue:"{...}"`
// Components represents a list of holos components to manage.
Components []PlatformSpecComponent `json:"components"`
}
// PlatformSpecComponent represents a holos component to build or render.
type PlatformSpecComponent struct {
// Path is the path of the component relative to the platform root.
Path string `json:"path"`
// Cluster is the cluster name to provide when rendering the component.
Cluster string `json:"cluster"`
}
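A minimal Platform value consistent with this specification; component paths and cluster names are placeholders.

```cue
platform: #Platform & {
	kind:       "Platform"
	apiVersion: "v1alpha3"
	metadata: name: "example"
	spec: {
		// Platform model provided by the PlatformService.GetPlatform rpc method.
		model: {}
		components: [
			{path: "components/namespaces", cluster: "management"},
			{path: "components/podinfo", cluster: "east1"}
		]
	}
}
```

Each entry pairs a component path with the cluster name provided when rendering it.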

View File

@@ -1,4 +0,0 @@
---
description: Core schema for holos to render a component BuildPlan.
sidebar_position: 100
---

View File

@@ -1,402 +0,0 @@
// # Core API
//
// Package v1alpha4 contains the core API contract between the holos cli and CUE
// configuration code. Platform designers, operators, and software developers
// use this API to write configuration in CUE which holos loads. The Core API
// is declarative. Each resource represents a desired state necessary for holos
// to fully render Kubernetes manifests into plain files.
//
// The following resources provide important context for the Core API. The
// [Author API] is intended for component authors as a convenient adapter for
// the Core API resources Holos expects.
//
// 1. [Technical Overview]
// 2. [Quickstart]
// 3. [Author API]
//
// # Platform
//
// [Platform] defines the complete configuration of a platform. A platform
// represents a [Component] collection.
//
// Inspect a Platform resource holos would process by executing:
//
// cue export --out yaml ./platform
//
// # Component
//
// A [Component] is the combination of CUE code along one path relative to the
// platform root directory plus data injected from the [PlatformSpec] via CUE tags.
// The platform configuration root is the directory containing cue.mod.
//
// A [Component] always produces exactly one [BuildPlan].
//
// # BuildPlan
//
// A [BuildPlan] contains an [Artifact] collection. A BuildPlan often produces
// two artifacts, one containing the fully rendered Kubernetes API resources,
// the other containing an additional resource to manage the former with GitOps.
// For example, a BuildPlan for a podinfo component produces a manifest
// containing a Deployment and a Service, along with a second manifest
// containing an ArgoCD Application.
//
// Inspect a BuildPlan resource holos render component would process by executing:
//
// cue export --out yaml ./projects/platform/components/namespaces
//
// # Artifact
//
// An [Artifact] is one fully rendered manifest file produced from the final
// [Transformer] in a sequence of transformers. An Artifact may also be
// produced directly from a [Generator], but this use case is uncommon.
//
// # Transformer
//
// A [Transformer] takes multiple inputs from prior [Generator] or [Transformer]
// outputs, then transforms the data into one output. [Kustomize] is the most
// commonly used transformer, though a simple [Join] is also supported.
//
// 1. [Kustomize] - Patch and transform the output from prior generators or
// transformers. See [Introduction to Kustomize].
// 2. [Join] - Concatenate multiple prior outputs into one output.
//
// # Generators
//
// A [Generator] generates Kubernetes resources. [Helm] and [Resources] are the
// most commonly used, often paired together to mix-in resources to an
// unmodified Helm chart. A simple [File] generator is also available for use
// with the [Kustomize] transformer.
//
// 1. [Resources] - Generates resources from CUE code.
// 2. [Helm] - Generates rendered yaml from a [Chart].
// 3. [File] - Generates data by reading a file from the component directory.
//
// [Introduction to Kustomize]: https://kubectl.docs.kubernetes.io/guides/config_management/introduction/
// [Author API]: https://holos.run/docs/api/author/
// [Quickstart]: https://holos.run/docs/quickstart/
// [Technical Overview]: https://holos.run/docs/technical-overview/
package v1alpha4
//go:generate ../../../hack/gendoc
// BuildPlan represents a build plan for holos to execute. Each [Platform]
// component produces exactly one BuildPlan.
//
// One or more [Artifact] files are produced by a BuildPlan, representing the
// fully rendered manifests for the Kubernetes API Server.
//
// # Example BuildPlan
//
// Command:
//
// cue export --out yaml ./projects/platform/components/namespaces
//
// Output:
//
// kind: BuildPlan
// apiVersion: v1alpha4
// metadata:
// name: dev-namespaces
// spec:
// component: projects/platform/components/namespaces
// artifacts:
// - artifact: clusters/no-cluster/components/dev-namespaces/dev-namespaces.gen.yaml
// generators:
// - kind: Resources
// output: resources.gen.yaml
// resources:
// Namespace:
// dev-jeff:
// metadata:
// name: dev-jeff
// labels:
// kubernetes.io/metadata.name: dev-jeff
// kind: Namespace
// apiVersion: v1
// transformers:
// - kind: Kustomize
// inputs:
// - resources.gen.yaml
// output: clusters/no-cluster/components/dev-namespaces/dev-namespaces.gen.yaml
// kustomize:
// kustomization:
// commonLabels:
// holos.run/component.name: dev-namespaces
// resources:
// - resources.gen.yaml
type BuildPlan struct {
// Kind represents the type of the resource.
Kind string `json:"kind" cue:"\"BuildPlan\""`
// APIVersion represents the versioned schema of the resource.
APIVersion string `json:"apiVersion" cue:"string | *\"v1alpha4\""`
// Metadata represents data about the resource such as the Name.
Metadata Metadata `json:"metadata"`
// Spec specifies the desired state of the resource.
Spec BuildPlanSpec `json:"spec"`
}
// BuildPlanSpec represents the specification of the [BuildPlan].
type BuildPlanSpec struct {
// Component represents the component that produced the build plan.
// Represented as a path relative to the platform root.
Component string `json:"component"`
// Disabled causes the holos cli to disregard the build plan.
Disabled bool `json:"disabled,omitempty"`
// Artifacts represents the artifacts for holos to build.
Artifacts []Artifact `json:"artifacts"`
}
// Artifact represents one fully rendered manifest produced by a [Transformer]
// sequence, which transforms a [Generator] collection. A [BuildPlan] produces
// an [Artifact] collection.
//
// Each Artifact produces one manifest file artifact. Generator Output values
// are used as Transformer Inputs. The Output field of the final [Transformer]
// should have the same value as the Artifact field.
//
// When there is more than one [Generator] there must be at least one
// [Transformer] to combine outputs into one Artifact. If there is a single
// Generator, it may directly produce the Artifact output.
//
// An Artifact is processed concurrently with other artifacts in the same
// [BuildPlan]. An Artifact should not use an output from another Artifact as
// an input. Each [Generator] may also run concurrently. Each [Transformer] is
// executed sequentially starting after all generators have completed.
//
// Output fields are write-once. It is an error for multiple Generators or
// Transformers to produce the same Output value within the context of a
// [BuildPlan].
type Artifact struct {
Artifact FilePath `json:"artifact,omitempty"`
Generators []Generator `json:"generators,omitempty"`
Transformers []Transformer `json:"transformers,omitempty"`
Skip bool `json:"skip,omitempty"`
}
// Generator generates an intermediate manifest for a [Artifact].
//
// Each Generator in a [Artifact] must have a distinct Output value for a
// [Transformer] to reference.
//
// Refer to [Resources], [Helm], and [File].
type Generator struct {
// Kind represents the kind of generator. Must be Resources, Helm, or File.
Kind string `json:"kind" cue:"\"Resources\" | \"Helm\" | \"File\""`
// Output represents a file for a Transformer or Artifact to consume.
Output FilePath `json:"output"`
// Resources generator. Ignored unless kind is Resources. Resources are
// stored as a two level struct. The top level key is the Kind of resource,
// e.g. Namespace or Deployment. The second level key is an arbitrary
// InternalLabel. The third level is a map[string]any representing the
// Resource.
Resources Resources `json:"resources,omitempty"`
// Helm generator. Ignored unless kind is Helm.
Helm Helm `json:"helm,omitempty"`
// File generator. Ignored unless kind is File.
File File `json:"file,omitempty"`
}
// Resource represents one kubernetes api object.
type Resource map[string]any
// Resources represents a kubernetes resources [Generator] from CUE.
type Resources map[Kind]map[InternalLabel]Resource
// File represents a simple single file copy [Generator]. Useful with a
// [Kustomize] [Transformer] to process plain manifest files stored in the
// component directory. Multiple File generators may be used to transform
// multiple resources.
type File struct {
// Source represents a file sub-path relative to the component path.
Source FilePath `json:"source"`
}
// Helm represents a [Chart] manifest [Generator].
type Helm struct {
// Chart represents a helm chart to manage.
Chart Chart `json:"chart"`
// Values represents values for holos to marshal into values.yaml when
// rendering the chart.
Values Values `json:"values"`
// EnableHooks enables helm hooks when executing the `helm template` command.
EnableHooks bool `json:"enableHooks,omitempty"`
// Namespace represents the helm namespace flag
Namespace string `json:"namespace,omitempty"`
}
// Values represents [Helm] Chart values generated from CUE.
type Values map[string]any
// Chart represents a [Helm] Chart.
type Chart struct {
// Name represents the chart name.
Name string `json:"name"`
// Version represents the chart version.
Version string `json:"version"`
// Release represents the chart release when executing helm template.
Release string `json:"release"`
// Repository represents the repository to fetch the chart from.
Repository Repository `json:"repository,omitempty"`
}
// Repository represents a [Helm] [Chart] repository.
type Repository struct {
Name string `json:"name"`
URL string `json:"url"`
}
// Transformer transforms [Generator] manifests within an [Artifact].
type Transformer struct {
// Kind represents the kind of transformer. Must be Kustomize or Join.
Kind string `json:"kind" cue:"\"Kustomize\" | \"Join\""`
// Inputs represents the files to transform. The Output of prior Generators
// and Transformers.
Inputs []FilePath `json:"inputs"`
// Output represents a file for a subsequent Transformer or Artifact to
// consume.
Output FilePath `json:"output"`
// Kustomize transformer. Ignored unless kind is Kustomize.
Kustomize Kustomize `json:"kustomize,omitempty"`
// Join transformer. Ignored unless kind is Join.
Join Join `json:"join,omitempty"`
}
// Join represents a [Transformer] using [bytes.Join] to concatenate multiple
// inputs into one output with a separator. Useful for combining output from
// [Helm] and [Resources] together into one [Artifact] when [Kustomize] is
// otherwise unnecessary.
//
// [bytes.Join]: https://pkg.go.dev/bytes#Join
type Join struct {
Separator string `json:"separator" cue:"string | *\"---\\n\""`
}
// Kustomize represents a kustomization [Transformer].
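//
// A minimal sketch, with illustrative file names, of a Kustomize transformer
// carrying an inline kustomization and one patch file:
//
//	kind:   "Kustomize"
//	inputs: ["resources.gen.yaml"]
//	output: "app.gen.yaml"
//	kustomize: {
//	    kustomization: {
//	        resources: ["resources.gen.yaml"]
//	        patches: [{path: "patch.yaml", target: {kind: "Deployment"}}]
//	    }
//	    files: "patch.yaml": """
//	        - op: replace
//	          path: /spec/replicas
//	          value: 2
//	        """
//	}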
type Kustomize struct {
// Kustomization represents the decoded kustomization.yaml file.
Kustomization Kustomization `json:"kustomization"`
// Files holds file contents for kustomize, e.g. patch files.
Files FileContentMap `json:"files,omitempty"`
}
// Kustomization represents a kustomization.yaml file for use with the
// [Kustomize] [Transformer]. Untyped to avoid tightly coupling holos to
// kubectl versions, which was a problem for the Flux maintainers. Type checking
// is expected to happen in CUE against the kubectl version the user prefers.
type Kustomization map[string]any
// FileContent represents file contents.
type FileContent string
// FileContentMap represents a mapping of file paths to file contents.
type FileContentMap map[FilePath]FileContent
// FilePath represents a file path.
type FilePath string
// InternalLabel is an arbitrary unique identifier internal to holos itself.
// The holos cli is expected to never write an InternalLabel value to rendered
// output files; therefore, use an InternalLabel when the identifier must be
// unique and internal. Defined as a type for clarity and type checking.
type InternalLabel string
// Kind is a discriminator. Defined as a type for clarity and type checking.
type Kind string
// NameLabel is a unique identifier useful to convert a CUE struct to a list
// when the values have a Name field with a default value. NameLabel indicates
// the common use case of converting a struct to a list where the Name field of
// the value aligns with the outer struct field name.
//
// For example:
//
// Outer: [NAME=_]: Name: NAME
type NameLabel string
// Platform represents a platform to manage. A Platform resource informs holos
// which components to build. The platform resource also acts as a container
// for the platform model form values provided by the PlatformService. The
// primary use case is to collect the cluster names, cluster types, platform
// model, and holos components to build into one resource.
type Platform struct {
// Kind is a string value representing the resource.
Kind string `json:"kind" cue:"\"Platform\""`
// APIVersion represents the versioned schema of this resource.
APIVersion string `json:"apiVersion" cue:"string | *\"v1alpha4\""`
// Metadata represents data about the resource such as the Name.
Metadata Metadata `json:"metadata"`
// Spec represents the specification.
Spec PlatformSpec `json:"spec"`
}
// Metadata represents data about the resource such as the Name.
type Metadata struct {
// Name represents the resource name.
Name string `json:"name"`
}
// PlatformSpec represents the specification of a [Platform]. Think of a
// platform spec as a [Component] collection for multiple kubernetes clusters
// combined with the user-specified Platform Model.
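//
// A minimal sketch (component names and paths are illustrative):
//
//	spec: components: [{
//	    name:      "podinfo"
//	    component: "components/podinfo"
//	    cluster:   "workload"
//	}]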
type PlatformSpec struct {
// Components represents a list of holos components to manage.
Components []Component `json:"components"`
}
// Component represents the complete context necessary to produce a [BuildPlan].
// Component carries information injected from holos render platform to holos
// render component to produce each [BuildPlan].
//
// All of these fields are passed to the holos render component command using
// flags, which in turn are injected into CUE using tags. For clarity, CUE field
// and tag names should match the struct json tag names below.
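//
// For example, component CUE code might receive these values with tags along
// these lines (a sketch only; the exact schema is defined by the platform
// author):
//
//	_Tags: {
//	    name:      string @tag(holos_name, type=string)
//	    component: string @tag(holos_component, type=string)
//	    cluster:   string @tag(holos_cluster, type=string)
//	}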
type Component struct {
// Name represents the name of the component. Injected as the tag variable
// "holos_name" to set the BuildPlan metadata.name field. Necessary for clear
// user feedback during platform rendering.
Name string `json:"name"`
// Component represents the path of the component relative to the platform
// root. Injected as the tag variable "holos_component".
Component string `json:"component"`
// Cluster is the cluster name to provide when rendering the component.
// Injected as the tag variable "holos_cluster".
Cluster string `json:"cluster"`
// Model represents the platform model holos gets from the
// PlatformService.GetPlatform rpc method and provides to CUE using a tag.
// Injected as the tag "holos_model".
Model map[string]any `json:"model,omitempty"`
// Tags represents cue @tag variables injected into the holos render component
// command from the holos render platform command. Tags with a "holos_"
// prefix are reserved for use by the Holos Authors.
Tags map[string]string `json:"tags,omitempty"`
// WriteTo represents the holos render component --write-to flag. If empty,
// the default value for the --write-to flag is used.
WriteTo string `json:"writeTo,omitempty"`
}
// Tags represents standardized fields injected into the component [BuildPlan]
// from the [Platform].
//
// Note: tags should have a reasonable default value so cue eval and cue
// export work without needing to decide on every tag value up front.
//
// Example:
//
// import core "github.com/holos-run/holos/api/core/v1alpha4"
// _Tags: core.#Tags & {
// cluster: _ @tag(cluster, type=string)
// environment: _ @tag(environment, type=string)
// component: _ @tag(component, type=string)
// name: _ @tag(name, type=string)
// }
type Tags struct {
// Name represents the BuildPlan metadata.name field injected from the Platform.
Name string `json:"name" cue:"string | *\"no-name\""`
// Cluster represents the cluster name injected from the Platform.
Cluster string `json:"cluster" cue:"string | *\"no-cluster\""`
// Environment represents the build plan environment.
Environment string `json:"environment" cue:"string | *\"no-environment\""`
// Component represents the path of the component relative to the platform root.
Component string `json:"component" cue:"string | *\"no-component\""`
}

View File

@@ -1,7 +1,6 @@
package v1alpha1
import (
"errors"
"fmt"
"strings"
)
@@ -39,7 +38,7 @@ func (bp *BuildPlan) Validate() error {
errs = append(errs, fmt.Sprintf("apiVersion invalid: want: %s have: %s", APIVersion, bp.APIVersion))
}
if len(errs) > 0 {
return errors.New("invalid BuildPlan: " + strings.Join(errs, ", "))
return fmt.Errorf("invalid BuildPlan: " + strings.Join(errs, ", "))
}
return nil
}

View File

@@ -2,10 +2,8 @@ package main
import (
"os"
"path/filepath"
"testing"
cue "cuelang.org/go/cmd/cue/cmd"
"github.com/holos-run/holos/internal/cli"
"github.com/rogpeppe/go-internal/testscript"
)
@@ -13,29 +11,11 @@ import (
func TestMain(m *testing.M) {
os.Exit(testscript.RunMain(m, map[string]func() int{
"holos": cli.MakeMain(),
"cue": cue.Main,
}))
}
func TestGuides(t *testing.T) {
testscript.Run(t, params(filepath.Join("v1alpha4", "guides")))
}
func TestCLI(t *testing.T) {
testscript.Run(t, params("cli"))
}
func params(dir string) testscript.Params {
return testscript.Params{
Dir: filepath.Join("tests", dir),
RequireExplicitExec: true,
RequireUniqueNames: true,
Setup: func(env *testscript.Env) error {
// Just like cmd/cue/cmd.TestScript, set up separate cache and config dirs per test.
env.Setenv("CUE_CACHE_DIR", filepath.Join(env.WorkDir, "tmp/cachedir"))
configDir := filepath.Join(env.WorkDir, "tmp/configdir")
env.Setenv("CUE_CONFIG_DIR", configDir)
return nil
},
}
func TestGetSecrets(t *testing.T) {
testscript.Run(t, testscript.Params{
Dir: "testdata",
})
}

34
cmd/holos/testdata/constraints.txt vendored Normal file
View File

@@ -0,0 +1,34 @@
# Want support for intermediary constraints
exec holos build ./foo/... --log-level debug
stdout '^bf2bc7f9-9ba0-4f9e-9bd2-9a205627eb0b$'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- foo/constraints.cue --
package holos
metadata: name: "jeff"
-- foo/bar/bar.cue --
package holos
spec: components: KubernetesObjectsList: [
#KubernetesObjects & {
apiObjectMap: foo: bar: "bf2bc7f9-9ba0-4f9e-9bd2-9a205627eb0b"
}
]
-- schema.cue --
package holos
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
#KubernetesObjects: {
apiVersion: "holos.run/v1alpha1"
kind: "KubernetesObjects"
apiObjectMap: {...}
}
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"

View File

@@ -0,0 +1,20 @@
# Want cue errors to show files and lines
! exec holos build .
stderr 'apiObjectMap.foo.bar: cannot convert incomplete value'
stderr '/component.cue:\d+:\d+$'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- component.cue --
package holos
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"
spec: components: KubernetesObjectsList: [{apiObjectMap: foo: bar: _baz}]
_baz: string

View File

@@ -0,0 +1,61 @@
# Want kube api objects in the apiObjects output.
exec holos build .
stdout '^kind: SecretStore$'
stdout '# Source: CUE apiObjects.SecretStore.default'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- component.cue --
package holos
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"
spec: components: KubernetesObjectsList: [{apiObjectMap: #APIObjects.apiObjectMap}]
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
#SecretStore: {
kind: string
metadata: name: string
}
#APIObjects: {
apiObjects: {
SecretStore: {
default: #SecretStore & { metadata: name: "default" }
}
}
}
-- schema.cue --
package holos
// #APIObjects is the output type for api objects produced by cue. A map is used to aid debugging and clarity.
import "encoding/yaml"
#APIObjects: {
// apiObjects holds each of the api objects produced by cue.
apiObjects: {
[Kind=_]: {
[Name=_]: {
kind: Kind
metadata: name: Name
}
}
}
// apiObjectMap holds the marshalled representation of apiObjects
apiObjectMap: {
for kind, v in apiObjects {
"\(kind)": {
for name, obj in v {
"\(name)": yaml.Marshal(obj)
}
}
}
}
}

View File

@@ -0,0 +1,62 @@
# Want kube api objects in the apiObjects output.
exec holos build .
stdout '^kind: SecretStore$'
stdout '# Source: CUE apiObjects.SecretStore.default'
stderr 'skipping helm: no chart name specified'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- component.cue --
package holos
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"
spec: components: HelmChartList: [{apiObjectMap: #APIObjects.apiObjectMap}]
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
#SecretStore: {
kind: string
metadata: name: string
}
#APIObjects: {
apiObjects: {
SecretStore: {
default: #SecretStore & { metadata: name: "default" }
}
}
}
-- schema.cue --
package holos
// #APIObjects is the output type for api objects produced by cue. A map is used to aid debugging and clarity.
import "encoding/yaml"
#APIObjects: {
// apiObjects holds each of the api objects produced by cue.
apiObjects: {
[Kind=_]: {
[Name=_]: {
kind: Kind
metadata: name: Name
}
}
}
// apiObjectMap holds the marshalled representation of apiObjects
apiObjectMap: {
for kind, v in apiObjects {
"\(kind)": {
for name, obj in v {
"\(name)": yaml.Marshal(obj)
}
}
}
}
}

View File

@@ -0,0 +1,25 @@
# Want api object kind and name in errors
! exec holos build .
stderr 'apiObjects.secretstore.default.foo: field not allowed'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- component.cue --
package holos
apiVersion: "holos.run/v1alpha1"
kind: "KubernetesObjects"
cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
#SecretStore: {
metadata: name: string
}
apiObjects: {
secretstore: {
default: #SecretStore & { foo: "not allowed" }
}
}

View File

@@ -0,0 +1,289 @@
# Want helm errors to show up
! exec holos build .
stderr 'Error: execution error at \(zitadel/templates/secret_zitadel-masterkey.yaml:2:4\): Either set .Values.zitadel.masterkey xor .Values.zitadel.masterkeySecretName'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- zitadel.cue --
package holos
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"
spec: components: HelmChartList: [_HelmChart]
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
_HelmChart: {
apiVersion: "holos.run/v1alpha1"
kind: "HelmChart"
metadata: name: "zitadel"
namespace: "zitadel"
chart: {
name: "zitadel"
version: "7.9.0"
release: name
repository: {
name: "zitadel"
url: "https://charts.zitadel.com"
}
}
}
-- vendor/zitadel/templates/secret_zitadel-masterkey.yaml --
{{- if (or (and .Values.zitadel.masterkey .Values.zitadel.masterkeySecretName) (and (not .Values.zitadel.masterkey) (not .Values.zitadel.masterkeySecretName)) ) }}
{{- fail "Either set .Values.zitadel.masterkey xor .Values.zitadel.masterkeySecretName" }}
{{- end }}
{{- if .Values.zitadel.masterkey -}}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: zitadel-masterkey
{{- with .Values.zitadel.masterkeyAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "zitadel.labels" . | nindent 4 }}
stringData:
masterkey: {{ .Values.zitadel.masterkey }}
{{- end -}}
-- vendor/zitadel/Chart.yaml --
apiVersion: v2
appVersion: v2.46.0
description: A Helm chart for ZITADEL
icon: https://zitadel.com/zitadel-logo-dark.svg
kubeVersion: '>= 1.21.0-0'
maintainers:
- email: support@zitadel.com
name: zitadel
url: https://zitadel.com
name: zitadel
type: application
version: 7.9.0
-- vendor/zitadel/values.yaml --
# Default values for zitadel.
zitadel:
# The ZITADEL config under configmapConfig is written to a Kubernetes ConfigMap
# See all defaults here:
# https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
configmapConfig:
ExternalSecure: true
Machine:
Identification:
Hostname:
Enabled: true
Webhook:
Enabled: false
# The ZITADEL config under secretConfig is written to a Kubernetes Secret
# See all defaults here:
# https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
secretConfig:
# Annotations set on secretConfig secret
secretConfigAnnotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation
helm.sh/hook-weight: "0"
# Reference the name of a secret that contains ZITADEL configuration.
configSecretName:
# The key under which the ZITADEL configuration is located in the secret.
configSecretKey: config-yaml
# ZITADEL uses the masterkey for symmetric encryption.
# You can generate it for example with tr -dc A-Za-z0-9 </dev/urandom | head -c 32
masterkey: ""
# Reference the name of the secret that contains the masterkey. The key should be named "masterkey".
# Note: Either zitadel.masterkey or zitadel.masterkeySecretName must be set
masterkeySecretName: ""
# Annotations set on masterkey secret
masterkeyAnnotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation
helm.sh/hook-weight: "0"
# The CA Certificate needed for establishing secure database connections
dbSslCaCrt: ""
# The Secret containing the CA certificate at key ca.crt needed for establishing secure database connections
dbSslCaCrtSecret: ""
# The db admins secret containing the client certificate and key at tls.crt and tls.key needed for establishing secure database connections
dbSslAdminCrtSecret: ""
# The db users secret containing the client certificate and key at tls.crt and tls.key needed for establishing secure database connections
dbSslUserCrtSecret: ""
# Generate a self-signed certificate using an init container
# This will also mount the generated files to /etc/tls/ so that you can reference them in the pod.
# E.G. KeyPath: /etc/tls/tls.key CertPath: /etc/tls/tls.crt
# By default, the SAN DNS names include, localhost, the POD IP address and the POD name. You may include one more by using additionalDnsName like "my.zitadel.fqdn".
selfSignedCert:
enabled: false
additionalDnsName:
replicaCount: 3
image:
repository: ghcr.io/zitadel/zitadel
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
chownImage:
repository: alpine
pullPolicy: IfNotPresent
tag: "3.19"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# Annotations to add to the deployment
annotations: {}
# Annotations to add to the configMap
configMap:
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation
helm.sh/hook-weight: "0"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation
helm.sh/hook-weight: "0"
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podAdditionalLabels: {}
podSecurityContext:
runAsNonRoot: true
runAsUser: 1000
securityContext: {}
# Additional environment variables
env:
[]
# - name: ZITADEL_DATABASE_POSTGRES_HOST
# valueFrom:
# secretKeyRef:
# name: postgres-pguser-postgres
# key: host
service:
type: ClusterIP
# If service type is "ClusterIP", this can optionally be set to a fixed IP address.
clusterIP: ""
port: 8080
protocol: http2
annotations: {}
scheme: HTTP
ingress:
enabled: false
className: ""
annotations: {}
hosts:
- host: localhost
paths:
- path: /
pathType: Prefix
tls: []
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
topologySpreadConstraints: []
initJob:
# Once ZITADEL is installed, the initJob can be disabled.
enabled: true
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation
helm.sh/hook-weight: "1"
resources: {}
backoffLimit: 5
activeDeadlineSeconds: 300
extraContainers: []
podAnnotations: {}
# Available init commands :
# "": initialize ZITADEL instance (without skip anything)
# database: initialize only the database
# grant: set ALL grant to user
# user: initialize only the database user
# zitadel: initialize ZITADEL internals (skip "create user" and "create database")
command: ""
setupJob:
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation
helm.sh/hook-weight: "2"
resources: {}
activeDeadlineSeconds: 300
extraContainers: []
podAnnotations: {}
additionalArgs:
- "--init-projections=true"
machinekeyWriter:
image:
repository: bitnami/kubectl
tag: ""
resources: {}
readinessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 5
failureThreshold: 3
livenessProbe:
enabled: true
initialDelaySeconds: 0
periodSeconds: 5
failureThreshold: 3
startupProbe:
enabled: true
periodSeconds: 1
failureThreshold: 30
metrics:
enabled: false
serviceMonitor:
# If true, the chart creates a ServiceMonitor that is compatible with Prometheus Operator
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.ServiceMonitor.
# The Prometheus community Helm chart installs this operator
# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#kube-prometheus-stack
enabled: false
honorLabels: false
honorTimestamps: true
pdb:
enabled: false
# these values are used for the PDB and are mutually exclusive
minAvailable: 1
# maxUnavailable: 1
annotations: {}

View File

@@ -0,0 +1,39 @@
# Kustomize is a supported holos component kind
exec holos render component --cluster-name=mycluster . --log-level=debug
# Want generated output
cmp want.yaml deploy/clusters/mycluster/components/kstest/kstest.gen.yaml
-- platform.config.json --
{}
-- cue.mod --
package holos
-- component.cue --
package holos
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"
spec: components: KustomizeBuildList: [{metadata: name: "kstest"}]
-- kustomization.yaml --
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: mynamespace
resources:
- serviceaccount.yaml
-- serviceaccount.yaml --
apiVersion: v1
kind: ServiceAccount
metadata:
name: test
-- want.yaml --
apiVersion: v1
kind: ServiceAccount
metadata:
name: test
namespace: mynamespace

View File

@@ -0,0 +1,17 @@
# https://github.com/holos-run/holos/issues/72
# Want holos to fail on unknown fields to catch typos and aid refactors
! exec holos build .
stderr 'unknown field \\"TypoKubernetesObjectsList\\"'
-- platform.config.json --
{}
-- cue.mod --
package holos
-- component.cue --
package holos
_cluster: string @tag(cluster, string)
_platform_config: string @tag(platform_config, string)
apiVersion: "holos.run/v1alpha1"
kind: "BuildPlan"
spec: components: TypoKubernetesObjectsList: []

File diff suppressed because it is too large Load Diff

1
cue.mod/module.cue Normal file
View File

@@ -0,0 +1 @@
module: "github.com/holos-run/holos"

View File

@@ -1,3 +0,0 @@
# Backstory
Holos is a tool intended to lighten the burden of managing Kubernetes resources. In 2020 we set out to develop a holistic platform composed from open source cloud native components. We quickly became frustrated with how each of the major components packaged and distributed their software in a different way. Many projects choose to distribute their software with Helm charts, while others provide plain yaml files and Kustomize bases. The popular Kube Prometheus Stack project provides Jsonnet to render and update Kubernetes yaml manifests.

View File

@@ -1,29 +0,0 @@
import DocCardList from '@theme/DocCardList';
# Guides
## Technical Overview
Please see the [Technical Overview] to learn about Holos. If you're ready to
dive in and try Holos, please work through the following guides.
## Bank of Holos
The guides are organized as a progression. We'll use Holos to manage a
fictional bank's platform, the Bank of Holos, in each of the guides. Along the
way we'll explain the foundational concepts of Holos.
1. [Quickstart] covers the foundational concepts of Holos.
2. [Deploy a Service] explains how to deploy a containerized service using an
existing Helm chart. This guide then explains how to deploy a similar service
safely and consistently with CUE instead of Helm.
3. [Change a Service] covers the day-two task of making configuration changes to
deployed services safely and consistently.
---
<DocCardList />
[Quickstart]: /docs/quickstart/
[Deploy a Service]: /docs/guides/deploy-a-service/
[Change a Service]: /docs/guides/change-a-service/
[Technical Overview]: /docs/technical-overview/

View File

@@ -1,735 +0,0 @@
---
description: Change a service on your platform.
slug: /guides/change-a-service
sidebar_position: 300
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
# Change a Service
In this guide, we'll explore how Holos supports the frontend development team at [Bank of Holos] in reconfiguring an already deployed service. Along the way, we'll demonstrate how simple configuration changes are made safer with type checking, and how rendering the complete platform provides clear visibility into those changes.
This guide builds on the concepts covered in the [Quickstart] and [Deploy a Service] guides.
## What you'll need {#requirements}
Like our other guides, this guide is intended to be useful without needing to
run each command. If you'd like to apply the manifests to a real Cluster,
complete the [Local Cluster Guide](/docs/guides/local-cluster) before this
guide.
You'll need the following tools installed to run the commands in this guide.
1. [holos](/docs/install) - to build the Platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Holos Components that
wrap Helm charts.
3. [kubectl](https://kubernetes.io/docs/tasks/tools/) - to render Holos
Components that use Kustomize.
## Fork the Git Repository
If you haven't already done so, [fork the Bank of
Holos](https://github.com/holos-run/bank-of-holos/fork) then clone the
repository to your local machine.
<Tabs groupId="git-clone">
<TabItem value="command" label="Command">
```bash
# Change YourName
git clone https://github.com/YourName/bank-of-holos
cd bank-of-holos
```
</TabItem>
<TabItem value="output" label="Output">
```txt
Cloning into 'bank-of-holos'...
remote: Enumerating objects: 1177, done.
remote: Counting objects: 100% (1177/1177), done.
remote: Compressing objects: 100% (558/558), done.
remote: Total 1177 (delta 394), reused 1084 (delta 303), pack-reused 0 (from 0)
Receiving objects: 100% (1177/1177), 2.89 MiB | 6.07 MiB/s, done.
Resolving deltas: 100% (394/394), done.
```
</TabItem>
</Tabs>
Run the rest of the commands in this guide from the root of the repository.
If you plan to apply the changes we make, you can delete and re-create your
local platform synced to the start of this guide.
```bash
./scripts/reset-cluster
./scripts/apply
```
## Rename the Bank
Let's imagine the bank recently re-branded from The Bank of Holos to The
Holistic Bank. The software development team responsible for the front end
website needs to update the branding accordingly.
Let's explore how Holos catches errors early, before they land in production,
then guides the team to the best place to make a change.
The bank front end web service is managed by the
`projects/bank-of-holos/frontend/components/bank-frontend/` component, which
refers to the organization display name defined in `schema.cue`.
<Tabs groupId="F5B546EB-566F-4B83-84C3-C55B40F55555">
<TabItem value="schema.cue" label="schema.cue">
```cue showLineNumbers
package holos
import api "github.com/holos-run/holos/api/author/v1alpha4"
// Define the default organization name.
_Organization: api.#OrganizationStrict & {
DisplayName: string | *"Bank of Holos"
Name: string | *"bank-of-holos"
Domain: string | *"holos.localhost"
}
// Projects represents a way to organize components into projects with owners.
// https://holos.run/docs/api/author/v1alpha4/#Projects
_Projects: api.#Projects
// ArgoConfig represents the configuration of ArgoCD Application resources for
// each component.
// https://holos.run/docs/api/author/v1alpha4/#ArgoConfig
_ArgoConfig: api.#ArgoConfig
#ComponentConfig: api.#ComponentConfig & {
Name: _Tags.name
Component: _Tags.component
Cluster: _Tags.cluster
ArgoConfig: _ArgoConfig & {
if _Tags.project != "no-project" {
AppProject: _Tags.project
}
}
Resources: #Resources
// Mix in project labels if the project is defined by the platform.
if _Tags.project != "no-project" {
CommonLabels: _Projects[_Tags.project].CommonLabels
}
}
// https://holos.run/docs/api/author/v1alpha4/#Kubernetes
#Kubernetes: close({
#ComponentConfig
api.#Kubernetes
})
// https://holos.run/docs/api/author/v1alpha4/#Kustomize
#Kustomize: close({
#ComponentConfig
api.#Kustomize
})
// https://holos.run/docs/api/author/v1alpha4/#Helm
#Helm: close({
#ComponentConfig
api.#Helm
})
```
</TabItem>
<TabItem value="projects/bank-of-holos/frontend/components/bank-frontend/bank-frontend.cue" label="projects/bank-of-holos/frontend/components/bank-frontend/bank-frontend.cue">
```cue showLineNumbers
package holos
// Produce a kubernetes objects build plan.
(#Kubernetes & Objects).BuildPlan
let Objects = {
Name: "bank-frontend"
Namespace: _BankOfHolos.Frontend.Namespace
// Ensure resources go in the correct namespace
Resources: [_]: [_]: metadata: namespace: Namespace
// https://github.com/GoogleCloudPlatform/bank-of-anthos/blob/release/v0.6.5/kubernetes-manifests/frontend.yaml
Resources: {
Service: frontend: {
metadata: name: "frontend"
metadata: labels: {
application: "bank-of-holos"
environment: "development"
team: "frontend"
tier: "web"
}
spec: {
selector: {
app: "frontend"
application: "bank-of-holos"
environment: "development"
team: "frontend"
tier: "web"
}
_ports: http: {
name: "http"
port: 80
targetPort: 8080
protocol: "TCP"
}
ports: [for x in _ports {x}]
}
}
Deployment: frontend: {
metadata: name: "frontend"
metadata: labels: {
application: "bank-of-holos"
environment: "development"
team: "frontend"
tier: "web"
}
spec: {
selector: matchLabels: {
app: "frontend"
application: "bank-of-holos"
environment: "development"
team: "frontend"
tier: "web"
}
template: {
metadata: labels: {
app: "frontend"
application: "bank-of-holos"
environment: "development"
team: "frontend"
tier: "web"
}
spec: {
securityContext: {
seccompProfile: type: "RuntimeDefault"
fsGroup: 1000
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
}
serviceAccountName: "bank-of-holos"
terminationGracePeriodSeconds: 5
containers: [{
env: [{
name: "BANK_NAME"
value: _Organization.DisplayName
}, {
name: "ENV_PLATFORM"
value: "local"
}, {
name: "VERSION"
value: "v0.6.5"
}, {
name: "PORT"
value: "8080"
}, {
name: "ENABLE_TRACING"
value: "false"
}, {
name: "SCHEME"
value: "https"
}, {
name: "LOG_LEVEL"
value: "info"
}, {
name: "DEFAULT_USERNAME"
valueFrom: configMapKeyRef: {
key: "DEMO_LOGIN_USERNAME"
name: "demo-data-config"
}
}, {
name: "DEFAULT_PASSWORD"
valueFrom: configMapKeyRef: {
key: "DEMO_LOGIN_PASSWORD"
name: "demo-data-config"
}
}, {
name: "REGISTERED_OAUTH_CLIENT_ID"
valueFrom: configMapKeyRef: {
key: "DEMO_OAUTH_CLIENT_ID"
name: "oauth-config"
optional: true
}
}, {
name: "ALLOWED_OAUTH_REDIRECT_URI"
valueFrom: configMapKeyRef: {
key: "DEMO_OAUTH_REDIRECT_URI"
name: "oauth-config"
optional: true
}
}]
envFrom: [{
configMapRef: name: "environment-config"
}, {
configMapRef: name: "service-api-config"
}]
image: "us-central1-docker.pkg.dev/bank-of-anthos-ci/bank-of-anthos/frontend:v0.6.5@sha256:d72050f70d12383e4434ad04d189b681dc625f696087ddf0b5df641645c9dafa"
livenessProbe: {
httpGet: {
path: "/ready"
port: 8080
}
initialDelaySeconds: 60
periodSeconds: 15
timeoutSeconds: 30
}
name: "front"
readinessProbe: {
httpGet: {
path: "/ready"
port: 8080
}
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 10
}
resources: {
limits: {
cpu: "250m"
memory: "128Mi"
}
requests: {
cpu: "100m"
memory: "64Mi"
}
}
securityContext: {
allowPrivilegeEscalation: false
capabilities: drop: ["all"]
privileged: false
readOnlyRootFilesystem: true
}
volumeMounts: [{
mountPath: "/tmp"
name: "tmp"
}, {
mountPath: "/tmp/.ssh"
name: "publickey"
readOnly: true
}]
}]
volumes: [
{
emptyDir: {}
name: "tmp"
},
{
name: "publickey"
secret: {
items: [{key: "jwtRS256.key.pub", path: "publickey"}]
secretName: "jwt-key"
}
},
]
}
}
}
}
// Allow HTTPRoutes in the ingress gateway namespace to reference Services
// in this namespace.
ReferenceGrant: grant: _ReferenceGrant & {
metadata: namespace: Namespace
}
// Include shared resources
_BankOfHolos.Resources
}
}
```
</TabItem>
</Tabs>
Line 7 of the `schema.cue` file defines the _default_ value for
`_Organization.DisplayName` by using `string | *"..."`. In CUE, the `*`
asterisk character denotes a [default value].
Line 78 of the `bank-frontend.cue` file refers to `_Organization.DisplayName` to
configure the front end web container.
Let's change the name of the bank by defining a new value for
`_Organization.DisplayName` at the root of the configuration. Create
`projects/organization.cue` with the following content.
<Tabs groupId="B386181F-EBE7-469D-8CB5-37631067669B">
<TabItem value="projects/organization.cue" label="projects/organization.cue">
```cue showLineNumbers
package holos
_Organization: DisplayName: "The Holistic-Bank"
```
</TabItem>
</Tabs>
Let's render the platform and see if this changes the name.
<Tabs groupId="A014333C-3271-4C22-87E6-2B7BF898EA3E">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
could not run: could not marshal json projects/platform/components/istio/cni: cue: marshal error: _Organization.DisplayName: 2 errors in empty disjunction: (and 2 more errors) at internal/builder/builder.go:63
_Organization.DisplayName: _Organization.DisplayName: 2 errors in empty disjunction: (and 2 more errors)
could not run: could not marshal json projects/platform/components/argocd/crds: cue: marshal error: _Organization.DisplayName: 2 errors in empty disjunction: (and 2 more errors) at internal/builder/builder.go:63
_Organization.DisplayName: _Organization.DisplayName: 2 errors in empty disjunction: (and 2 more errors)
could not run: could not render component: exit status 1 at builder/v1alpha4/builder.go:95
```
</TabItem>
</Tabs>
:::warning Whoops
The development team defined a value that isn't allowed by the
configuration.
:::
Someone else in the organization placed a [constraint] on the
configuration to ensure the display name contains only letters, numbers, and
spaces. This constraint is expressed as a [regular expression].
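For illustration, such a constraint might look something like the following
sketch. The actual file and pattern in the Bank of Holos repository may
differ.
```cue showLineNumbers
package holos
// Hypothetical constraint: display names may contain only letters, numbers,
// and spaces.
_Organization: DisplayName: =~"^[A-Za-z0-9 ]+$"
```
The value `"The Holistic-Bank"` fails this pattern because of the hyphen,
which is why the render failed above.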
:::tip
CUE provides clear visibility into where to start looking to resolve conflicts.
Each file and line number listed is a place where the
`_Organization.DisplayName` field is defined.
:::
Let's try again, this time replacing the hyphen with a space.
<Tabs groupId="F93B34FA-C0C6-4793-A32F-DAD094403208">
<TabItem value="projects/organization.cue" label="projects/organization.cue">
```cue showLineNumbers
package holos
_Organization: DisplayName: "The Holistic Bank"
```
</TabItem>
</Tabs>
<Tabs groupId="5FD68778-476A-4F82-8817-71CEE205216E">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered bank-ledger-db for cluster workload in 139.863625ms
rendered bank-accounts-db for cluster workload in 151.74875ms
rendered bank-balance-reader for cluster workload in 154.356083ms
rendered bank-ledger-writer for cluster workload in 161.209541ms
rendered bank-userservice for cluster workload in 163.373417ms
rendered bank-backend-config for cluster workload in 179.271208ms
rendered bank-secrets for cluster workload in 204.35625ms
rendered gateway for cluster workload in 118.707583ms
rendered httproutes for cluster workload in 140.981541ms
rendered bank-transaction-history for cluster workload in 156.066875ms
rendered bank-frontend for cluster workload in 300.102292ms
rendered bank-contacts for cluster workload in 159.89625ms
rendered cni for cluster workload in 150.754458ms
rendered istiod for cluster workload in 222.922625ms
rendered app-projects for cluster workload in 118.422792ms
rendered ztunnel for cluster workload in 142.840625ms
rendered cert-manager for cluster workload in 190.938834ms
rendered base for cluster workload in 340.679416ms
rendered local-ca for cluster workload in 107.120334ms
rendered external-secrets for cluster workload in 145.020834ms
rendered argocd for cluster workload in 299.690917ms
rendered namespaces for cluster workload in 115.862334ms
rendered gateway-api for cluster workload in 225.783833ms
rendered external-secrets-crds for cluster workload in 339.741166ms
rendered crds for cluster workload in 421.849041ms
rendered platform in 718.015959ms
```
</TabItem>
</Tabs>
:::tip Success
Great, the platform rendered. We know the display name is valid according to
the constraints.
:::
Let's see if the new display name value updated the configuration for the bank
frontend.
<Tabs groupId="6C068651-2061-4262-BE1E-7BB3E7EB66CB">
<TabItem value="command" label="Command">
```bash
git status
```
</TabItem>
<TabItem value="output" label="Output">
```txt
On branch main
Your branch and 'jeffmccune/main' have diverged,
and have 2 and 4 different commits each, respectively.
(use "git pull" to merge the remote branch into yours)
Changes not staged for commit:
(use "git add <file>..." to update what will be committed)
(use "git restore <file>..." to discard changes in working directory)
// highlight-next-line
modified: deploy/clusters/workload/components/app-projects/app-projects.gen.yaml
modified: deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
Untracked files:
(use "git add <file>..." to include in what will be committed)
projects/organization.cue
no changes added to commit (use "git add" and/or "git commit -a")
```
</TabItem>
</Tabs>
<Tabs groupId="4A20831E-461B-4EDE-8F6E-E73C3AEC12DB">
<TabItem value="command" label="Command">
```bash
git diff
```
</TabItem>
<TabItem value="output" label="Output">
```diff
diff --git a/deploy/clusters/workload/components/app-projects/app-projects.gen.yaml b/deploy/clusters/workload/components/app-projects/app-projects.gen.yaml
index 7914756..250c660 100644
--- a/deploy/clusters/workload/components/app-projects/app-projects.gen.yaml
+++ b/deploy/clusters/workload/components/app-projects/app-projects.gen.yaml
@@ -9,7 +9,7 @@ spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
- description: Holos managed AppProject for Bank of Holos
+ description: Holos managed AppProject for The Holistic Bank
destinations:
- namespace: '*'
server: '*'
@@ -26,7 +26,7 @@ spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
- description: Holos managed AppProject for Bank of Holos
+ description: Holos managed AppProject for The Holistic Bank
destinations:
- namespace: '*'
server: '*'
@@ -43,7 +43,7 @@ spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
- description: Holos managed AppProject for Bank of Holos
+ description: Holos managed AppProject for The Holistic Bank
destinations:
- namespace: '*'
server: '*'
@@ -60,7 +60,7 @@ spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
- description: Holos managed AppProject for Bank of Holos
+ description: Holos managed AppProject for The Holistic Bank
destinations:
- namespace: '*'
server: '*'
diff --git a/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml b/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
index dae6f93..d41516b 100644
--- a/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
+++ b/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
@@ -71,7 +71,7 @@ spec:
containers:
- env:
- name: BANK_NAME
- value: Bank of Holos
+ value: The Holistic Bank
- name: ENV_PLATFORM
value: local
- name: VERSION
```
</TabItem>
</Tabs>
:::danger
The new display name changed the frontend container, but it _also_ affected the
app-projects component owned by the platform team.
:::
Submitting a pull request would trigger a code review from the platform
engineering team, which manages the app-projects component. Let's see how to
narrow the change down so it only affects the bank's user-facing services.
All of these services are managed under `projects/bank-of-holos/`. Move the
`organization.cue` file into this folder to limit the scope of the
configuration to the components contained within.
```bash
mv projects/organization.cue projects/bank-of-holos/
```
Render the platform and let's see what changed.
<Tabs groupId="0FFEC244-B59B-4136-9C82-837985DC2AB8">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered bank-ledger-db for cluster workload in 163.814917ms
rendered bank-accounts-db for cluster workload in 163.960208ms
rendered bank-userservice for cluster workload in 164.1625ms
rendered bank-ledger-writer for cluster workload in 169.185291ms
rendered bank-balance-reader for cluster workload in 174.5455ms
rendered bank-backend-config for cluster workload in 178.092125ms
rendered bank-secrets for cluster workload in 202.305334ms
rendered gateway for cluster workload in 122.81725ms
rendered httproutes for cluster workload in 134.121084ms
rendered bank-contacts for cluster workload in 146.4185ms
rendered bank-frontend for cluster workload in 311.35425ms
rendered bank-transaction-history for cluster workload in 160.103ms
rendered cni for cluster workload in 145.762083ms
rendered istiod for cluster workload in 216.0065ms
rendered app-projects for cluster workload in 117.684333ms
rendered ztunnel for cluster workload in 144.555292ms
rendered cert-manager for cluster workload in 178.247917ms
rendered base for cluster workload in 336.679ms
rendered external-secrets for cluster workload in 142.21825ms
rendered local-ca for cluster workload in 101.249ms
rendered argocd for cluster workload in 280.54525ms
rendered namespaces for cluster workload in 106.822042ms
rendered gateway-api for cluster workload in 200.459791ms
rendered external-secrets-crds for cluster workload in 470.125833ms
rendered crds for cluster workload in 844.388666ms
rendered platform in 1.154937084s
```
</TabItem>
</Tabs>
<Tabs groupId="DE4FEEE5-FC53-48A6-BC6F-D0EA1DBFD00C">
<TabItem value="command" label="Command">
```bash
git diff
```
</TabItem>
<TabItem value="output" label="Output">
```diff
diff --git a/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml b/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
index dae6f93..d41516b 100644
--- a/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
+++ b/deploy/clusters/workload/components/bank-frontend/bank-frontend.gen.yaml
@@ -71,7 +71,7 @@ spec:
containers:
- env:
- name: BANK_NAME
- value: Bank of Holos
+ value: The Holistic Bank
- name: ENV_PLATFORM
value: local
- name: VERSION
```
</TabItem>
</Tabs>
:::tip Success
Great! This time, the only manifest affected is our `bank-frontend.gen.yaml`.
:::
The `BANK_NAME` environment variable will change as we expect, and only the dev
teams managing the bank services components are affected by the change.
Let's commit and push this change and see if it works.
<Tabs groupId="435D9C60-F841-4CF1-A947-506422E6BAC9">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m 'frontend: rename bank to The Holistic Bank'
git push
```
</TabItem>
<TabItem value="output" label="Output">
```txt
[main fda74ec] frontend: rename bank to The Holistic Bank
2 files changed, 4 insertions(+), 1 deletion(-)
create mode 100644 projects/bank-of-holos/organization.cue
```
</TabItem>
</Tabs>
Now that we've pushed the change, let's apply it to the platform.
## Apply the Change
Once we've pushed the change, navigate to the [bank-frontend GitOps
Application](https://argocd.holos.localhost/applications/argocd/bank-frontend?view=tree&resource=).
We can see the Deployment needs to sync to the desired state we just pushed.
![bank-frontend out of sync](./img/change-a-service-out-of-sync.png)
Clicking on the frontend Deployment, we see the diff with the change we expect.
![bank-frontend diff](./img/change-a-service-diff.png)
When we sync the change, ArgoCD applies the desired configuration state to the
cluster and Kubernetes handles rolling out the updated Deployment resource.
![bank-frontend progressing](./img/change-a-service-progressing.png)
Soon, the deployment finishes and the component is in sync again.
![bank-frontend in sync](./img/change-a-service-in-sync.png)
Finally, let's see if the name actually changed on the website. Navigate to
https://bank.holos.localhost/.
![bank-frontend login page](./img/change-a-service-login-page.png)
:::tip Success
We made our change and applied the updated configuration to the platform.
:::
Thanks for taking the time to work through this guide, which covered:
- How multiple teams could be impacted by defining configuration at the
`projects/` path.
- How to scope our change to only affect components within the
`projects/bank-of-holos/` path, eliminating the impact on other teams.
- How CUE can [constrain] values in Holos, increasing safety.
- How to handle a [default value] in CUE.
- How CUE surfaces the file and line number of _every_ place a value is
defined, making it faster and easier to troubleshoot problems.
[Quickstart]: /docs/quickstart/
[Deploy a Service]: /docs/guides/deploy-a-service/
[Change a Service]: /docs/guides/change-a-service/
[Helm]: /docs/api/author/v1alpha3/#Helm
[Kubernetes]: /docs/api/author/v1alpha3/#Kubernetes
[Kustomize]: /docs/api/author/v1alpha3/#Kustomize
[ComponentFields]: /docs/api/author/v1alpha3/#ComponentFields
[platform-files]: /docs/quickstart/#how-platform-rendering-works
[AppProject]: https://argo-cd.readthedocs.io/en/stable/user-guide/projects/
[unification operator]: https://cuelang.org/docs/reference/spec/#unification
[code-owners]: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
[Kustomization API]: https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v5.2/api/types/kustomization.go#L34
[cue import]: https://cuelang.org/docs/reference/command/cue-help-import/
[cue get go]: https://cuelang.org/docs/concept/how-cue-works-with-go/
[timoni-crds]: https://timoni.sh/cue/module/custom-resources/
[HTTPRoute]: https://gateway-api.sigs.k8s.io/api-types/httproute/?h=filter
[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
[hidden field]: https://cuelang.org/docs/tour/references/hidden/
[comprehension]: https://cuelang.org/docs/reference/spec/#comprehensions
[code owners]: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
[ReferenceGrant]: https://gateway-api.sigs.k8s.io/api-types/referencegrant/
[Local Cluster Guide]: /docs/guides/local-cluster
[Bank of Holos]: https://github.com/holos-run/bank-of-holos
[default value]: https://cuelang.org/docs/tour/types/defaults/
[constrain]: https://cuelang.org/docs/tour/basics/constraints/
[constraint]: https://cuelang.org/docs/tour/basics/constraints/
[regular expression]: https://cuelang.org/docs/tour/expressions/regexp/

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary image files (guide screenshots) deleted; previews not shown.

View File

@@ -1,277 +0,0 @@
---
description: Build a local Cluster to use with these guides.
slug: /guides/local-cluster
sidebar_position: 999
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
# Local Cluster
In this guide we'll set up a local k3d cluster to apply and explore the
configuration described in our other guides. After completing this guide you'll
have a standard Kubernetes API server with proper DNS and TLS certificates.
You'll be able to easily reset the cluster to a known good state to iterate on
your own Platform.
The [Concepts](/docs/concepts) page defines capitalized terms such as Platform
and Component.
## Reset the Cluster
If you've already followed this guide, reset the cluster by running the
following commands. Skip this section if you're creating a cluster for the
first time.
First, delete the cluster.
<Tabs groupId="k3d-cluster-delete">
<TabItem value="command" label="Command">
```bash
k3d cluster delete workload
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
INFO[0000] Deleting cluster 'workload'
INFO[0000] Deleting cluster network 'k3d-workload'
INFO[0000] Deleting 1 attached volumes...
INFO[0000] Removing cluster details from default kubeconfig...
INFO[0000] Removing standalone kubeconfig file (if there is one)...
INFO[0000] Successfully deleted cluster workload!
```
</TabItem>
</Tabs>
Then create the cluster again.
<Tabs groupId="k3d-cluster-create">
<TabItem value="command" label="Command">
```bash
k3d cluster create workload \
--registry-use k3d-registry.holos.localhost:5100 \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
INFO[0000] portmapping '443:443' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] Prep: Network
INFO[0000] Created network 'k3d-workload'
INFO[0000] Created image volume k3d-workload-images
INFO[0000] Starting new tools node...
INFO[0000] Starting node 'k3d-workload-tools'
INFO[0001] Creating node 'k3d-workload-server-0'
INFO[0001] Creating LoadBalancer 'k3d-workload-serverlb'
INFO[0001] Using the k3d-tools node to gather environment information
INFO[0001] HostIP: using network gateway 172.17.0.1 address
INFO[0001] Starting cluster 'workload'
INFO[0001] Starting servers...
INFO[0001] Starting node 'k3d-workload-server-0'
INFO[0003] All agents already running.
INFO[0003] Starting helpers...
INFO[0003] Starting node 'k3d-workload-serverlb'
INFO[0009] Injecting records for hostAliases (incl. host.k3d.internal) and for 3 network members into CoreDNS configmap...
INFO[0012] Cluster 'workload' created successfully!
INFO[0012] You can now use it like this:
kubectl cluster-info
```
</TabItem>
</Tabs>
Finally, add your trusted certificate authority.
<Tabs groupId="apply-local-ca">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f "$(mkcert -CAROOT)/namespace.yaml"
kubectl apply --server-side=true -n cert-manager -f "$(mkcert -CAROOT)/local-ca.yaml"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
namespace/cert-manager serverside-applied
secret/local-ca serverside-applied
```
</TabItem>
</Tabs>
You're back to the same state as the first time you completed this guide.
## What you'll need {#requirements}
You'll need the following tools installed to complete this guide.
1. [holos](/docs/install) - to build the platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Holos components that wrap upstream Helm charts.
3. [k3d](https://k3d.io/#installation) - to provide a k8s api server.
4. [OrbStack](https://docs.orbstack.dev/install) or [Docker](https://docs.docker.com/get-docker/) - to use k3d.
5. [kubectl](https://kubernetes.io/docs/tasks/tools/) - to interact with the k8s api server.
6. [mkcert](https://github.com/FiloSottile/mkcert?tab=readme-ov-file#installation) - to make trusted TLS certificates.
7. [jq](https://jqlang.github.io/jq/download/) - to fiddle with JSON output.
## Configure DNS {#configure-dns}
Configure your machine to resolve `*.holos.localhost` to your loopback
interface. This is necessary for requests to reach the workload cluster. Save
this script to a file and execute it.
```bash showLineNumbers
#! /bin/bash
#
set -euo pipefail
tmpdir="$(mktemp -d)"
finish() {
[[ -d "$tmpdir" ]] && rm -rf "$tmpdir"
}
trap finish EXIT
cd "$tmpdir"
brew install dnsmasq
cat <<EOF >"$(brew --prefix)/etc/dnsmasq.d/holos.localhost.conf"
# Refer to https://holos.run/docs/tutorial/local/k3d/
address=/holos.localhost/127.0.0.1
EOF
if [[ -r /Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist ]]; then
echo "dnsmasq already configured"
else
sudo cp "$(brew list dnsmasq | grep 'dnsmasq.plist$')" \
/Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist
sudo launchctl unload /Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist
sudo launchctl load /Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist
dscacheutil -flushcache
echo "dnsmasq configured"
fi
sudo mkdir -p /etc/resolver
sudo tee /etc/resolver/holos.localhost <<EOF
domain holos.localhost
nameserver 127.0.0.1
EOF
sudo killall -HUP mDNSResponder
echo "all done."
```
## Create the Cluster {#create-the-cluster}
The Workload Cluster is where your applications and services will be deployed.
In production this is usually an EKS, GKE, or AKS cluster.
:::tip
Holos supports all compliant Kubernetes clusters. Holos was developed and tested
on GKE, EKS, Talos, k3s, and Kubeadm clusters.
:::
Create a local registry to speed up image builds and pulls.
```bash
k3d registry create registry.holos.localhost --port 5100
```
Create the workload cluster configured to use the local registry.
```bash
k3d cluster create workload \
--registry-use k3d-registry.holos.localhost:5100 \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
Traefik is disabled because Istio provides the same functionality.
## Setup Root CA {#setup-root-ca}
Platforms most often use cert-manager to issue TLS certificates. The browser
and tools we're using need to trust these certificates to work together.
Generate a local, trusted root certificate authority with the following script.
Admin access is necessary for `mkcert` to install the certificate into your
trust stores.
```bash
sudo -v
```
Manage the local CA and copy the CA key to the workload cluster so that
cert-manager can issue trusted certificates.
Save this script to a file and execute it to configure a trusted certificate
authority.
```bash showLineNumbers
#! /bin/bash
#
set -euo pipefail
mkcert --install
tmpdir="$(mktemp -d)"
finish() {
[[ -d "$tmpdir" ]] && rm -rf "$tmpdir"
}
trap finish EXIT
cd "$tmpdir"
# Create the local CA Secret with ca.crt, tls.crt, tls.key
mkdir local-ca
cd local-ca
CAROOT="$(mkcert -CAROOT)"
cp -p "${CAROOT}/rootCA.pem" ca.crt
cp -p "${CAROOT}/rootCA.pem" tls.crt
cp -p "${CAROOT}/rootCA-key.pem" tls.key
kubectl create secret generic --from-file=. --dry-run=client -o yaml local-ca > ../local-ca.yaml
echo 'type: kubernetes.io/tls' >> ../local-ca.yaml
cd ..
cat <<EOF > namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
labels:
kubernetes.io/metadata.name: cert-manager
name: cert-manager
spec:
finalizers:
- kubernetes
EOF
kubectl apply --server-side=true -f namespace.yaml
kubectl apply -n cert-manager --server-side=true -f local-ca.yaml
# Save the Secret to easily reset the cluster later.
install -m 0644 namespace.yaml "${CAROOT}/namespace.yaml"
install -m 0600 local-ca.yaml "${CAROOT}/local-ca.yaml"
```
:::warning
Take care to run the local-ca script each time you create the workload cluster
so that Certificates are issued correctly.
:::
## Clean Up {#clean-up}
If you'd like to clean up the resources you created in this guide, remove them
with:
```bash
k3d cluster delete workload
```
## Next Steps
Now that you have a real cluster, apply and explore the manifests Holos renders
in the [Quickstart](/docs/quickstart) guide.

File diff suppressed because it is too large Load Diff

View File

@@ -1,724 +0,0 @@
---
description: Try Holos with this quick start guide.
slug: /archive/2024-09-15-quickstart
sidebar_position: 100
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
# Quickstart
In this guide, you'll experience how Holos makes the process of operating a
Platform safer, easier, and more consistent. We'll use Holos to manage a
vendor-provided Helm chart as a Component. Next, we'll mix in our own custom
resources to manage the Component with GitOps. Finally, you'll see how Holos
makes it safer and easier to maintain software over time by surfacing the exact
changes that will be applied when upgrading the vendor's chart to a new version,
before they are actually made.
The [Concepts](/docs/concepts) page defines capitalized terms such as Platform
and Component.
## What you'll need {#requirements}
You'll need the following tools installed to complete this guide.
1. [holos](/docs/install) - to build the Platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Holos Components that
wrap upstream Helm charts.
Optionally, if you'd like to apply the rendered manifests to a real Cluster,
first complete the [Local Cluster Guide](/docs/guides/local-cluster).
## Install Holos
Install Holos with the following command or other methods listed on the
[Installation](/docs/install/) page.
```bash
go install github.com/holos-run/holos/cmd/holos@latest
```
## Create a Git Repository
Start by initializing an empty Git repository. Holos operates on local files
stored in a Git repository.
<Tabs groupId="init">
<TabItem value="command" label="Command">
```bash
mkdir holos-quickstart
cd holos-quickstart
git init
```
</TabItem>
<TabItem value="output" label="Output">
```txt
Initialized empty Git repository in /holos-quickstart/.git/
```
</TabItem>
</Tabs>
This guide assumes you will run commands from the root directory of the Git
repository unless stated otherwise.
## Generate the Platform {#Generate-Platform}
Generate the Platform code in the repository root. A Platform refers to the
entire set of software holistically integrated to provide a software development
platform for your organization. In this guide, the Platform will include a
single Component to demonstrate how the concepts fit together.
```bash
holos generate platform quickstart
```
Commit the generated platform config to the repository.
<Tabs groupId="commit-platform">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "holos generate platform quickstart - $(holos --version)"
```
</TabItem>
<TabItem value="output" label="Output">
```txt
[main (root-commit) 0b17b7f] holos generate platform quickstart
213 files changed, 72349 insertions(+)
...
```
</TabItem>
</Tabs>
## Generate a Component {#generate-component}
The platform you generated is currently empty. Run the following command to
generate the CUE code that defines a Helm Component.
<Tabs groupId="gen-podinfo">
<TabItem value="command" label="Command">
```bash
holos generate component podinfo --component-version 6.6.1
```
</TabItem>
<TabItem value="output" label="Output">
```txt
generated component
```
</TabItem>
</Tabs>
The `--component-version 6.6.1` flag intentionally installs an older release.
You'll see how Holos assists with software upgrades later in this guide.
The generate component command creates two files: a leaf file,
`components/podinfo/podinfo.gen.cue`, which defines the component itself, and a
root file, `podinfo.gen.cue`, which registers the component with the Platform.
Holos leverages the fact that [order is
irrelevant](https://cuelang.org/docs/tour/basics/order-irrelevance/) in CUE to
register the component simply by adding a file at the root of the Git
repository.
<Tabs groupId="podinfo-files">
<TabItem value="components/podinfo/podinfo.gen.cue" label="Leaf">
`components/podinfo/podinfo.gen.cue`
```cue showLineNumbers
package holos
// Produce a helm chart build plan.
(#Helm & Chart).Output
let Chart = {
Name: "podinfo"
Version: "6.6.1"
Namespace: "default"
Repo: name: "podinfo"
Repo: url: "https://stefanprodan.github.io/podinfo"
Values: {}
}
```
</TabItem>
<TabItem value="podinfo.gen.cue" label="Root">
`podinfo.gen.cue`
```cue showLineNumbers
package holos
// Manage podinfo on workload clusters only
for Cluster in #Fleets.workload.clusters {
#Platform: Components: "\(Cluster.name)/podinfo": {
path: "components/podinfo"
cluster: Cluster.name
}
}
```
</TabItem>
</Tabs>
In this example, we provide the minimal information needed to manage the Helm
chart: the name, version, Kubernetes namespace for deployment, and the chart
repository location.
This chart deploys cleanly without any values provided, but we include an empty
Values struct to show how Holos improves consistency and safety in Helm by
leveraging the strong type-checking in CUE. You can safely pass shared values,
such as the organization's domain name, to all Components across all clusters in
the Platform by defining them at the root of the configuration.
Commit the generated component config to the repository.
<Tabs groupId="commit-component">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "holos generate component podinfo - $(holos --version)"
```
</TabItem>
<TabItem value="output" label="Output">
```txt
[main cc0e90c] holos generate component podinfo
2 files changed, 24 insertions(+)
create mode 100644 components/podinfo/podinfo.gen.cue
create mode 100644 podinfo.gen.cue
```
</TabItem>
</Tabs>
## Render the Component
You can render individual components without adding them to a Platform, which is
helpful when developing a new component.
<Tabs groupId="render-podinfo">
<TabItem value="command" label="Command">
```bash
holos render component ./components/podinfo --cluster-name=default
```
</TabItem>
<TabItem value="output" label="Output">
```txt
cached
rendered podinfo
```
</TabItem>
</Tabs>
First, the command caches the Helm chart locally to speed up subsequent
renderings. Then, the command runs Helm to produce the output and writes it into
the deploy directory.
<Tabs groupId="tree-podinfo">
<TabItem value="command" label="Command">
```bash
tree deploy
```
</TabItem>
<TabItem value="output" label="Output">
```txt
deploy
└── clusters
└── default
└── components
└── podinfo
└── podinfo.gen.yaml
5 directories, 1 file
```
</TabItem>
</Tabs>
The component deploys to one cluster named `default`. In practice, the same
component is often deployed to multiple clusters, such as `east` and `west` to
provide redundancy and increase availability.
:::tip
This example is equivalent to running `helm template` on the chart and saving
the output to a file. Holos simplifies this task, making it safer and more
consistent when managing many charts.
:::
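If you're curious what the chart produced, you can list the resource kinds in
the rendered file; a small sketch, assuming the deploy path shown in the tree
above:
```bash
# Count the Kubernetes resource kinds rendered for the podinfo component.
grep '^kind:' deploy/clusters/default/components/podinfo/podinfo.gen.yaml | sort | uniq -c
```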
## Mix in an ArgoCD Application
We've seen how Holos works with Helm, but we haven't yet explored how Holos
makes it easier to consistently and safely manage all of the software in a
Platform.
Holos allows you to easily mix in resources that differentiate your Platform.
We'll use this feature to mix in an ArgoCD [Application][application] to manage
the podinfo Component with GitOps. We'll define this configuration in a way that
can be automatically and consistently reused across all future Components added
to the Platform.
Create a new file named `argocd.cue` in the root of your Git repository with the
following contents:
<Tabs groupId="argocd-config">
<TabItem value="command" label="argocd.cue">
```cue showLineNumbers
package holos
#ArgoConfig: {
Enabled: true
RepoURL: "https://example.com/holos-quickstart.git"
}
```
</TabItem>
</Tabs>
:::tip
If you plan to apply the rendered output to a real cluster, change the
`example.com` RepoURL to the URL of the Git repository you created in this
guide. You don't need to change the example if you're just exploring Holos by
inspecting the rendered output without applying it to a live cluster.
:::
With this file in place, render the component again.
<Tabs groupId="render-podinfo-argocd">
<TabItem value="command" label="Command">
```bash
holos render component ./components/podinfo --cluster-name=default
```
</TabItem>
<TabItem value="output" label="Output">
```txt
wrote deploy file
rendered gitops/podinfo
rendered podinfo
```
</TabItem>
</Tabs>
Holos uses the locally cached chart to improve performance and reliability. It
then renders the Helm template output along with an ArgoCD Application resource
for GitOps.
:::tip
By defining the ArgoCD configuration at the root, we again take advantage of the
fact that [order is
irrelevant](https://cuelang.org/docs/tour/basics/order-irrelevance/) in CUE.
:::
Defining the configuration at the root ensures all future leaf Components take
the ArgoCD configuration and render an Application manifest for GitOps
management.
<Tabs groupId="tree-podinfo-argocd">
<TabItem value="command" label="Command">
```bash
tree deploy
```
</TabItem>
<TabItem value="output" label="Output">
```txt
deploy
└── clusters
└── default
├── components
│   └── podinfo
│   └── podinfo.gen.yaml
└── gitops
└── podinfo.application.gen.yaml
6 directories, 2 files
```
</TabItem>
</Tabs>
Notice the new `podinfo.application.gen.yaml` file created by enabling ArgoCD in
the Helm component. The Application resource in the file looks like this:
<Tabs groupId="podinfo-application">
<TabItem value="file" label="podinfo.application.gen.yaml">
```yaml showLineNumbers
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: podinfo
  namespace: argocd
spec:
  destination:
    server: https://kubernetes.default.svc
  project: default
  source:
    path: ./deploy/clusters/default/components/podinfo
    repoURL: https://example.com/holos-quickstart.git
    targetRevision: main
```
</TabItem>
</Tabs>
:::tip
Holos generates a similar Application resource for every additional Component
added to your Platform.
:::
Finally, add and commit the results to your Platform's Git repository.
<Tabs groupId="commit-argo">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "holos render component ./components/podinfo --cluster-name=default"
```
</TabItem>
<TabItem value="output" label="Output">
```txt
[main f95cef1] holos render component ./components/podinfo --cluster-name=default
3 files changed, 134 insertions(+)
create mode 100644 argocd.cue
create mode 100644 deploy/clusters/default/components/podinfo/podinfo.gen.yaml
create mode 100644 deploy/clusters/default/gitops/podinfo.application.gen.yaml
```
</TabItem>
</Tabs>
In this section, we learned how Holos simplifies mixing resources into
Components, like an ArgoCD Application. Holos ensures consistency by managing an
Application resource for every Component added to the Platform through the
configuration you define in `argocd.cue` at the root of the repository.
## Define Workload Clusters {#workload-clusters}
We've generated a Component to manage podinfo and integrated it with our
Platform, but rendering the Platform doesn't render podinfo. Podinfo isn't
rendered because we haven't assigned any Clusters to the workload Fleet.
Define two new clusters, `east` and `west`, and assign them to the workload
Fleet. Create a new file named `clusters.cue` in the root of your Git repository
with the following contents:
<Tabs groupId="clusters">
<TabItem value="clusters.cue" label="clusters.cue">
```cue showLineNumbers
package holos
// Define two workload clusters for disaster recovery.
#Fleets: workload: clusters: {
// In CUE _ indicates values are defined elsewhere.
east: _
west: _
}
```
</TabItem>
</Tabs>
This example shows how Holos simplifies managing multiple clusters with similar
configuration by grouping them into a Fleet.
:::tip
Fleets help segment a group of Clusters into one leader and multiple followers
by designating one cluster as the primary. Holos makes it safer, easier, and
more consistent to reconfigure which cluster is the primary. The primary can be
set to automatically restore persistent data from backups, while non-primary
clusters can be configured to automatically replicate from the primary.
Automatic database backup, restore, and streaming replication is an advanced
topic enabled by Cloud Native PG and CUE. Check back for a guide on this and
other Day 2 operations topics.
:::
## Render the Platform {#render-platform}
Render the Platform to render the podinfo Component for each of the workload
clusters.
<Tabs groupId="render-platform">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered components/podinfo for cluster west in 99.480792ms
rendered components/podinfo for cluster east in 99.882667ms
```
</TabItem>
</Tabs>
The render platform command iterates over every Cluster in the Fleet and renders
each Component assigned to the Fleet. Notice the two additional subdirectories
created under the deploy directory, one for each cluster: `east` and `west`.
<Tabs groupId="tree-platform">
<TabItem value="command" label="Command">
```bash
tree deploy
```
</TabItem>
<TabItem value="output" label="Output">
```txt
deploy
└── clusters
├── default
│   ├── components
│   │   └── podinfo
│   │   └── podinfo.gen.yaml
│   └── gitops
│   └── podinfo.application.gen.yaml
# highlight-next-line
├── east
│   ├── components
│   │   └── podinfo
│   │   └── podinfo.gen.yaml
│   └── gitops
│   └── podinfo.application.gen.yaml
# highlight-next-line
└── west
├── components
│   └── podinfo
│   └── podinfo.gen.yaml
└── gitops
└── podinfo.application.gen.yaml
14 directories, 6 files
```
</TabItem>
</Tabs>
Holos ensures consistency and safety by defining the ArgoCD Application once,
with strong type checking, at the configuration root.
New Application resources are automatically generated for the `east` and `west`
workload Clusters.
<Tabs groupId="applications">
<TabItem value="east" label="east">
`deploy/clusters/east/gitops/podinfo.application.gen.yaml`
```yaml showLineNumbers
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: podinfo
  namespace: argocd
spec:
  destination:
    server: https://kubernetes.default.svc
  project: default
  source:
    # highlight-next-line
    path: ./deploy/clusters/east/components/podinfo
    repoURL: https://example.com/holos-quickstart.git
    targetRevision: main
```
</TabItem>
<TabItem value="west" label="west">
`deploy/clusters/west/gitops/podinfo.application.gen.yaml`
```yaml showLineNumbers
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: podinfo
  namespace: argocd
spec:
  destination:
    server: https://kubernetes.default.svc
  project: default
  source:
    # highlight-next-line
    path: ./deploy/clusters/west/components/podinfo
    repoURL: https://example.com/holos-quickstart.git
    targetRevision: main
```
</TabItem>
<TabItem value="default" label="default">
`deploy/clusters/default/gitops/podinfo.application.gen.yaml`
```yaml showLineNumbers
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: podinfo
  namespace: argocd
spec:
  destination:
    server: https://kubernetes.default.svc
  project: default
  source:
    # highlight-next-line
    path: ./deploy/clusters/default/components/podinfo
    repoURL: https://example.com/holos-quickstart.git
    targetRevision: main
```
</TabItem>
</Tabs>
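Because the Application resources are generated from the same definition, the
manifests for the two workload clusters should differ only in the
`spec.source.path` field. A quick way to confirm this, assuming the file layout
shown above:
```bash
# The only expected difference is the clusters/east vs clusters/west path.
diff deploy/clusters/east/gitops/podinfo.application.gen.yaml \
     deploy/clusters/west/gitops/podinfo.application.gen.yaml
```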
Add and commit the rendered Platform and workload Clusters.
<Tabs groupId="commit-render-platform">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "holos render platform ./platform - $(holos --version)"
```
</TabItem>
<TabItem value="output" label="Output">
```txt
[main 5aebcf5] holos render platform ./platform - 0.93.2
5 files changed, 263 insertions(+)
create mode 100644 clusters.cue
create mode 100644 deploy/clusters/east/components/podinfo/podinfo.gen.yaml
create mode 100644 deploy/clusters/east/gitops/podinfo.application.gen.yaml
create mode 100644 deploy/clusters/west/components/podinfo/podinfo.gen.yaml
create mode 100644 deploy/clusters/west/gitops/podinfo.application.gen.yaml
```
</TabItem>
</Tabs>
## Upgrade a Helm Chart
Holos is designed to ease the burden of Day 2 operations. With Holos, upgrading
software, integrating new software, and making safe platform-wide configuration
changes become easier.
Let's upgrade the podinfo Component to see how this works in practice. First,
update the Component version field to the latest upstream Helm chart version.
<Tabs groupId="gen-podinfo">
<TabItem value="command" label="Command">
```bash
holos generate component podinfo --component-version 6.6.2
```
</TabItem>
<TabItem value="output" label="Output">
```txt
generated component
```
</TabItem>
</Tabs>
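If you're unsure which chart version is the latest, you can query the upstream
repository directly with Helm; a sketch, assuming you're willing to add the
podinfo repository to your local Helm configuration:
```bash
# Optional: look up the latest published podinfo chart version.
helm repo add podinfo https://stefanprodan.github.io/podinfo
helm search repo podinfo/podinfo
```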
Remove the cached chart version.
<Tabs groupId="gen-podinfo">
<TabItem value="command" label="Command">
```bash
rm -rf components/podinfo/vendor
```
</TabItem>
</Tabs>
Now re-render the Platform.
<Tabs groupId="render-platform2">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered components/podinfo for cluster east in 327.10475ms
rendered components/podinfo for cluster west in 327.796541ms
```
</TabItem>
</Tabs>
Notice we're still using the upstream chart without modifying it. The Holos
component wraps around the chart to mix in additional resources and integrate
the component with the broader Platform.
## Visualize the Changes
Holos makes it easier to see exactly what changes are made and which resources
will be applied to the API server. By design, Holos operates on local files,
leaving the task of applying them to ecosystem tools like `kubectl` and ArgoCD.
This allows platform operators to inspect changes during code review, or before
committing the change at all.
For example, using `git diff`, we see that the only functional change when
upgrading this Helm chart is the deployment of a new container image tag to each
cluster. Additionally, we can roll out this change gradually by applying it to
the east cluster first, then to the west cluster, limiting the potential blast
radius of a problematic change.
<Tabs groupId="git-diff">
<TabItem value="command" label="Command">
```bash
git diff deploy/clusters/east
```
</TabItem>
<TabItem value="output" label="Output">
```diff showLineNumbers
diff --git a/deploy/clusters/east/components/podinfo/podinfo.gen.yaml b/deploy/clusters/east/components/podinfo/podinfo.gen.yaml
index 7cc3332..8c1647d 100644
--- a/deploy/clusters/east/components/podinfo/podinfo.gen.yaml
+++ b/deploy/clusters/east/components/podinfo/podinfo.gen.yaml
@@ -5,9 +5,9 @@ kind: Service
metadata:
name: podinfo
labels:
- helm.sh/chart: podinfo-6.6.1
+ helm.sh/chart: podinfo-6.6.2
app.kubernetes.io/name: podinfo
- app.kubernetes.io/version: "6.6.1"
+ app.kubernetes.io/version: "6.6.2"
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
@@ -29,9 +29,9 @@ kind: Deployment
metadata:
name: podinfo
labels:
- helm.sh/chart: podinfo-6.6.1
+ helm.sh/chart: podinfo-6.6.2
app.kubernetes.io/name: podinfo
- app.kubernetes.io/version: "6.6.1"
+ app.kubernetes.io/version: "6.6.2"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
@@ -53,7 +53,7 @@ spec:
terminationGracePeriodSeconds: 30
containers:
- name: podinfo
# highlight-next-line
- image: "ghcr.io/stefanprodan/podinfo:6.6.1"
# highlight-next-line
+ image: "ghcr.io/stefanprodan/podinfo:6.6.2"
imagePullPolicy: IfNotPresent
command:
- ./podinfo
```
</TabItem>
</Tabs>
:::tip
Holos is designed to surface the _fully rendered_ manifests intended for the
Kubernetes API server, making it easier to see and reason about platform-wide
configuration changes.
:::
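As a sketch of the gradual rollout described above, you could apply the change
to the east cluster first and promote it to west only after verification. The
`east` and `west` kubeconfig contexts below are hypothetical; substitute
whatever contexts point at your clusters:
```bash
# Roll out to east first, verify, then promote the same rendered change to west.
kubectl --context east apply --server-side=true -f deploy/clusters/east/components/podinfo
# ...verify east looks healthy, then:
kubectl --context west apply --server-side=true -f deploy/clusters/west/components/podinfo
```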
## Recap {#recap}
In this quickstart guide, we learned how Holos makes it easier, safer, and more
consistent to manage a Platform composed of multiple Clusters and upstream Helm
charts.
We covered how to:
1. Generate a Git repository for the Platform config.
2. Wrap the unmodified upstream podinfo Helm chart into a Component.
3. Render an individual Component.
4. Mix in your Platform's unique resources, such as ArgoCD Application resources, into all Components.
5. Define multiple similar, but not identical, workload clusters.
6. Render the manifests for the entire Platform with the `holos render platform` command.
7. Upgrade a Helm chart to the latest version as an important Day 2 task.
8. Visualize and surface the details of planned changes platform-wide.
## Dive Deeper
If you'd like to dive deeper, check out the [Schema API][schema] and [Core
API][core] reference docs. The main difference between the schema and core
packages is that users write refined CUE against the schema, while the core
package is what the schema produces for `holos` to execute. You rarely need to
interact with the Core API on the happy path, but you can use the core package
as an escape hatch when the happy path doesn't go where you need.
[application]: https://argo-cd.readthedocs.io/en/stable/user-guide/application-specification/
[schema]: /docs/api/author/v1alpha3/
[core]: /docs/api/core/v1alpha3/


@@ -1,106 +0,0 @@
---
description: Self service platform resource management for project teams.
slug: /archive/guides/2024-09-17-manage-a-project
sidebar_position: 250
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
# Manage a Project
In this guide we'll explore how Holos easily, safely, and consistently manages
platform resources for the teams developing projects on the platform.
Intended audience: Platform Engineers and Software Engineers.
The goal is to demonstrate how the platform team can consistently, easily, and
safely provide platform resources to software engineers.
We assume the software engineers have a container they want to deploy onto the
platform and make accessible. We'll use httpbin as a stand-in for the dev
team's container.
A Project is roughly equivalent to a dev team for the purpose of this guide, but
in practice multiple teams work on a given project over its lifetime, so we
structure the files into projects instead of teams.
## What you'll need {#requirements}
You'll need the following tools installed to complete this guide.
1. [holos](/docs/install) - to build the Platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Helm Components.
3. [kubectl](https://kubernetes.io/docs/tasks/tools/) - to render Kustomize Components.
If you'd like to apply the manifests we render in this guide, complete the
following optional, but recommended, steps.
a. Complete the [Local Cluster] guide to set up a local cluster to work with.
b. You'll need a GitHub account to fork the repository associated with this
guide.
## Fork the Guide Repository
<Tabs groupId="fork">
<TabItem value="command" label="Command">
```bash
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
```
</TabItem>
</Tabs>
This guide assumes you will run commands from the root directory of this
repository unless stated otherwise.
[Quickstart]: /docs/quickstart
[Local Cluster]: /docs/guides/local-cluster
## Render the Platform
Render the platform so we can build out its basic structure. Don't dwell on the platform bits for now.
## Apply the Manifests
Deploy ArgoCD, but not any of the Application resources.
## Browse to ArgoCD
Note there is nothing here yet.
## Switch to your Fork
Note all of the Applications change consistently.
## Apply the Applications
Note how ArgoCD takes over management; you no longer need to run `kubectl apply`.
## Create a Project
A Project is a conceptual, not a technical, construct in Holos; it's mainly about how components are laid out in the filesystem tree.
We use a schematic built into holos as an example. The platform team could use the same schematic, or provide a similar template and instructions, for development teams to self-serve.
## Render the Platform
Notice:
1. Project is registered with the platform at the root.
2. HTTPRoute and Namespace resources are added close to the root in `projects`
3. Deployment and Service resources are added at the leaf in `projects/httpbin/backend`
## Update the image tag
Add a basic schematic to demonstrate this. We may need to add two new flags for the image URL and image tag to the generate subcommand, but it should just be two new fields on the struct.
## Dive Deeper
Set the stage for constraints. Ideas: limit which resources can be added and
which namespaces can be operated in, enforce labels, and so on.
Simple, consistent, easy constraints.

Binary file not shown (before: 728 KiB)


@@ -1,6 +0,0 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# ArgoCD
Coming soon.


@@ -1,3 +0,0 @@
# Backstage
Coming soon.

File diff suppressed because it is too large


@@ -1,3 +0,0 @@
# Observability
Coming soon.

Binary file not shown (before: 624 KiB)


@@ -1,687 +0,0 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
# Try Holos Locally
This guide walks through the process of building and managing a software
development platform with Holos. The k3d platform built in this guide is a
slimmed down version of the larger, more holistic, Holos reference platform.
Holos is different from existing tools in a few important ways.
1. Holos provides a **unified configuration model** purpose built to improve on
unmodified Helm charts, Kustomize bases, or anything else that produces
structured configuration data.
2. Holos all but **eliminates the need to template YAML**, a common source of
frustration and errors in production.
3. Holos platforms are **composable** and have breadth. The toolchain and
techniques scale down to one machine and up to multiple clusters across
multiple regions.
4. The unified configuration model is well suited to a **Zero Trust security
model**. Platform-wide policy configuration is easier to manage with Holos.
---
This guide assumes commands are run locally. Capitalized terms have specific
definitions described in the [Glossary](/docs/glossary).
## What you'll need {#Requirements}
You'll need the following tools installed to complete this guide.
1. [holos](/docs/install) - to build the platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Holos components that wrap upstream Helm charts.
3. [k3d](https://k3d.io/#installation) - to provide a k8s api server.
4. [OrbStack](https://docs.orbstack.dev/install) or [Docker](https://docs.docker.com/get-docker/) - to use k3d.
5. [kubectl](https://kubernetes.io/docs/tasks/tools/) - to interact with the k8s api server.
6. [mkcert](https://github.com/FiloSottile/mkcert?tab=readme-ov-file#installation) - to make trusted TLS certificates.
7. [jq](https://jqlang.github.io/jq/download/) - to fiddle with JSON output.
:::note
Registering an account **is recommended** to try out proper authentication and
authorization in Holos, but you can complete this guide without signing up.
:::
## Goal {#Goal}
By the end of this guide you'll have built the foundation of a software
development platform. The foundation provides Zero Trust security by
holistically integrating off-the-shelf open source software.
1. Istio is configured to authenticate and authorize requests using an OIDC
ID-Token issued by ZITADEL before requests reach backend services.
2. The platform provides single sign-on and role based access control for all
services running on the platform.
This guide strives to keep things neat and tidy. All of the resources are
located in one k3d cluster and one local Git repository. If you want to clean
up at any point, do so with:
```bash
k3d cluster delete workload
rm -rf holos-k3d
```
## Sign In or Out {#Sign-In}
Holos provides integrated authentication and authorization which we'll use in
this guide to protect a service. We recommend registering an account to see
this in action. Registration also enables you to explore the customizable web
form that simplifies complex configuration.
If you opt out, the platform will be configured to use a fake identity in place
of real ID tokens.
<Tabs groupId="registration">
<TabItem value="registered" label="Sign In">
```bash
holos register user
```
</TabItem>
<TabItem value="unregistered" label="Opt Out">
```bash
holos logout
```
</TabItem>
</Tabs>
## Create the Platform {#Create-Platform}
A server-side platform resource in Holos stores the web form used to simplify
platform-wide configuration.
First, initialize an empty Git repository:
```bash
mkdir holos-k3d
cd holos-k3d
git init
```
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
Use `holos` to make the rpc call to create the server-side platform
resource.
```bash
holos create platform --name k3d --display-name "Try Holos Locally"
```
</TabItem>
<TabItem value="unregistered" label="Signed Out">
Create a blank `platform.metadata.json` file so subsequent holos commands
skip rpc calls.
```bash
touch platform.metadata.json
```
</TabItem>
</Tabs>
### Generate the Platform {#Generate-Platform}
Generate the platform code in the repository root.
```bash
holos generate platform k3d
```
Commit the generated platform config to the repository.
```bash
git add .
git commit -m "holos generate platform k3d - $(holos --version)"
```
### Push the Platform Form
Each Holos platform has a Platform Form used to submit top-level, platform-wide
configuration values. The purpose of the form is to validate configuration
values and simplify complicated configurations and integrations.
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
Push the Platform Form to publish it. Browse to the printed URL to view the
form.
```bash
holos push platform form .
```
</TabItem>
<TabItem value="unregistered" label="Signed Out">
You will update the Platform Model locally in a later step so there's
nothing to do in this step. Only signed-in users can push a Platform Form
to the Holos web server.
```bash
# holos push platform form .
```
</TabItem>
</Tabs>
The Platform Form is defined locally in `forms/platform/platform-form.cue`.
On the web it looks like:
![Platform Form Default Values](./form-pushed.png)
### Update the Platform Model {#Platform-Model}
Holos needs initial, top-level configuration values to render the platform. The
Platform Model is the term we use for these values. In this section you will
configure role-based access control by updating the Platform Model.
In the k3d platform you're building now, role-based access control is
implemented by asserting against the OIDC ID token subject. Update the form
with the `sub` claim value from your ID token. This will ensure only you have
access to platform services.
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
Copy and paste the `sub` value into your Platform Form's Subject field.
```bash
holos login --print-claims --log-level=error | jq -r .sub
```
After pasting the `sub` value, click Submit on the form.
</TabItem>
<TabItem value="unregistered" label="Signed Out">
You don't have an id token when you're signed out, so there's nothing for
you to do in this step.
```bash
# holos login --print-claims --log-level=error | jq -r .sub
```
The platform will be configured to assert against the User-Agent header
instead.
</TabItem>
</Tabs>
### Pull the Platform Model {#Pull-the-Platform-Model}
The Platform Model needs to be pulled into the local Git repository after the
form has been submitted. Next, we'll run `holos render` which operates
exclusively on local files.
Holos stores the Platform Model in the `platform.config.json` file. Holos
provides this file as input to CUE when rendering the platform. This file is
intended to be added to version control.
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
Pull the updated Platform Model into the local repository.
```bash
holos pull platform model .
git add platform.config.json
git commit -m "Add platform model"
```
</TabItem>
<TabItem value="unregistered" label="Signed Out">
The holos generate platform k3d command created an initial Platform Model in
`platform.config.json`. As a result there's nothing to do in this step.
```bash
# holos pull platform model .
# git add platform.config.json
# git commit -m "Add platform model"
```
</TabItem>
</Tabs>
## Render the Platform {#Render-the-Platform}
Holos has everything necessary to render the platform once the
`platform.config.json` file and the code from `holos generate` are in the
current directory.
Rendering a platform is the process of iterating over each platform component
and rendering it into plain yaml. Holos does not apply the resulting manifests.
Other tools like kubectl, ArgoCD, or Flux are responsible for applying the
manifests.
```bash
holos render platform ./platform
```
The render command writes the manifest files to the `deploy/` directory. Commit
the files so they can be applied via GitOps later.
```bash
git add deploy
git commit -m "holos render platform ./platform"
```
:::info[Don't blink, this is where Holos builds the platform]
It usually takes no more than a few seconds.
Rendering the holos reference platform currently results in about 500K lines of
YAML. In contrast, roughly 80K lines are produced by this slimmed-down k3d
platform.
We mention this because the scale doesn't matter as much as it does with other
tools. Manage millions of lines of configuration with Holos the same way this
guide manages thousands. This is made possible by the unique way CUE unifies
all configuration into one single model.
:::
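If you'd like to see the scale of the output for yourself, a rough line count of
the rendered manifests is one way; a sketch using standard shell tools, assuming
rendered files follow the `*.gen.yaml` naming convention used in this guide:
```bash
# Approximate how many lines of YAML holos just rendered.
find deploy -name '*.gen.yaml' -print0 | xargs -0 wc -l | tail -n 1
```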
## Configure DNS {#DNS}
Configure your machine to resolve `*.holos.localhost` to your loopback
interface. This is necessary for requests to reach the workload cluster.
<Tabs>
<TabItem value="macos" label="macOS" default>
Cache sudo credentials.
Admin access is necessary to setup a local dnsmasq instance and configure
macOS's DNS resolver.
```bash
sudo -v
```
Resolve *.holos.localhost DNS queries to 127.0.0.1.
```bash
bash ./scripts/local-dns
```
</TabItem>
<TabItem value="linux" label="Linux">
[NSS-myhostname](http://man7.org/linux/man-pages/man8/nss-myhostname.8.html)
ships with many Linux distributions and should resolve *.localhost
automatically to 127.0.0.1.
Otherwise it is installable with:
```bash
sudo apt install libnss-myhostname
```
</TabItem>
<TabItem value="windows" label="Windows">
Ensure the loopback interface has at least the following names in `C:\windows\system32\drivers\etc\hosts`
```
127.0.0.1 httpbin.holos.localhost app.holos.localhost
```
</TabItem>
</Tabs>
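Before creating the cluster, it's worth confirming the name resolves to your
loopback interface; a minimal check:
```bash
# Expect replies from 127.0.0.1 if the resolver is configured correctly.
ping -c 1 httpbin.holos.localhost
```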
## Create the Cluster {#Create-Cluster}
The Workload Cluster is where your applications and services will be deployed.
In production this is usually an EKS, GKE, or AKS cluster.
:::tip
Holos supports all compliant Kubernetes clusters. Holos was developed and tested
on GKE, EKS, Talos, k3s, and Kubeadm clusters.
:::
<Tabs>
<TabItem value="evaluate" label="Try Holos" default>
Use this command when exploring Holos.
```bash
k3d cluster create workload \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
</TabItem>
<TabItem value="develop" label="Develop Holos">
Use this command when developing Holos.
```bash
k3d registry create registry.holos.localhost --port 5100
```
```bash
k3d cluster create workload \
--registry-use k3d-registry.holos.localhost:5100 \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
</TabItem>
</Tabs>
Traefik is disabled because Istio provides the same functionality.
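Once the cluster is up, you can confirm `kubectl` is pointed at it. k3d normally
names the kubeconfig context after the cluster with a `k3d-` prefix, so the
context name below is an assumption:
```bash
# k3d typically creates and selects a context named k3d-<cluster>.
kubectl config use-context k3d-workload
kubectl get nodes
```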
## Apply the Platform Components {#Apply-Platform-Components}
Use `kubectl` to apply each platform component. In production, it's common to
fully automate this process with ArgoCD, but we use `kubectl` to the same
effect.
### Local CA {#Local-CA}
Holos platforms use cert-manager to issue TLS certificates. The browser and
tools we're using need to trust these certificates to work together.
Admin access is necessary for `mkcert` to install the root certificate into your
trust stores.
```bash
sudo -v
```
Create the local CA and copy the CA key to the workload cluster so that
cert-manager can issue trusted certificates.
```bash
bash ./scripts/local-ca
```
:::warning
Take care to run the local-ca script each time you create the workload cluster
so that Certificates are issued correctly.
:::
### Service Mesh
The platform service mesh provides an ingress gateway and connectivity useful
for observability, reliability, and security.
#### Namespaces
With Holos, the namespaces each component needs are automatically added to the
namespaces component, which is useful for centrally managed policies.
```bash
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/namespaces
```
#### Custom Resource Definitions
```bash
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/gateway-api
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/istio-base
```
#### Cert Manager {#cert-manager}
Apply the cert-manager controller.
```bash
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/cert-manager
```
Apply the ClusterIssuer which issues Certificate resources using the local
certificate authority.
```bash
kubectl -n cert-manager wait pod -l app.kubernetes.io/component=webhook --for=condition=Ready
kubectl apply --server-side=true -f deploy/clusters/workload/components/local-ca
kubectl apply --server-side=true -f deploy/clusters/workload/components/certificates
kubectl -n istio-gateways wait certificate httpbin.holos.localhost --for=condition=Ready
```
:::warning
The certificate will time out before becoming ready if the [local-ca](#Local-CA)
script was not run after the cluster was created.
:::
#### Istio {#Istio}
Istio implements the Service Mesh.
```bash
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/istio-cni
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/istiod
kubectl apply --server-side=true -f ./deploy/clusters/workload/components/gateway
```
Verify the Gateway is programmed and the listeners have been accepted:
```bash
kubectl -n istio-gateways wait gateway default --for=condition=Accepted
```
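You can also take a quick look at the Gateway and the pods Istio created for it;
a sketch, assuming the `istio-gateways` namespace used elsewhere in this guide:
```bash
# The default Gateway and the deployment Istio generates for it live in istio-gateways.
kubectl -n istio-gateways get gateway default
kubectl -n istio-gateways get pods
```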
#### httpbin {#httpbin}
httpbin is a simple backend service useful for end-to-end testing.
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/httpbin-backend
kubectl apply --server-side=true -f deploy/clusters/workload/components/httpbin-routes
kubectl -n holos-system wait pod -l app.kubernetes.io/instance=httpbin --for=condition=Ready
```
:::info
Browse to [https://httpbin.holos.localhost/](https://httpbin.holos.localhost/)
to verify end to end connectivity. You should see the httpbin index page.
:::
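If you prefer the command line, the same check works with curl. No authorization
policy is applied at this point, so a plain request should reach httpbin; the
expected status is an assumption:
```bash
# Expect a successful response; authorization policies are applied later in this guide.
curl -sI https://httpbin.holos.localhost/ | head -n 1
```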
### Authenticating Proxy
The auth proxy is responsible for authenticating browser requests, handling the
OIDC authentication flow, and providing a signed ID token to the rest of the
services in the mesh.
#### Cookie Secret
The auth proxy stores session information in an encrypted cookie. Generate a
random cookie encryption Secret and apply it.
```bash
LC_ALL=C tr -dc A-Za-z0-9 </dev/urandom \
| head -c 32 \
| kubectl create secret generic "authproxy" \
--from-file=cookiesecret=/dev/stdin \
--dry-run=client -o yaml \
| kubectl apply -n istio-gateways -f-
```
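A quick sanity check that the Secret landed where the auth proxy expects it:
```bash
# The authproxy Secret should exist in the istio-gateways namespace.
kubectl -n istio-gateways get secret authproxy
```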
#### Deployment
The auth proxy Deployment receives requests from web browsers and responds with
an authentication decision.
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/authproxy
kubectl apply --server-side=true -f deploy/clusters/workload/components/authroutes
```
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
<Admonition type="info">
Verify authentication is working by browsing to
[https://httpbin.holos.localhost/holos/authproxy](https://httpbin.holos.localhost/holos/authproxy).
We want a simple `Authenticated` response.
<Admonition type="tip">
You may need to refresh the page a few times while the platform configures
itself.
</Admonition>
</Admonition>
Istio will respond with `no healthy upstream` until the pod becomes ready.
Wait for the pod to become ready with:
```bash
kubectl -n holos-system wait pod -l app.kubernetes.io/instance=httpbin --for=condition=Ready
```
Once authenticated, visit
[https://httpbin.holos.localhost/holos/authproxy/userinfo](https://httpbin.holos.localhost/holos/authproxy/userinfo)
which returns a subset of claims from your id token.
<Admonition type="warning">
If you get `Unauthorized` instead of a json response body, make sure you
[authenticated](https://httpbin.holos.localhost/holos/authproxy) first.
</Admonition>
```json
{
"user": "275552236589843464",
"email": "demo@holos.run",
"preferredUsername": "demo"
}
```
</TabItem>
<TabItem value="unregistered" label="Signed Out">
The auth proxy will always try to sign you in when you are signed out, so
there isn't much to do here. Please do take a moment to glance at the
Signed In tab to see how this would work if you were signed in.
The `k3d` platform relies on `https://login.holos.run` to issue id tokens.
Authorization has been configured against fake request headers instead of
the real `x-oidc-id-token` header.
</TabItem>
</Tabs>
### Authorization Policy
Configure authorization policies using attributes of the authenticated request.
Authorization policies route web requests through the auth proxy and then
validate all requests against the `x-oidc-id-token` header.
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/authpolicy
```
Istio may take a few seconds to program the Gateway with the
AuthorizationPolicy resources.
## Try out Zero Trust
A basic Zero Trust security model is now in place. The platform authenticates
and authorizes requests before they reach the backend service.
### Browser
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
The platform has been configured to authorize requests with a `x-oidc-id-token` header.
1. Verify authentication is working by browsing to [https://httpbin.holos.localhost/dump/request](https://httpbin.holos.localhost/dump/request).
- Refresh the page a few times.
- The `httpbin` backend pods should echo back the `x-oidc-id-token`
header injected by the auth proxy.
2. Note the `x-oidc-id-token` header is not sent by your browser but is
received by the backend service.
- This design reduces the risk of exposing id tokens in the browser.
- Browser request size remains constant as more claims are added to id
tokens.
- Reliability improves because id tokens often overflow request header
buffers when they pass through middle boxes across the internet.
</TabItem>
<TabItem value="unregistered" label="Signed Out">
The platform has been configured to authorize requests with a `User-Agent: anonymous` header.
1. Open an incognito window (Cmd+Shift+N) to verify the platform is
enforcing the authorization policy.
2. Browse to
[https://httpbin.holos.localhost/dump/request](https://httpbin.holos.localhost/dump/request)
you should be redirected to the sign in page by the auth proxy.
- You **do not** need to register or sign in.
- This step verifies the platform is redirecting unauthenticated
requests to the identity provider.
- Navigate back or close and re-open an incognito window.
3. Set your `User-Agent` header to `anonymous` using your browser developer tools.
- For Chrome the process is described
[here](https://developer.chrome.com/docs/devtools/device-mode/override-user-agent#override_the_user_agent_string).
- The purpose is to simulate an authenticated request.
4. Browse to
[https://httpbin.holos.localhost/dump/request](https://httpbin.holos.localhost/dump/request).
- The platform should allow the request through to the backend pod.
- `httpbin` should echo back your request which should contain `User-Agent: anonymous`.
</TabItem>
</Tabs>
### Command Line
Verify unauthenticated requests are blocked by default outside the browser.
```bash
curl -I https://httpbin.holos.localhost/dump/request
```
You should receive an `HTTP/2 302` response that redirects to `location:
https://login.holos.run` to start the OAuth login flow.
Next, verify authenticated requests are allowed.
<Tabs groupId="registration">
<TabItem value="registered" label="Signed In">
The platform is configured to authenticate the id token present in the
`x-oidc-id-token` header.
💡 It also works with `grpcurl`.
```bash
curl -H x-oidc-id-token:$(holos token) https://httpbin.holos.localhost/dump/request
```
</TabItem>
<TabItem value="unregistered" label="Signed Out">
The platform is configured to authorize any request with `User-Agent:
anonymous` in place of validating the oidc id token.
💡 Take a moment to click the Signed In tab; I don't want you to miss how
cool `$(holos token)` is.
```bash
curl -A anonymous https://httpbin.holos.localhost/dump/request
```
</TabItem>
</Tabs>
You should receive a response showing the request headers the backend received.
:::tip
Note how the platform secures both web browser and command line api access to
the backend httpbin service. httpbin itself has no authentication or
authorization functionality.
:::
## Summary
Thank you for taking the time to try out Holos. In this guide, you built the
foundation of a software development platform that:
1. Provides a unified configuration model with CUE that
   - Supports unmodified Helm Charts, Kustomize Kustomizations, and plain YAML.
   - Provides a web form to pass top-level parameters.
2. Reduces errors by eliminating the need to template unstructured text.
3. Is composable and scales down to a local machine.
4. Provides a way to safely configure broad authentication and authorization
   policy.
## Next Steps
Dive deeper with the following resources that build on the foundation you have now.
1. Explore the [Rendering Process](/docs/concepts#rendering) in Holos.
2. Dive deeper into the [Platform Manifests](./platform-manifests) rendered in this guide.
3. Deploy [ArgoCD](../argocd) onto the foundation you built.
4. Deploy [Backstage](../backstage) as a portal to the integrated platform components.
## Clean-Up
If you'd like to clean up the resources you created in this guide, remove them
with:
```bash
k3d cluster delete workload
rm -rf holos-k3d
```


@@ -1,137 +0,0 @@
# Platform Manifests
This document provides an example of how Holos uses CUE and Helm to unify and
render the platform configuration. It refers to the manifests rendered in the
Try Holos Locally guide.
Take a moment to review the manifests `holos` rendered to build the platform.
### ArgoCD Application
Note the Git URL in the Platform Model is used to derive the ArgoCD
`Application` resource for all of the platform components.
```yaml
# deploy/clusters/workload/gitops/namespaces.application.gen.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: namespaces
  namespace: argocd
spec:
  destination:
    server: https://kubernetes.default.svc
  project: default
  source:
    # highlight-next-line
    path: /deploy/clusters/workload/components/namespaces
    # highlight-next-line
    repoURL: https://github.com/holos-run/holos-k3d.git
    # highlight-next-line
    targetRevision: HEAD
```
One ArgoCD `Application` resource is produced for each Holos component by
default. The CUE definition which produces the rendered output is defined in
`buildplan.cue` around line 222.
:::tip
Note how CUE does not use error-prone text templates. The language is well
specified and typed, which reduces errors when unifying the configuration with
the Platform Model in the following `#Argo` definition.
:::
```cue
// buildplan.cue
// #Argo represents an argocd Application resource for each component, written
// using the #HolosComponent.deployFiles field.
#Argo: {
ComponentName: string
Application: app.#Application & {
metadata: name: ComponentName
metadata: namespace: "argocd"
spec: {
destination: server: "https://kubernetes.default.svc"
project: "default"
source: {
// highlight-next-line
path: "\(_Platform.Model.argocd.deployRoot)/deploy/clusters/\(_ClusterName)/components/\(ComponentName)"
// highlight-next-line
repoURL: _Platform.Model.argocd.repoURL
// highlight-next-line
targetRevision: _Platform.Model.argocd.targetRevision
}
}
}
// deployFiles represents the output files to write along side the component.
deployFiles: "clusters/\(_ClusterName)/gitops/\(ComponentName).application.gen.yaml": yaml.Marshal(Application)
}
```
### Helm Chart
The `cert-manager` component renders using the upstream Helm chart. The build
plan that defines the Helm chart to use, along with the values to provide, looks
like the following.
:::tip
Holos fully supports your existing Helm charts. Consider leveraging `holos` as
an alternative to umbrella charts.
:::
```cue
// components/cert-manager/cert-manager.cue
package holos
// Produce a helm chart build plan.
(#Helm & Chart).Output
let Chart = {
Name: "cert-manager"
Version: "1.14.5"
Namespace: "cert-manager"
Repo: name: "jetstack"
Repo: url: "https://charts.jetstack.io"
// highlight-next-line
Values: {
installCRDs: true
startupapicheck: enabled: false
// Must not use kube-system on gke autopilot. GKE Warden blocks access.
// highlight-next-line
global: leaderElection: namespace: Namespace
// https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-resource-requests#min-max-requests
resources: requests: {
cpu: "250m"
memory: "512Mi"
"ephemeral-storage": "100Mi"
}
// highlight-next-line
webhook: resources: Values.resources
// highlight-next-line
cainjector: resources: Values.resources
// highlight-next-line
startupapicheck: resources: Values.resources
// https://cloud.google.com/kubernetes-engine/docs/how-to/autopilot-spot-pods
nodeSelector: {
"kubernetes.io/os": "linux"
if _ClusterName == "management" {
"cloud.google.com/gke-spot": "true"
}
}
webhook: nodeSelector: Values.nodeSelector
cainjector: nodeSelector: Values.nodeSelector
startupapicheck: nodeSelector: Values.nodeSelector
}
}
```
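To see this build plan in action, you can render the component on its own, the
same way the Quickstart renders podinfo; a sketch, assuming the component lives
at `./components/cert-manager` and the workload cluster is named `workload`:
```bash
# Render only the cert-manager component for the workload cluster.
holos render component ./components/cert-manager --cluster-name=workload
```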


@@ -1,52 +0,0 @@
---
description: Introduction
---
# Introduction
Welcome to Holos. Holos is an open source tool to manage software development
platforms safely, easily, and consistently. We built Holos to help engineering
teams work more efficiently together by empowering them to build golden paths
and paved roads for other teams to leverage for quicker delivery.
## Documentation
- [Guides] are organized into example use-cases of how Holos helps engineering
teams at the fictional Bank of Holos deliver business value on the bank's
platform.
- The [API Reference] is a technical reference for when you're writing CUE code to define your platform.
## Backstory
At [Open Infrastructure Services], we've each helped dozens of companies build and operate their software development platforms. During the U.S. presidential election just before the pandemic, our second-largest client, Twitter, experienced a global outage that lasted nearly a full day. We were managing their production configuration system, allowing the core infrastructure team to focus on business-critical objectives. This gave us a front-row seat to the incident.
A close friend and engineer on the team made a trivial one-line change to the firewall configuration. Less than 30 minutes later, everything was down. That change, which passed code review, caused the host firewall to revert to its default state on hundreds of thousands of servers, blocking all connections globally—except for SSH, thankfully. Even a Presidential candidate complained loudly.
This incident forced us to reconsider key issues with Twitter's platform:
1. **Lack of Visibility** - Engineers couldn't foresee the impact of even a small change, making it difficult to assess risks.
2. **Large Blast Radius** - Small changes affected the entire global fleet in under 30 minutes. There was no way to limit the impact of a single change.
3. **Incomplete Tooling** - The right processes were in place, but the tooling didn't fully support them. The change was tested and reviewed, but critical information wasn't surfaced in time.
Over the next few years, we built features to address these issues. Meanwhile, I began exploring how these solutions could work in the Kubernetes and cloud-native space.
As Google Cloud partners, we worked with large customers to understand how they built their platforms on Kubernetes. During the pandemic, we built a platform using CNCF projects like ArgoCD, Prometheus Stack, Istio, Cert Manager, and External Secrets Operator, integrating them into a cohesive platform. We started with upstream recommendations—primarily Helm charts—and wrote scripts to integrate each piece into the platform. For example, we passed Helm outputs to Kustomize to add labels or fix bugs, and wrote umbrella charts to add Ingress, HTTPRoute, and ExternalSecret resources.
These scripts served as necessary glue to hold everything together but became difficult to manage across multiple environments, regions, and cloud providers. YAML templates and nested loops created friction, making them hard to troubleshoot. The scripts themselves made it difficult to see what was happening and to fix issues affecting the entire platform.
Still, the scripts had a key advantage: they produced fully rendered manifests in plain text, committed to version control, and applied via ArgoCD. This clarity made troubleshooting easier and reduced errors in production.
Despite the makeshift nature of the scripts, I kept thinking about the "[Why are we templating YAML]?" post on Hacker News. I wanted to replace our scripts and charts with something more robust and easier to maintain—something that addressed Twitter's issues head-on.
I rewrote our scripts and charts using CUE and Go, replacing the glue layer. The result is **Holos**—a tool designed to complement Helm, Kustomize, and Jsonnet, making it easier and safer to define golden paths and paved roads without bespoke scripts or templates.
Thanks for reading. Take Holos for a spin on your local machine with our [Quickstart] guide.
[Guides]: /docs/guides/
[API Reference]: /docs/api/
[Quickstart]: /docs/quickstart/
[CUE]: https://cuelang.org/
[Author API]: /docs/api/author/
[Core API]: /docs/api/core/
[Open Infrastructure Services]: https://openinfrastructure.co/
[Why are we templating YAML]: https://hn.algolia.com/?dateRange=all&page=0&prefix=false&query=https%3A%2F%2Fleebriggs.co.uk%2Fblog%2F2019%2F02%2F07%2Fwhy-are-we-templating-yaml&sort=byDate&type=story


@@ -1,3 +0,0 @@
# Architecture
Coming soon.


@@ -1,31 +0,0 @@
# Deployment
This document describes how deployment from `main` is configured.
1. Refer to the publish workflow.
2. The workflow uses an SSH deploy key to:
   - Clone the holos-infra repo.
   - Write the image tag to `saas/userdata/components/dev-holos-app/images.json`.
   - Run `holos render platform ./platform`.
   - Commit and push the results.
3. ArgoCD takes over the rollout.
## Credentials
TODO: Lock this down more, the deploy key has too much access to the infra
repository.
```bash
mkdir -p tmp
cd tmp
ssh-keygen -t ed25519 -f holos-infra.key -m pem -C holos-run/holos -N ''
gh secret set DEPLOY_SSH_PRIVATE_KEY < holos-infra.key
gh api --method POST \
-H "Accept: application/vnd.github+json" \
/repos/holos-run/holos-infra/keys \
-f title='holos-run/holos deploy key' \
-f key="$(cat holos-infra.key.pub)" \
-F read_only=false
cd ..
rm -rf tmp
```


@@ -1,10 +0,0 @@
# ZITADEL Backups
Refer to [Schedule backups](https://cloudnative-pg.io/documentation/1.23/backup/#scheduled-backups)
By default ZITADEL is backed up daily to S3. When restoring into a new cluster
of the same name, increment the revision variable to create a new blank folder
for the new cluster WAL. The cluster will not initialize unless the WAL
directory is empty.
The cluster will recover from the previous revision.


@@ -1,3 +0,0 @@
## Overview
TODO: This runbook needs to be updated to reflect the switch from PGO to CNPG.


@@ -1,3 +0,0 @@
# Postgres Full Backup
Refer to [On-demand backups](https://cloudnative-pg.io/documentation/1.23/backup/#on-demand-backups)


@@ -1,13 +0,0 @@
import DocCardList from '@theme/DocCardList';
# Get Started
## Start with the [Quickstart] guide
---
These documents provide additional context to supplement the [Quickstart] guide.
<DocCardList />
[Quickstart]: /docs/quickstart/


@@ -1,15 +0,0 @@
---
description: Compare Holos with other tools in the ecosystem.
slug: /comparison
sidebar_position: 300
---
# Comparison
## Helm
## Kustomize
## ArgoCD
## Flux


@@ -1,370 +0,0 @@
---
description: Learn the concepts and domain language Holos uses.
slug: /concepts
sidebar_position: 200
---
# Concepts
## Introduction
This page is intended as a high-level conceptual overview of the key concepts in
Holos. Refer to the [Core API](/docs/api/core/) for low level reference
documentation.
Holos is a tool built for platform engineers. The Holos authors share three
core values which guide our design decisions for the tool.
1. Safety
2. Ease of use
3. Consistency
Each of the following concepts are intended to support and strengthen one or
more of these core values. In this way we hope to lighten the burden carried by
platform engineers.
## Concepts
- [Component](<#component>) - The primary building block in Holos, wraps a Helm chart, Kustomize base, or plain resources defined in CUE.
- [Platform](<#platform>) - A collection of Components integrated into a software development platform.
- [Model](<#model>) - Structured data included in the Platform specification, available to all Components. For example, your organization's domain name.
- [Rendering](<#rendering>) - Holos is a tool that makes the process of rendering Kubernetes manifests safer, easier, and consistent.
- [Cluster](<#cluster>) - A Kubernetes cluster. Components are rendered for and applied to a Cluster.
- [Fleet](<#fleet>) - A collection of Clusters with a similar purpose. A Platform is typically composed of two Fleets, one for management and one for workloads.
```mermaid
graph TB
Platform[<a href="#platform">Platform</a>]
Cluster[<a href="#cluster">Cluster</a>]
Fleet[<a href="#fleet">Fleet</a>]
Component[<a href="#component">Component</a>]
Helm[<a href="#component">Helm</a>]
Kustomize[<a href="#component">Kustomize</a>]
CUE[<a href="#component">CUE</a>]
Fleet --> Platform
Cluster --> Fleet
Component --> Cluster
Helm --> Component
Kustomize --> Component
CUE --> Component
```
:::tip
This graph is organized as a tree. We often say configuration at the root
defines the broad Platform. Configuration at a leaf defines a Component of the
Platform. The concept of a tree also reflects the filesystem organization of
the configuration.
:::
<!--
```mermaid
---
title: Figure 1 - Holos Concepts
---
mindmap
root((Holos))
Platform
Components
HelmChart
KustomizeBuild
KubernetesObjects
Model
name: Example Org
domain: example.com
Renders
YAML Files
Kubernetes Manifests
ArgoCD Application
FluxCD Kustomization
```
-->
## Component
A Component is the primary building block when managing software with Holos. A
software project you wish to integrate into your platform, for example ArgoCD,
is managed using one or more components.
The primary Component kinds are:
1. **HelmChart** to render config provided by Helm.
2. **KustomizeBuild** to render config provided by Kustomize.
3. **KubernetesObjects** to render config provided by CUE.
Components are intended to integrate unmodified upstream software releases into
your Platform. In this way, the focus of a Component is more about the unique
differentiating aspects of your platform than the upstream software contained in
the Component.
#### Example HelmChart Component
The ArgoCD Component is a good example of a HelmChart component because it
exercises most of the features that let you focus on the differentiating
aspects of your platform.
Take note of the following key points in this example ArgoCD Component:
1. The Component wraps the ArgoCD Helm Chart in a way that's easy to upgrade and maintain over time.
2. Newer Gateway API resources are mixed in, replacing the older Ingress resource included in the chart.
3. Helm output is passed through Kustomize to configure secure mutual TLS encryption.
4. Helm values are easier and safer to manipulate with CUE instead of text markup.
5. Kustomize is easier and safer to manipulate with CUE instead of text markup.
6. Platform data Model values are easily accessible, for example the OIDC issuer and the organization's domain name.
The Component wraps the unmodified upstream ArgoCD Helm chart, providing
easier upgrades as new versions are released.
Note how the Component facilitates composition by allowing us to mix-in new
functionality from the ecosystem without modifying the upstream chart. The
Platform this Component integrates with uses the new Gateway API, but the
upstream helm chart does not yet support Gateway API. See how the Resources
field is used to mix-in a ReferenceGrant from the Gateway API without modifying
the upstream helm chart.
The Platform uses Istio to implement service to service encryption with mutual
TLS. The Component passes the Helm output to Kustomize to integrate with Istio.
Kustomize is used to patch the argocd-server Deployment resource to inject the
Istio sidecar for mutual TLS.
Helm values are safer and easier to work with in CUE. Note how you can modify
Helm values using well-defined data instead of manipulating YAML text files.
Similarly, the yaml files used for Kustomize are produced by CUE, which is again
safer and easier because the Kustomize spec has been imported into CUE and is
validated.
Finally, the domain name used by this Platform is easily accessible from the
PlatformSpec, which is defined at the root level and made available to all
components integrated into the platform. Similarly, data values shared by all
of the Components that make up ArgoCD are defined in a structure accessible by
each of these components.
```cue
package holos
import (
"encoding/yaml"
"strings"
)
// Produce a helm chart build plan.
(#Helm & Chart).Output
let Chart = {
Name: "argo-cd"
Namespace: "argocd"
Version: "7.1.1"
Chart: chart: release: "argocd"
// The upstream chart uses a Job to create the argocd-redis Secret. Enable
// hooks to enable the Job.
Chart: enableHooks: true
Repo: name: "argocd"
Repo: url: "https://argoproj.github.io/argo-helm"
// Ensure all of our mix-in resources go into the same namespace as the Chart.
Resources: [_]: [_]: metadata: namespace: Namespace
// Grant the Gateway namespace the ability to refer to the backend service
// from HTTPRoute resources.
Resources: ReferenceGrant: (#IstioGatewaysNamespace): #ReferenceGrant
// Pass the helm output through kustomize.
EnableKustomizePostProcessor: true
// Force all resources into the component namespace, some resources in the
// helm chart do not specify the namespace so they will get mis-applied
// when the kubectl (client-go) context is another namespace.
KustomizeFiles: "kustomization.yaml": namespace: Namespace
// Patch the backend with the service mesh sidecar.
KustomizePatches: {
mesh: {
target: {
group: "apps"
version: "v1"
kind: "Deployment"
name: "argocd-server"
}
patch: yaml.Marshal(IstioInject)
}
}
Values: #Values & {
kubeVersionOverride: "1.29.0"
// handled in the argo-crds component
crds: install: false
global: domain: "argocd.\(_Platform.Model.org.domain)"
dex: enabled: false
// the service mesh handles secure mTLS
configs: params: "server.insecure": true
configs: cm: {
"admin.enabled": false
"oidc.config": yaml.Marshal(OIDCConfig)
"users.anonymous.enabled": "false"
}
// Refer to https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/
let Policy = [
"g, argocd-view, role:readonly",
"g, prod-cluster-view, role:readonly",
"g, prod-cluster-edit, role:readonly",
"g, prod-cluster-admin, role:admin",
]
configs: rbac: "policy.csv": strings.Join(Policy, "\n")
}
}
let IstioInject = [{
op: "add",
path: "/spec/template/metadata/labels/sidecar.istio.io~1inject",
value: "true",
}]
let OIDCConfig = {
name: "Holos Platform"
issuer: _ArgoCD.issuerURL
clientID: _ArgoCD.clientID
requestedScopes: _ArgoCD.scopesList
// Set redirect uri to https://argocd.example.com/pkce/verify
enablePKCEAuthentication: true
// groups is essential for rbac
requestedIDTokenClaims: groups: essential: true
}
```
## Platform
A Platform refers to all of the software and services integrated together to
provide your organization's software development platform. Holos is designed to
manage all of the resources that compose your Platform using the [Kubernetes
Resource Model][krm] (KRM). Nearly all platforms are larger than Kubernetes
itself. For example, your developers likely need a GCS or S3 bucket to store
data. Holos takes advantage of Crossplane to manage resources in a consistent
way.
Holos defines a [Platform][Platform] object which collects multiple Components
together along with organizational data defined by your Model. Consider the
following example: a Platform containing a single Component that manages
namespaces for each cluster in the Platform.
```cue
package holos
import v1 "github.com/holos-run/holos/api/v1alpha2"
v1.#Platform & {
metadata: name: "example"
spec: components: [{
path: "components/namespaces"
cluster: "cluster1"
}]
}
```
This platform is rendered by the command:
```bash
holos render platform ./platform
```
When Holos renders the platform, it iterates over each component, generates and
executes a [BuildPlan][BuildPlan], then writes the fully rendered output of the
component to the filesystem. In this simple example, two files are produced:
1. `deploy/clusters/cluster1/components/namespaces/namespaces.gen.yaml`
2. `deploy/clusters/cluster1/gitops/namespaces.application.gen.yaml`
The first file is a plain kubernetes manifest containing Namespace resources.
The second file is an ArgoCD Application resource to deploy and manage the
resources defined in the first file.
## Model
The Platform Model is where you store top-level data values used throughout
multiple components in your Platform. Your organization's domain name is a
prime example of the kind of data stored in the Model. Many components derive
host names from your organization's domain name. CUE makes this process safe,
easy, and consistent. For example:
```cue
hostname: "argocd.\(_Platform.Model.org.domain)"
```
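The same interpolation works with plain CUE outside of Holos. The following
standalone sketch uses hypothetical field names that mirror the snippet above
and can be evaluated with `cue export`:
```cue
package model

// Hypothetical, standalone sketch: the field names mirror the snippet above
// but this is not the actual Holos schema.
_Platform: Model: org: domain: "example.com"

// Each component derives its host name from the single domain value.
argocd: hostname:  "argocd.\(_Platform.Model.org.domain)"
grafana: hostname: "grafana.\(_Platform.Model.org.domain)"
```
Changing the domain in one place updates every derived host name, and
evaluation fails early if a referenced field is missing.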
When Holos renders a Platform, the model is loaded from a JSON file in the local
filesystem. The platform model file is intended to be committed to version
control along with the rest of the Holos Platform and Component code.
Holos additionally provides a web UI and form to make it easy to enter and
validate top-level configuration data. You have complete control over the web
form; it's rendered from JSON data defined by CUE. Customizing the web form is
an advanced topic; the key concept to take away is that the Model is for
top-level, platform-wide data. You control the shape and structure of the
Model, and you have the ability to collect Model values using a simple web
form.
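For example, a minimal Model shape might look like the following sketch. The
field names are hypothetical and only illustrate that both the schema and its
concrete values live in CUE under your control:
```cue
package holos

// Hypothetical sketch of a Model schema; the real shape is whatever your
// platform defines.
#Model: {
	org: {
		name:   string
		domain: string
	}
}

// Concrete values, typically loaded from the platform model JSON file.
Model: #Model & {
	org: name:   "Example Organization"
	org: domain: "example.com"
}
```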
## Rendering
Holos uses the Kubernetes resource model to manage configuration. The Holos
command line interface is the primary method you'll use to manage your platform.
Holos uses CUE to provide a unified configuration model of the platform. This
unified configuration is built up from components packaged with Helm, Kustomize,
CUE, or any other tool that can produce Kubernetes resource manifests as output.
This process can be thought of as a data **rendering pipeline**. The key
concept is that Holos will always produce fully rendered output, but delegates
the _application_ of the configuration to other tools like kubectl apply,
ArgoCD, or Flux.
```mermaid
---
title: Figure 1 - Render Pipeline
---
graph LR
PS[<a href="/docs/api/author/v1alpha3/#Platform">Platform</a>]
HC[<a href="/docs/api/author/v1alpha3/#ComponentFields">Components</a>]
BP[<a href="/docs/api/core/v1alpha3#BuildPlan">BuildPlan</a>]
H[<a href="/docs/api/author/v1alpha3/#Helm">Helm</a>]
K[<a href="/docs/api/author/v1alpha3/#Kustomize">Kustomize</a>]
O[<a href="/docs/api/author/v1alpha3/#Kubernetes">Kubernetes</a>]
P[<a href="/docs/api/core/v1alpha3#Kustomize">Kustomize</a>]
Y[Kubernetes <br/>Resources]
G[GitOps <br/>Resource]
FS[Local Files]
C[Kube API Server]
PS --> HC --> BP
BP --> H --> P
BP --> K --> P
BP --> O --> P
P --> Y --> FS
P --> G --> FS
FS --> ArgoCD --> C
FS --> Flux --> C
FS --> kubectl --> C
```
## Cluster
A Cluster represents a Kubernetes cluster. One component may be reused across
multiple different Clusters.
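For example, the Platform shown earlier can register the same component path
once for each cluster. This sketch reuses the v1alpha2 fields from the example
above; the cluster names are placeholders:
```cue
package holos

import v1 "github.com/holos-run/holos/api/v1alpha2"

// The same component path rendered for two clusters.
v1.#Platform & {
	metadata: name: "example"
	spec: components: [
		{path: "components/namespaces", cluster: "cluster1"},
		{path: "components/namespaces", cluster: "cluster2"},
	]
}
```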
## Fleet
A Fleet represents a group of Clusters that share a similar purpose. A Platform
typically has two Fleets, one for management and one for workloads.
[krm]: https://docs.google.com/document/d/1RmHXdLhNbyOWPW_AtnnowaRfGejw-qlKQIuLKQWlwzs/view#heading=h.sa6p0aye4ide
[Platform]: /docs/api/core/v1alpha2/#Platform
[BuildPlan]: /docs/api/core/v1alpha2/#BuildPlan

View File

@@ -1,26 +0,0 @@
---
description: Install the Holos executable.
slug: /install
sidebar_position: 100
---
# Installation
Holos is distributed as a single file executable.
## Releases
Download `holos` from the [releases](https://github.com/holos-run/holos/releases) page and place the executable into your shell path.
## Go install
Alternatively, install directly into your go bin path using:
```shell
go install github.com/holos-run/holos/cmd/holos@latest
```
### What you'll need
- [helm](https://github.com/helm/helm/releases) to fetch and render Helm chart components.
- [kubectl](https://kubernetes.io/docs/tasks/tools/) to [kustomize](https://kustomize.io/) components.

View File

@@ -1,23 +0,0 @@
---
description: Get Support for Holos
slug: /support
sidebar_position: 900
---
# Support
## Community Support
You can ask questions in our community forums in [GitHub Discussions](https://github.com/holos-run/holos/discussions), [Discord](https://discord.gg/JgDVbNpye7), or [Google Groups](https://groups.google.com/g/holos-discuss).
## Commercial Support and Services
### Open Infrastructure Services
[Open Infrastructure Services] are the primary stewards of Holos. Contact Open
Infrastructure Services for training, support, and services related to Holos,
platform engineering, and cloud infrastructure automation.
Please email holos-support@openinfrastructure.co for more information.
[Open Infrastructure Services]: https://openinfrastructure.co/

View File

@@ -1,690 +0,0 @@
---
slug: technical-overview
title: Technical Overview
description: Learn how Holos makes it easier for platform teams to integrate software into their platform.
---
<head>
<meta property="og:title" content="Technical Overview | Holos" />
<meta property="og:image" content="/img/cards/technical-overview.png" />
</head>
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
## Overview
Holos makes it easier for platform teams to integrate software into their
platform. Existing tools in the Kubernetes ecosystem are narrowly focused on
application management. Holos takes a holistic approach, focusing on the broad
integration layer where applications are joined into the platform. Holos
improves cross-team collaboration through well-defined, typed structures at the
integration layer. These definitions provide golden paths for other teams to
easily integrate their own services into the platform.
<!-- truncate -->
## The Problem
Platform teams need to develop and maintain significant glue code to integrate
Helm charts and YAML manifests into a platform built on Kubernetes. This glue
code is often implemented with home-grown umbrella charts and scripts.
Maintaining these charts and scripts takes time and effort that could otherwise
be spent improving the platform. The need for each organization to develop and
maintain this glue code indicates a gap in the Kubernetes ecosystem. Holos is a
Go command line tool leveraging [CUE] to fill this gap.
## Key Features
1. Holos enables teams to provide simple definitions for other teams to use as golden paths.
2. Define integrations in [CUE] with strong type checking. No more text templates or bash scripts.
3. Simplify complex integration. Order does not matter. Validation is early and quick.
4. Reuse your existing Helm charts and Kustomize bases.
5. Implement the [rendered manifests pattern]. Changes are clearly visible platform-wide.
6. Fully render manifests to plain files. Use your existing GitOps tools and processes.
7. Post-process with Kustomize from CUE instead of plain text files. Customize your Kustomizations.
8. Mix in resources to Helm charts and Kustomize bases, for example ExternalSecrets.
9. Render all of Helm, Kustomize, CUE, JSON, and YAML consistently with the same process.
## Rendering Pipeline
```mermaid
---
title: Figure 1 - v1alpha4 Rendered Manifest Pipeline
---
graph LR
Platform[<a href="/docs/api/author/v1alpha4/#Platform">Platform</a>]
Component[<a href="/docs/api/author/v1alpha4/#ComponentConfig">Components</a>]
Helm[<a href="/docs/api/author/v1alpha4/#Helm">Helm</a>]
Kustomize[<a href="/docs/api/author/v1alpha4/#Kustomize">Kustomize</a>]
Kubernetes[<a href="/docs/api/author/v1alpha4/#Kubernetes">Kubernetes</a>]
BuildPlan[<a href="/docs/api/core/v1alpha4/#buildplan">BuildPlan</a>]
ResourcesArtifact[<a href="/docs/api/core/v1alpha4/#artifact">Resources<br/>Artifact</a>]
GitOpsArtifact[<a href="/docs/api/core/v1alpha4/#artifact">GitOps<br/>Artifact</a>]
Generators[<a href="/docs/api/core/v1alpha4/#generators">Generators</a>]
Transformers[<a href="/docs/api/core/v1alpha4/#transformer">Transformers</a>]
Files[Manifest<br/>Files]
Platform --> Component
Component --> Helm --> BuildPlan
Component --> Kubernetes --> BuildPlan
Component --> Kustomize --> BuildPlan
BuildPlan --> ResourcesArtifact --> Generators
BuildPlan --> GitOpsArtifact --> Generators
Generators --> Transformers --> Files
```
## Use Case
One of the development teams at the fictional Bank of Holos wants to deploy a
simple web app for an experimental project they're working on.
The platform team at the bank wants to build a simple golden path for teams to
provision projects consistently and easily in compliance with the bank's
policies.
### Platform Team
The platform team builds a golden path for development teams to register their
project with the platform. In compliance with bank policy, the platform team
needs to manage important security resources for each new project. All of these
resources can be derived from only 3 pieces of information.
1. The name of the project the dev team is working on.
2. The name of the team who currently owns the project.
3. The services, if any, the project is exposing.
The platform team defines a structure for the dev team to register this
information. This structure provides the golden path for the dev team.
The development team registers their experimental project, creatively named
"experiment" by submitting a pull request that contains this information.
<Tabs groupId="EB9C9AF1-F1AA-4189-B746-A5B8E3043F87">
<TabItem value="projects/experiment.cue" label="projects/experiment.cue">
```cue showLineNumbers
package holos
// The development team registers a project name.
_Projects: experiment: {
// The project owner must be named.
Owner: Name: "dev-team"
// Expose Service podinfo at https://podinfo.example.com
Hostnames: podinfo: Port: 9898
}
```
</TabItem>
</Tabs>
The platform team uses these three pieces of information to derive all of the
platform resources necessary to support the development team.
1. **Namespace** for the project resources.
2. **RoleBinding** to grant the dev team access to the project namespace.
3. **SecretStore** which implements the secret management policy for the bank.
4. **ReferenceGrant** to expose the project services through the Gateway API.
5. **HTTPRoutes** to expose the project services, if any.
6. **AppProject** to deploy and manage the project Applications with ArgoCD.
7. **Common Labels** to ensure every resource is labeled for resource accounting.
Rendering the platform generates fully rendered manifests for all of these
resources. These manifests are derived from the three pieces of information the
dev team provided.
Note the platform team must manage these resources across multiple namespaces.
The first four reside in the project namespace owned by the dev team. The
HTTPRoute and AppProject go into two namespaces managed by the platform team.
Holos makes it easier for the platform team to organize these resources into
different components with different owners.
:::important
Holos supports [CODEOWNERS] by clearly defining the teams responsible for each
platform component.
:::
<Tabs groupId="2E46EA1C-B118-44BF-AE20-752E8D1CE131">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered httproutes for cluster overview in 177.823625ms
rendered app-projects for cluster overview in 180.946834ms
rendered projects for cluster overview in 181.98725ms
rendered namespaces for cluster overview in 182.30725ms
rendered platform in 182.31075ms
```
:::tip
If you'd like to try this for yourself, `cd` into [examples/tech-overview] and
render the platform.
:::
</TabItem>
</Tabs>
The fully rendered manifests are written into the `deploy/` directory organized
by cluster and component for GitOps.
<Tabs groupId="07FBE14E-E9EA-437B-9FA1-C6D8806524AD">
<TabItem value="deploy/clusters/local/components/namespaces/namespaces.gen.yaml" label="namespaces">
```
cat deploy/clusters/local/components/namespaces/namespaces.gen.yaml
```
```yaml showLineNumbers
apiVersion: v1
kind: Namespace
metadata:
labels:
argocd.argoproj.io/instance: namespaces
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: namespaces
kubernetes.io/metadata.name: experiment
name: experiment
```
</TabItem>
<TabItem value="deploy/clusters/local/components/projects/projects.gen.yaml" label="projects">
```
cat deploy/clusters/local/components/projects/projects.gen.yaml
```
```yaml showLineNumbers
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
argocd.argoproj.io/instance: projects
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: projects
name: admin
namespace: experiment
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: oidc:sg-dev-team@example.com
---
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
labels:
argocd.argoproj.io/instance: projects
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: projects
name: default
namespace: experiment
spec:
provider:
kubernetes:
auth:
token:
bearerToken:
key: token
name: eso-reader
remoteNamespace: experiment
server:
caBundle: LS0tLS1CRUd...QVRFLS0tLS0K
url: https://management.example.com:6443
---
apiVersion: gateway.networking.k8s.io/v1beta1
kind: ReferenceGrant
metadata:
labels:
argocd.argoproj.io/instance: projects
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: projects
name: istio-ingress
namespace: experiment
spec:
from:
- group: gateway.networking.k8s.io
kind: HTTPRoute
namespace: istio-ingress
to:
- group: ""
kind: Service
```
</TabItem>
<TabItem value="deploy/clusters/local/components/httproutes/httproutes.gen.yaml" label="httproutes">
```
cat deploy/clusters/local/components/httproutes/httproutes.gen.yaml
```
```yaml showLineNumbers
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
labels:
argocd.argoproj.io/instance: httproutes
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: httproutes
name: podinfo.example.com
namespace: istio-ingress
spec:
hostnames:
- podinfo.example.com
parentRefs:
- name: default
namespace: istio-ingress
rules:
- backendRefs:
- name: podinfo
namespace: experiment
port: 9898
matches:
- path:
type: PathPrefix
value: /
```
</TabItem>
</Tabs>
The rendered manifests are derived from the project registration information by
definitions implemented by the platform team. The [Author API] provides a
[Project] schema, but does not define an implementation. The platform team
implements the [Project] schema by adding a `_Projects` struct to manage
resources according to bank policies.
:::important
The Author API is intended as a convenient, ergonomic reference for component
authors. Definitions **are not** confined to the Author API.
:::
The following example shows how the platform team wrote the `_Projects`
definition to derive the Namespace from the project registration provided by the
dev team.
<Tabs groupId="5732727B-295E-46E1-B851-F8A1C5D7DF88">
<TabItem value="projects/platform/components/namespaces/namespaces.cue" label="Namespaces Component">
```txt
projects/platform/components/namespaces/namespaces.cue
```
```cue showLineNumbers
package holos
_Kubernetes: #Kubernetes & {
Name: "namespaces"
Resources: Namespace: _Namespaces
}
// Produce a kubernetes objects build plan.
_Kubernetes.BuildPlan
```
1. This is the namespaces component which manages a collection of Namespace resources derived from the project registration data shown in the second tab.
2. Line 5 manages a Namespace for each value of the `_Namespaces` struct. See the second tab for how the platform team defines this structure.
</TabItem>
<TabItem value="projects/projects.cue" label="Projects Definition">
```txt
projects/projects.cue
```
```cue showLineNumbers
package holos
import api "github.com/holos-run/holos/api/author/v1alpha4"
// Projects defines the structure other teams register with to manage project
// resources. The platform team defines the schema, development teams provide
// the values.
_Projects: api.#Projects & {
[NAME=string]: {
Name: NAME
// The platform team requires the development teams to indicate an owner of
// the project.
Owner: Name: string
// The default value for the owner email address is derived from the owner
// name, but development teams can provide a different email address if
// needed.
Owner: Email: string | *"sg-\(Owner.Name)@\(_Organization.Domain)"
// The platform team constrains the project to a single namespace.
Namespaces: close({(NAME): Name: NAME})
// The platform team constrains the exposed services to the project
// namespace.
Hostnames: [HOST=string]: {
Name: HOST
Namespace: Namespaces[NAME].Name
Service: HOST
Port: number | *80
}
CommonLabels: {
"\(_Organization.Domain)/project.name": Name
"\(_Organization.Domain)/owner.name": Owner.Name
"\(_Organization.Domain)/owner.email": Owner.Email
}
}
}
for Project in _Projects {
// Register project namespaces with the namespaces component.
_Namespaces: {
for Namespace in Project.Namespaces {
(Namespace.Name): metadata: labels: Project.CommonLabels
}
}
}
```
1. On lines 8-35 the platform team derives most fields from the project name (line 9), and the owner name (line 13). The purpose is to fill in the remaining fields defined by the Author API.
2. Line 13 The dev team is expected to provide a concrete owner name, indicated by the `string` value.
3. Line 17 The platform team provides a default value for the email address. The project team may define a different value.
4. Line 19 The Author API allows a project to have many namespaces. The platform team constrains this down to one namespace per project by closing the struct. The namespace name must be the same as the project name. A standalone sketch of these CUE idioms follows after the tabs.
5. Lines 22-27 The platform team derives values for a Gateway API [BackendObjectReference] from the hostname provided by the project team. These values are used later to build HTTPRoutes to expose their service.
6. Lines 30-32 Common labels are derived to mix into resources associated with this project.
7. Lines 37-44 The platform team adds a namespace with common labels for each project to the struct we saw in the first tab.
</TabItem>
</Tabs>
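The CUE idioms the platform team relies on here, defaults marked with `*`,
string interpolation, and `close()`, can be tried in isolation. The following
standalone sketch is hypothetical and independent of the Holos schemas; it
evaluates with `cue eval`:
```cue
package sketch

// A default email is derived from the owner name; a project may override it.
Owner: Name:  "dev-team"
Owner: Email: string | *"sg-\(Owner.Name)@example.com"

// close() rejects any namespace other than the one named after the project.
Name:       "experiment"
Namespaces: close({(Name): {name: Name}})

// Uncommenting the next line fails evaluation because the struct is closed:
// Namespaces: other: {name: "other"}
```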
The RoleBinding, SecretStore, and ReferenceGrant are managed in the
[projects](https://github.com/holos-run/bank-of-holos/blob/v0.4.1/examples/tech-overview/projects/platform/components/projects/projects.cue)
component, similar to the previous namespaces example.
The HTTPRoute is managed separately in the
[httproutes](https://github.com/holos-run/bank-of-holos/blob/v0.4.1/examples/tech-overview/projects/platform/components/httproutes/httproutes.cue)
component.
All components are registered with the platform in the
[platform](https://github.com/holos-run/bank-of-holos/tree/v0.4.1/examples/tech-overview/platform)
directory.
:::important
Multiple components, potentially owned by different teams, derive fully rendered
resources from the same three project values. The dev team added these three
values to the `_Projects` struct. The platform team wrote the definition to
integrate software according to bank policies. CUE powers this _unified_
platform configuration model.
:::
:::tip
Components map 1:1 to ArgoCD Applications or Flux Kustomizations.
:::
### Development Team
The development team has the platform resources they need, but they still need
to deploy their container. The development team submits a pull request adding
the following two files to deploy their existing Helm chart.
<Tabs groupId="7AD1DDA9-8001-462B-8BE0-D9410EB51233">
<TabItem value="projects/experiment/components/podinfo/podinfo.cue" label="Helm Component">
```txt
projects/experiment/components/podinfo/podinfo.cue
```
```cue showLineNumbers
package holos
// Produce a helm chart build plan.
_HelmChart.BuildPlan
_HelmChart: #Helm & {
Name: "podinfo"
Chart: {
version: "6.6.2"
repository: {
name: "podinfo"
url: "https://stefanprodan.github.io/podinfo"
}
}
}
```
This file represents a Helm chart component to add to the platform. The second
tab registers this component with the platform.
</TabItem>
<TabItem value="platform/podinfo.cue" label="Component Registration">
```
platform/podinfo.cue
```
```cue showLineNumbers
package holos
// Manage the component on every workload Cluster, but not management clusters.
for Cluster in _Fleets.workload.clusters {
_Platform: Components: "\(Cluster.name):podinfo": {
name: "podinfo"
component: "projects/experiment/components/podinfo"
cluster: Cluster.name
tags: project: "experiment"
}
}
```
This file registers the component with the platform. When the platform is
rendered, the dev team's Helm chart will be rendered on all workload clusters
across the platform.
</TabItem>
</Tabs>
The project tag links the component to the same field of the `_Projects` struct.
:::important
You can add your own key=value tags in your platform specification to inject
values into components. This feature is useful for reusing one component path
across several environments or customers, as sketched below.
:::
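For example, a hypothetical staging registration could reuse the same component
path with a different tag value, following the registration pattern shown
above. The staging project name and tag value here are made up for
illustration:
```cue
package holos

// Hypothetical second registration: the same component path reused for a
// staging project, distinguished only by its tag value.
for Cluster in _Fleets.workload.clusters {
	_Platform: Components: "\(Cluster.name):podinfo-staging": {
		name:      "podinfo-staging"
		component: "projects/experiment/components/podinfo"
		cluster:   Cluster.name
		tags: project: "experiment-staging"
	}
}
```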
Once the dev team's component is registered, rendering the platform will render
their component.
<Tabs groupId="1BAF7AD2-BBCD-4797-A3A6-55A626732845">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered namespaces for cluster overview in 185.64075ms
rendered app-projects for cluster overview in 186.729292ms
rendered httproutes for cluster overview in 195.222833ms
rendered projects for cluster overview in 195.217125ms
// highlight-next-line
rendered podinfo for cluster overview in 195.830042ms
rendered platform in 195.90275ms
```
</TabItem>
</Tabs>
<Tabs groupId="77BF500B-105A-4AB4-A615-DEC19F501AE1">
<TabItem value="command" label="Command">
```bash
cat deploy/clusters/local/components/podinfo/podinfo.gen.yaml
```
</TabItem>
<TabItem value="output" label="Output">
```yaml showLineNumbers
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: podinfo
app.kubernetes.io/version: 6.6.2
argocd.argoproj.io/instance: podinfo
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
helm.sh/chart: podinfo-6.6.2
holos.run/component.name: podinfo
name: podinfo
spec:
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http
- name: grpc
port: 9999
protocol: TCP
targetPort: grpc
selector:
app.kubernetes.io/name: podinfo
argocd.argoproj.io/instance: podinfo
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: podinfo
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: podinfo
app.kubernetes.io/version: 6.6.2
argocd.argoproj.io/instance: podinfo
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
helm.sh/chart: podinfo-6.6.2
holos.run/component.name: podinfo
name: podinfo
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: podinfo
argocd.argoproj.io/instance: podinfo
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: podinfo
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9898"
prometheus.io/scrape: "true"
labels:
app.kubernetes.io/name: podinfo
argocd.argoproj.io/instance: podinfo
example.com/owner.email: sg-dev-team@example.com
example.com/owner.name: dev-team
example.com/project.name: experiment
holos.run/component.name: podinfo
spec:
containers:
- command:
- ./podinfo
- --port=9898
- --cert-path=/data/cert
- --port-metrics=9797
- --grpc-port=9999
- --grpc-service-name=podinfo
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: '#34577c'
image: ghcr.io/stefanprodan/podinfo:6.6.2
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
failureThreshold: 3
initialDelaySeconds: 1
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: podinfo
ports:
- containerPort: 9898
name: http
protocol: TCP
- containerPort: 9797
name: http-metrics
protocol: TCP
- containerPort: 9999
name: grpc
protocol: TCP
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
failureThreshold: 3
initialDelaySeconds: 1
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources:
limits: null
requests:
cpu: 1m
memory: 16Mi
volumeMounts:
- mountPath: /data
name: data
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: data
```
</TabItem>
</Tabs>
Note the rendered Helm chart resources have consistent project labels. The
platform team added a constraint to the project so all Helm charts are
post-processed with Kustomize to add these common labels. The platform team
accomplishes this by adding a constraint in the project directory. This can be
seen in
[schema.cue](https://github.com/holos-run/bank-of-holos/blob/v0.4.1/schema.cue#L35-L38)
where the platform team configures all component kinds for the platform.
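The linked `schema.cue` is the source of truth for how this works in the
example repository. The sketch below is illustrative only and shows the
underlying CUE pattern: a labels struct unified into every resource of every
kind:
```cue
package sketch

// Illustrative only: this is not the contents of schema.cue.
CommonLabels: {
	"example.com/project.name": "experiment"
	"example.com/owner.name":   "dev-team"
}

// Constrain every resource of every kind to carry the common labels.
Resources: [_]: [_]: metadata: labels: CommonLabels

// Any resource added under Resources picks up the labels automatically.
Resources: Namespace: experiment: metadata: name: "experiment"
```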
We've covered how the platform team provides a golden path for development teams
to register their projects by defining a Projects structure. We've also covered
how the development team deploys their existing Helm chart onto the platform.
## Support & Resources
1. See our [Quickstart] guide to get started with Holos.
2. Check out our other [Guides] which cover specific topics.
3. Refer to the [Author API] when writing components.
4. Consider the [Core API] if you need to do something more advanced than the Author API supports.
5. Community and commercial [Support] is available.
6. [Discussions Forum](https://github.com/holos-run/holos/discussions)
[Support]: /docs/support/
[Guides]: /docs/guides/
[API Reference]: /docs/api/
[Quickstart]: /docs/quickstart/
[CUE]: https://cuelang.org/
[Author API]: /docs/api/author/
[Core API]: /docs/api/core/
[Open Infrastructure Services]: https://openinfrastructure.co/
[Why are we templating YAML]: https://hn.algolia.com/?dateRange=all&page=0&prefix=false&query=https%3A%2F%2Fleebriggs.co.uk%2Fblog%2F2019%2F02%2F07%2Fwhy-are-we-templating-yaml&sort=byDate&type=story
[Holos]: https://holos.run/
[rendered manifests pattern]: https://akuity.io/blog/the-rendered-manifests-pattern/
[examples/tech-overview]: https://github.com/holos-run/bank-of-holos/tree/v0.2.0/examples/tech-overview
[BackendObjectReference]: https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.BackendObjectReference
[CODEOWNERS]: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
[Project]: /docs/api/author/v1alpha3/#Project

View File

@@ -1,5 +0,0 @@
import DocCardList from '@theme/DocCardList';
# API Reference
<DocCardList />

View File

@@ -1,3 +0,0 @@
# Author Schema
v1alpha5

View File

@@ -1,3 +0,0 @@
# Core Schema
v1alpha5

View File

@@ -1,7 +1,3 @@
---
description: Core v1alpha4 schema for advanced use cases.
sidebar_position: 998
---
<!-- Code generated by gomarkdoc. DO NOT EDIT -->
# v1alpha2

17
doc/md/glossary.md Normal file
View File

@@ -0,0 +1,17 @@
# Glossary
This page describes the terms used within the context of Holos.
## Management Cluster
## Workload Cluster
## Platform Form
## Platform Model
## Secret Store
## Service Mesh
## Zero Trust

View File

@@ -1,88 +0,0 @@
# Glossary
This page describes the terms used within the context of Holos.
## Platform
In Holos, a Platform is a comprehensive environment configured using the
Kubernetes resource model. It extends beyond traditional Kubernetes
functionality by integrating cloud resources through Crossplane, allowing for a
unified management approach across both Kubernetes and cloud infrastructure. A
Platform typically consists of one Management Cluster, which handles control and
secret management, and one or more Workload Clusters, where application
workloads are deployed and run. This architecture enables a consistent and
scalable approach to managing diverse resources and services within the
cloud-native ecosystem.
## Management Cluster
In the context of Holos, a Management Cluster is a special Kubernetes cluster
that hosts Kubernetes controllers. For example, cert-manager, Cluster API, and
Crossplane. A management cluster manages a single platform. The primary
function of this cluster is to securely store and manage secrets, ensuring the
secure handling of sensitive information such as credentials, API keys, and
other confidential data. The Management Cluster serves as a centralized and
secure control plane for the platform, facilitating the orchestration and
management of other components.
## Workload Cluster
In Holos, a Workload Cluster is a Kubernetes cluster designed to host and run
application workloads. Unlike the Management Cluster, which focuses on control
and secret management, Workload Clusters are dedicated to executing the actual
applications and services. These clusters can vary in size and configuration
based on the specific needs of the applications they support. Workload Clusters
leverage Kubernetes' orchestration capabilities to manage the deployment,
scaling, and operation of containerized applications, providing a flexible and
scalable environment for running production workloads within the platform.
## Platform Form
In Holos, a Platform Form is a customizable web form defined by JSON data. Each
platform within Holos has a unique Platform Form, which serves as an interface
for configuring and managing the platform's settings and resources. Platform
engineers can customize the Platform Form by modifying the underlying CUE
(Configure, Unify, Execute) code, allowing for tailored configurations that
meet specific requirements. This flexibility enables platform engineers to
create a user-friendly and specific interface for managing the platform's
components and operations.
## Platform Model
In Holos, the Platform Model represents the collection of values submitted
through the Platform Form. It encapsulates the specific configuration details
and settings defined by the platform engineers, serving as the blueprint for the
platform's setup and operation. The Platform Model is essential for translating
the customized options and parameters from the Platform Form into actionable
configurations within the Holos ecosystem, ensuring that the platform operates
according to the specified requirements and guidelines.
## Secret Store
In Holos, a SecretStore is a repository for securely storing and managing
sensitive data such as passwords, API keys, and other confidential information.
It is compatible with any secret store supported by the External Secrets
Operator. By default, the management cluster serves as the SecretStore to
minimize dependencies and simplify the architecture. This setup ensures that
secrets are managed in a secure and centralized manner, aligning with the
overall security framework of the platform.
## Service Mesh
In Holos, a Service Mesh is a dedicated infrastructure layer for managing,
observing, and securing service-to-service communications within a microservices
architecture. It typically includes features such as load balancing, traffic
routing, service discovery, and security policies like mutual TLS and access
control. The Service Mesh abstracts these functionalities away from the
application code, providing a centralized control plane for managing the
interactions between microservices. This facilitates better observability,
resilience, and security in complex, distributed environments.
## Zero Trust
In the context of Holos and broader security practices, Zero Trust is a security
model that assumes no implicit trust is granted to any user, system, or
component inside or outside the network. Instead, every request for access is
treated as potentially malicious, and verification is required at every stage.
This model enforces strict identity verification, continuous monitoring, and
least-privilege access policies.

64
doc/md/intro.md Normal file
View File

@@ -0,0 +1,64 @@
# Introduction
⚡️ Holos will help you build your **software development platform in no time.**
💸 Building a software development platform is **time consuming and expensive**. Spend more time building features for your customers and less time managing your development platform.
💥 Already have a platform? Add new features and services to your platform easily with Holos.
🧐 Holos is a platform builder. It builds a holistic software development platform composed of best-of-breed cloud native open source projects. Holos is also a tool to make it easier to manage cloud infrastructure by providing a typed alternative to YAML templates.
## Features
Holos was built to solve two main problems:
1. Building a platform usually takes 3 engineers 6-9 months of effort. Holos provides a reference platform that enables you to deploy and customize your platform in a fraction of the time.
2. Configuration changes often cause outages. Existing tools like Helm make it difficult to understand the impact a configuration change will have. Holos provides a unique, unified configuration model powered by CUE that makes it safer and easier to roll out configuration changes.
A core principle of Holos is that organizations gain value from owning the platform they build on. Avoid vendor lock-in, future price hikes, and expensive licensing changes by building on a solid foundation of open source, Cloud Native Computing Foundation-backed projects.
The following features are built into the Holos reference platform.
:::tip
Don't see your preferred technology in the stack? Holos is designed to enable you to swap out components of the platform tech stack.
:::
- **Continuous Delivery**
- Holos builds a GitOps workflow for each application running in the platform.
- Developers push changes which are automatically deployed.
- Powered by [ArgoCD](https://argo-cd.readthedocs.io/en/stable/)
- **Identity and Access Management** (IAM)
- Holos builds a standard OIDC identity provider for you.
  - Integrates with your existing IAM and SSO system, or works independently.
- Powerful customer identity and access management features.
- Role based access control.
- Powered by [ZITADEL](https://zitadel.com/)
- **Zero Trust**
- Authenticate and Authorize users at the platform layer instead of or in addition to the application layer.
- Integrated with observability to measure and alert about problems before customers complain.
- Powered by [Istio](https://istio.io/)
- **Observability**
- Holos collects performance and availability metrics automatically, without requiring application changes.
- Optional, deeper integration into the application layer.
- Distributed Tracing
- Logging
- Powered by Prometheus, Grafana, Loki, and OpenTelemetry.
- **Data Platform**
- Integrated management of PostgreSQL
- Automatic backups
- Automatic restore from backup
- Quickly fail over across multiple regions
- **Multi-Region**
- Holos is designed to operate in multiple regions and multiple clouds.
- Keep customer data in the region that makes the most sense for your business.
- Easily cut over from one region to another for redundancy and business continuity.
## Development Status
Holos is being actively developed by [Open Infrastructure Services](https://openinfrastructure.co). Releases can be found [here](https://github.com/holos-run/holos/releases).
## Adoption
Organizations who have officially adopted Holos can be found [here](https://github.com/holos-run/holos/blob/main/ADOPTERS.md).

View File

@@ -2,8 +2,8 @@
This document captures notes on locally developing Holos.
Follow the steps in [Try Holos Locally](../guides/try-holos), but take care
to select `Develop` tabs when creating the k3d cluster so you have a local
Follow the steps in [Try Holos Locally](/docs/tutorial/local/k3d), but take
care to select `Develop` tabs when creating the k3d cluster so you have a local
registry to push to.
## Apply Resources

View File

@@ -0,0 +1,81 @@
# Architecture
This page describes the architecture of the Holos reference platform.
## Overview
The reference platform manages three Kubernetes clusters by default: one management cluster and two workload clusters.
```mermaid
graph TB
subgraph "Management"
secrets(Secrets)
c1(Controllers)
end
subgraph "Primary"
s1p(Service 1)
s2p(Service 2)
end
subgraph "Standby"
s1s(Service 1)
s2s(Service 2)
end
classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;
classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;
classDef cluster fill:#fff,stroke:#bbb,stroke-width:2px,color:#326ce5;
class c1,s1p,s2p,s1s,s2s,secrets k8s;
class Management,Primary,Standby cluster;
```
The services in each cluster type are:
:::tip
The management cluster is designed to operate reliably on spot instances. A highly available management cluster typically costs less than a cup of coffee per month to operate.
:::
1. Management Cluster
- **SecretStore** to provide namespace scoped secrets to workload clusters.
- **CertManager** to provision TLS certificates and make them available to workload clusters.
- **ClusterAPI** to provision and manage workload clusters via GitOps. For example, EKS or GKE clusters.
- **Crossplane** to provision and manage cloud resources via GitOps. For example, buckets, managed databases, any other cloud resource.
- **CronJobs** to refresh short lived credentials. For example image pull credentials.
- **ArgoCD** to manage resources within the management cluster via GitOps.
2. Primary Workload Cluster
- **ArgoCD** to continuously deploy your applications and services via GitOps.
- **External Secrets Operator** to synchronize namespace scoped secrets.
- **Istio** to provide a Gateway to expose services.
- **ZITADEL** to provide SSO login for all other services (e.g. ArgoCD, Grafana, Backstage, etc...)
- **PostgreSQL** for in-cluster databases.
- **Backstage** to provide your developer portal into the whole platform.
- **Observability** implemented by Prometheus, Grafana, and Loki to provide monitoring and logging.
- **AuthorizationPolicy** to provide role based access control to all services in the cluster.
3. Standby Workload Cluster
- Identical configuration to the primary cluster.
- May be scaled down to zero to reduce expenses.
- Intended to take the primary cluster role quickly, within minutes, for disaster recovery or regular maintenance purposes.
## Security
### Namespaces
Namespaces are security boundaries in the reference platform. A given namespace is treated as the same security context across multiple clusters following the [SIG Multi-cluster Position](https://github.com/kubernetes/community/blob/dd4c8b704ef1c9c3bfd928c6fa9234276d61ad18/sig-multicluster/namespace-sameness-position-statement.md).
The namespace sameness principle makes role based access control straightforward to manage and comprehend. For example, granting a developer the ability to create secrets in namespace `example` means the developer has the ability to do so in the secret store in the management cluster and also synchronize the secret to the services they own in the workload clusters.
## Data Platform
Holos is designed to work with two distinct types of databases by default:
1. In-cluster PostgreSQL databases for lower cost and rapid development and testing.
2. Out-of-cluster SQL databases for production services, e.g. RDS, CloudSQL, Aurora, Redshift, etc...
:::tip
To simplify maintenance, the Holos reference platform provisions databases from the most recent backup by default.
:::
In-cluster databases in the Holos reference platform automatically save backups to an S3 or GCS bucket. For regular maintenance and disaster recovery, the standby cluster automatically restores databases from the most recent backup in the bucket. This capability makes maintenance much simpler: most maintenance tasks are carried out on the standby cluster, which is then promoted to the primary. Software upgrades in particular are intended to be carried out against the standby, verified, then promoted to primary. Once live traffic shifts to the upgraded services in the new primary, the previous cluster can be spun down to save cost or upgraded safely in place.

View File

@@ -1,15 +0,0 @@
---
slug: /topics
title: Topics
description: Stand alone topics that often come up when using Holos.
---
import DocCardList from '@theme/DocCardList';
# Topics
This section has self-contained articles related to various topics that come up
when writing platform configuration code with Holos.
---
<DocCardList />

View File

@@ -1 +0,0 @@
# Multi Cluster

View File

@@ -1,272 +0,0 @@
---
description: Build a local Cluster to use with these guides.
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Admonition from '@theme/Admonition';
# Local Cluster
In this guide we'll set up a local k3d cluster to apply and explore the
configuration described in our other guides. After completing this guide you'll
have a standard Kubernetes API server with proper DNS and TLS certificates.
You'll be able to easily reset the cluster to a known good state to iterate on
your own Platform.
## Reset the Cluster
If you've already followed this guide, reset the cluster by running the
following commands. Skip this section if you're creating a cluster for the
first time.
First, delete the cluster.
<Tabs groupId="k3d-cluster-delete">
<TabItem value="command" label="Command">
```bash
k3d cluster delete workload
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
INFO[0000] Deleting cluster 'workload'
INFO[0000] Deleting cluster network 'k3d-workload'
INFO[0000] Deleting 1 attached volumes...
INFO[0000] Removing cluster details from default kubeconfig...
INFO[0000] Removing standalone kubeconfig file (if there is one)...
INFO[0000] Successfully deleted cluster workload!
```
</TabItem>
</Tabs>
Then create the cluster again.
<Tabs groupId="k3d-cluster-create">
<TabItem value="command" label="Command">
```bash
k3d cluster create workload \
--registry-use k3d-registry.holos.localhost:5100 \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
INFO[0000] portmapping '443:443' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] Prep: Network
INFO[0000] Created network 'k3d-workload'
INFO[0000] Created image volume k3d-workload-images
INFO[0000] Starting new tools node...
INFO[0000] Starting node 'k3d-workload-tools'
INFO[0001] Creating node 'k3d-workload-server-0'
INFO[0001] Creating LoadBalancer 'k3d-workload-serverlb'
INFO[0001] Using the k3d-tools node to gather environment information
INFO[0001] HostIP: using network gateway 172.17.0.1 address
INFO[0001] Starting cluster 'workload'
INFO[0001] Starting servers...
INFO[0001] Starting node 'k3d-workload-server-0'
INFO[0003] All agents already running.
INFO[0003] Starting helpers...
INFO[0003] Starting node 'k3d-workload-serverlb'
INFO[0009] Injecting records for hostAliases (incl. host.k3d.internal) and for 3 network members into CoreDNS configmap...
INFO[0012] Cluster 'workload' created successfully!
INFO[0012] You can now use it like this:
kubectl cluster-info
```
</TabItem>
</Tabs>
Finally, add your trusted certificate authority.
<Tabs groupId="apply-local-ca">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f "$(mkcert -CAROOT)/namespace.yaml"
kubectl apply --server-side=true -n cert-manager -f "$(mkcert -CAROOT)/local-ca.yaml"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
namespace/cert-manager serverside-applied
secret/local-ca serverside-applied
```
</TabItem>
</Tabs>
You're back to the same state as the first time you completed this guide.
## What you'll need {#requirements}
You'll need the following tools installed to complete this guide.
1. [holos](/docs/install) - to build the platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Holos components that wrap upstream Helm charts.
3. [k3d](https://k3d.io/#installation) - to provide a k8s api server.
4. [OrbStack](https://docs.orbstack.dev/install) or [Docker](https://docs.docker.com/get-docker/) - to use k3d.
5. [kubectl](https://kubernetes.io/docs/tasks/tools/) - to interact with the k8s api server.
6. [mkcert](https://github.com/FiloSottile/mkcert?tab=readme-ov-file#installation) - to make trusted TLS certificates.
7. [jq](https://jqlang.github.io/jq/download/) - to fiddle with JSON output.
## Configure DNS {#configure-dns}
Configure your machine to resolve `*.holos.localhost` to your loopback
interface. This is necessary for requests to reach the workload cluster. Save
this script to a file and execute it.
```bash showLineNumbers
#! /bin/bash
#
set -euo pipefail
tmpdir="$(mktemp -d)"
finish() {
[[ -d "$tmpdir" ]] && rm -rf "$tmpdir"
}
trap finish EXIT
cd "$tmpdir"
brew install dnsmasq
cat <<EOF >"$(brew --prefix)/etc/dnsmasq.d/holos.localhost.conf"
# Refer to https://holos.run/docs/tutorial/local/k3d/
address=/holos.localhost/127.0.0.1
EOF
if [[ -r /Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist ]]; then
echo "dnsmasq already configured"
else
sudo cp "$(brew list dnsmasq | grep 'dnsmasq.plist$')" \
/Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist
sudo launchctl unload /Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist
sudo launchctl load /Library/LaunchDaemons/homebrew.mxcl.dnsmasq.plist
dscacheutil -flushcache
echo "dnsmasq configured"
fi
sudo mkdir -p /etc/resolver
sudo tee /etc/resolver/holos.localhost <<EOF
domain holos.localhost
nameserver 127.0.0.1
EOF
sudo killall -HUP mDNSResponder
echo "all done."
```
## Create the Cluster {#create-the-cluster}
The Workload Cluster is where your applications and services will be deployed.
In production this is usually an EKS, GKE, or AKS cluster.
:::tip
Holos supports all compliant Kubernetes clusters. Holos was developed and tested
on GKE, EKS, Talos, k3s, and Kubeadm clusters.
:::
Create a local registry to speed up image builds and pulls.
```bash
k3d registry create registry.holos.localhost --port 5100
```
Create the workload cluster configured to use the local registry.
```bash
k3d cluster create workload \
--registry-use k3d-registry.holos.localhost:5100 \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
Traefik is disabled because Istio provides the same functionality.
## Setup Root CA {#setup-root-ca}
Platforms most often use cert-manager to issue TLS certificates. The browser
and tools we're using need to trust these certificates to work together.
Generate a local, trusted root certificate authority with the following script.
Admin access is necessary for `mkcert` to install the certificate into your
trust stores.
```bash
sudo -v
```
Manage the local CA and copy the CA key to the workload cluster so that
cert-manager can manage trusted certificates.
Save this script to a file and execute it to configure a trusted certificate
authority.
```bash showLineNumbers
#! /bin/bash
#
set -euo pipefail
mkcert --install
tmpdir="$(mktemp -d)"
finish() {
[[ -d "$tmpdir" ]] && rm -rf "$tmpdir"
}
trap finish EXIT
cd "$tmpdir"
# Create the local CA Secret with ca.crt, tls.crt, tls.key
mkdir local-ca
cd local-ca
CAROOT="$(mkcert -CAROOT)"
cp -p "${CAROOT}/rootCA.pem" ca.crt
cp -p "${CAROOT}/rootCA.pem" tls.crt
cp -p "${CAROOT}/rootCA-key.pem" tls.key
kubectl create secret generic --from-file=. --dry-run=client -o yaml local-ca > ../local-ca.yaml
echo 'type: kubernetes.io/tls' >> ../local-ca.yaml
cd ..
cat <<EOF > namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
labels:
kubernetes.io/metadata.name: cert-manager
name: cert-manager
spec:
finalizers:
- kubernetes
EOF
kubectl apply --server-side=true -f namespace.yaml
kubectl apply -n cert-manager --server-side=true -f local-ca.yaml
# Save the Secret to easily reset the cluster later.
install -m 0644 namespace.yaml "${CAROOT}/namespace.yaml"
install -m 0600 local-ca.yaml "${CAROOT}/local-ca.yaml"
```
:::warning
Take care to run the local-ca script each time you create the workload cluster
so that Certificates are issued correctly.
:::
## Clean Up {#clean-up}
If you'd like to clean up the resources you created in this guide, remove them
with:
```bash
k3d cluster delete workload
```
## Next Steps
Now that you have a real cluster, continue your Holos journey by following the
[Tutorial](../tutorial/1-overview.mdx) and setting up your own Platform.

View File

@@ -1,17 +0,0 @@
---
slug: /
title: Tutorial
description: Take a guided tour of Holos features with our tutorial.
---
import DocCardList from '@theme/DocCardList';
# Tutorial
Our tutorial starts with a technical overview of Holos and then progresses
through each of the main features and concepts. Upon completing the tutorial
you'll have a solid understanding of the primary features and advantages of
Holos.
---
<DocCardList />

View File

@@ -1,94 +0,0 @@
# Overview
Holos is an open-source tool that simplifies software integration for platform
teams. While most Kubernetes tools focus on application management, Holos takes
a holistic infrastructure-as-code (IaC) approach, targeting the integration
layer where applications and organizational data converge. By providing
well-defined, typed structures for consistent validation, Holos reduces errors,
streamlines integration, and creates clear pathways for teams to easily
integrate their services.
# How Holos works
Holos implements the [rendered manifests pattern][rmp], generating fully
rendered Kubernetes manifests from [CUE language][CUE] abstractions called
Components. These Components can model [Helm charts][Helm], [Kustomize
bases][Kustomize], or native Kubernetes resources. A Holos Platform consists of
one or more Components and is applied to one or more Kubernetes clusters.
```mermaid
graph BT
Platform[Platform]
Cluster[Cluster]
Component[Component]
Helm[Helm]
Kustomize[Kustomize]
CUE[CUE]
Platform --> Cluster
Component --> Platform
Helm --> Component
Kustomize --> Component
CUE --> Component
```
# Holos' role in your workflow
Holos generates, but does not apply, rendered manifests, allowing teams to use
`git diff` to clearly see changes and assess the impact on clusters and services
before deploying. Instead of applying manifests itself, Holos manages the GitOps
configuration for tools like ArgoCD and Flux CD, enabling continuous deployment
of resource changes with the full visibility and control those tools provide. As
a result, expect Holos to sit at the very end of a CI pipeline.
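For example, the tail end of a pipeline typically renders the platform and then reviews the diff. The following is a minimal sketch, assuming a `holos render platform` subcommand and a `deploy/` output directory, both of which may differ by Holos version and configuration.
```bash
# Render fully expanded manifests for the platform (subcommand and output
# directory are assumptions; adjust for your Holos version and layout).
holos render platform ./platform
# Review exactly what changed before anything is applied to a cluster.
git diff deploy/
```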
# Advantages of using Holos
### Safety
* CUE constraints on Holos Components surface validation errors early in the
development process, reducing the number of failed deployments and the time
spent troubleshooting them.
* Holos natively surfaces the "blast radius" of a code change by identifying the
  rendered manifests across your fleet of Kubernetes clusters that will be
  affected by the change.
* CUE's unification strategy allows multiple teams to contribute to the desired
  state of a service:
  * The Platform team provides definitions for shared resources.
  * Engineering teams populate definitions with service-specific data.
  * The Security team provides concrete values, which cannot be overridden, to
    harden the company's security posture.
### Flexibility
* CUE is adept at modeling variation and organizational complexity at scale,
while Holos enables seamless integration of CUE data with native Kubernetes
tools such as [Helm][Helm] and [Kustomize][Kustomize].
* Holos is extensible, allowing Holos Components to be modeled from any tool that
generates (e.g. `helm`) or transforms (e.g. `kustomize`) manifest data.
### Consistency
* Holos manages the execution context for Helm and Kustomize, ensuring that
rendered manifests are consistently and reliably reproducible, no matter where
Holos is run.
* CUE constraints ensure that data abstractions include the required information
in the expected format, triggering early failures if any required data is
missing.
# When not to use Holos
Holos excels at configuration management and is most effective when integrated
into a broader Kubernetes deployment strategy. If you need a single tool to
manage the entire lifecycle of a Kubernetes cluster, Holos may not fully meet
your needs on its own.
# Next Steps
Now that you have a base understanding of how Holos works, continue
to the [Setup page](./2-setup.mdx) that guides you through installing Holos and
initializing your first Platform.
[rmp]: https://akuity.io/blog/the-rendered-manifests-pattern
[Helm]: https://helm.sh/
[Kustomize]: https://kustomize.io/
[CUE]: https://cuelang.org/


@@ -1,85 +0,0 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# Setup
This tutorial will guide you through the installation of Holos and its
dependencies, as well as the initialization of a minimal Platform that you can
extend to meet your specific needs.
# Prerequisites
Holos integrates with the following tools, which should be installed to enable
their functionality:
* [Helm][helm] to fetch and render Helm chart Components.
* [Kubectl][kubectl] to render [Kustomize][kustomize] Components.
# Install Holos
Holos is distributed as a single-file executable that can be installed in a couple of ways.
### Releases
Download `holos` from the [releases](https://github.com/holos-run/holos/releases) page and place the executable into your shell path.
### Go install
Alternatively, install directly into your `GOBIN` path using:
```shell
go install github.com/holos-run/holos/cmd/holos@latest
```
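Either way, a quick check confirms the executable is on your `PATH`:
```bash
# Confirm the holos executable is installed and on your PATH.
holos --help
```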
# Do I need a Kubernetes cluster?
Holos only generates rendered Kubernetes manifests, so you don't need a
Kubernetes cluster to start using the tool or understand its workflow. However,
you will need a cluster eventually to apply the rendered manifests and verify
that they achieve the desired end state.
We recommend using [k3d](https://k3d.io/) to set up a minimal Kubernetes cluster with
[Orbstack](https://docs.orbstack.dev/install) or
[Docker](https://docs.docker.com/get-started/get-docker/). To simplify this,
we've created [the Local Cluster guide](../topics/4-local-cluster.mdx) which
automates the process, ensuring proper DNS and TLS certificates, and includes a
script to reset the cluster to a known good state.
When you're ready to experiment with a live Kubernetes cluster, refer to [the
Local Cluster guide](../topics/4-local-cluster.mdx).
# Initialize a new Platform
Use Holos to generate a minimal platform with the recommended directory structure.
Create an empty directory, then run the `holos generate platform` subcommand:
<Tabs groupId="tutorial-setup-generate-platform">
<TabItem value="command" label="Command">
```bash
mkdir holos_platform
cd holos_platform
holos generate platform v1alpha4
```
</TabItem>
<TabItem value="output" label="Output">
```txt
no output
```
</TabItem>
</Tabs>
Holos creates a `platform` directory with a `platform.gen.cue` file that serves
as the foundation for your newly initialized Holos Platform. For each Holos
Component you model, you'll add a new CUE file to the platform directory,
mapping it to the location of the Component's CUE code, and extending the
capabilities of your platform.
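For reference, a listing along these lines is what you can expect after generation; the exact contents depend on the Holos version, so treat this as a sketch.
```bash
ls platform/
# Expected to include the generated platform definition, for example:
# platform.gen.cue
```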
# Next Steps
Now that you've got Holos and its prerequisites installed, continue on to the
[Hello Holos page](./3-hello-holos.mdx) where we will guide you through the
process of creating your first Component and modeling a Helm chart.
[helm]: https://github.com/helm/helm/releases
[kubectl]: https://kubernetes.io/docs/tasks/tools/
[kustomize]: https://kustomize.io/
