Compare commits

...

33 Commits

Author SHA1 Message Date
Jeff McCune
16a6447926 helm: support oci images in chart name
Without this patch we do not support installing Kargo from an OCI helm
chart.  We want to support:

```
Component: #Helm & {
	Name:      "kargo"
	Namespace: Kargo.Namespace

	Chart: {
		name:    "oci://ghcr.io/akuity/kargo-charts/kargo"
		version: "1.0.3"
		release: Name
	}
	EnableHooks: true

	Values: Kargo.Values
}
```

This patch fixes the problem by using the base name for filesystem cache
operations.
2024-12-03 12:15:06 -08:00
Jeff McCune
111a5944ff cue: bump to 0.11.0
go get cuelang.org/go/cmd/cue@latest
2024-12-02 12:37:19 -08:00
Jeff McCune
ff1446dc93 docs: redirect /docs/guides/helm/
This shows up in the Unity tests I'm working on with mvdan and goes to a
blank page without the redirect in place.

	--- FAIL: TestGuides_v1alpha5 (0.00s)
	   --- FAIL: TestGuides_v1alpha5/helm (0.60s)
	       testscript.go:584: # Helm Guide https://holos.run/docs/guides/helm/
2024-12-02 09:05:13 -08:00
Jeff McCune
67ef990c37 v0.101.2 build tags 2024-12-02 08:09:23 -08:00
Jeff McCune
6bd54ab856 render: pass build tags from platform to component (#366)
Previously, build tags were not propagated from `holos render platform
-t validate` through to the underlying `holos render component` command.
This is a problem because validators need to be selectively enabled as a
workaround until we have an audit mode field.

This patch fixes the problem by propagating command line tags from the
render platform command to the underlying commands.  This patch also
propagates tags for the show command.
2024-11-30 20:56:11 -08:00
Jeff McCune
89a23a10fd docs: remove DIRECTORY from holos render platform --help
The directory argument is deprecated now, use the --platform flag
instead.
2024-11-30 13:08:01 -08:00
Jeff McCune
5a939bb6fe render: support cue build tags e.g. -t foo for @if(foo) (#366)
Previously Holos only supported tags in the form of key=value.  CUE
supports boolean style tags in the form of `key [ "=" value ]` which we
want to use to conditionally register components with the
platform.

This patch modifies the flag parsing to support -t foo like cue does,
for use with the @if(foo) build tag.
2024-11-30 12:50:31 -08:00
Jeff McCune
ee16f14e03 refactor build plan pipeline
Previously the BuildPlan pipeline didn't execute generators and
transformers concurrently.  All steps were sequentially executed.  Holos
was primarily concurrent by executing multiple BuildPlans at once.

This patch changes the Build implementation for each BuildPlan to
execute a goroutine pipeline.  One producer fans out to a group of
routines each executing the pipeline for one artifact in the build plan.
The pipeline has 3 stages:

1: Fan-out to build each Generator concurrently.
2: Fan-in to build each Transformer sequentially.
3: Fan-out again to run each validator concurrently.

When the artifact pipelines return, the producer closes the tasks
channel causing the worker tasks to return.

Note the overall runtime for 8 BuildPlans is roughly equivalent to
previously at 160ms with --concurrency=8 on my M3 Max.  I expect this to
perform better than previously when multiple artifacts are rendered for
each BuildPlan.
2024-11-29 14:52:06 -08:00
Jeff McCune
7530345620 main: add tracing and profiling
Writes files based on parent pid and process pid to avoid collisions.

Analyze with:

export HOLOS_TRACE=trace.%d.%d.out
go tool trace trace.999.1000.out

export HOLOS_CPU_PROFILE=cpu.%d.%d.prof
go tool pprof cpu.999.1000.prof

export HOLOS_MEM_PROFILE=mem.%d.%d.prof
go tool pprof mem.999.1000.prof
2024-11-29 14:52:06 -08:00
Jeff McCune
47d60ef86d docs: fix cue vet path in validators post (#357)
Without this patch the validator fails if a component manages two of the
same kind of resource, which is common.

This patch updates the example to use the metadata namespace and name as
lookup keys.  This works for most components, but may not for
ClusterResources.  Use the kind top level field in that case and pass
the field name of the validator as a tag value to vary by component.
2024-11-25 19:35:02 -08:00
Jeff McCune
9e9f6efd04 docs: fix typos in validators blog post (#357) 2024-11-25 15:46:54 -08:00
Jeff McCune
fb4a043823 docs: add card for validators blog post (#357) 2024-11-25 15:11:11 -08:00
Jeff McCune
d718ab1910 docs: redirect /docs/local-cluster to the v1alpha5 topic 2024-11-25 10:53:31 -08:00
Jeff McCune
c649db18a9 docs: redirect /docs/quickstart to the overview 2024-11-25 10:43:16 -08:00
Jeff McCune
b3bddf3ee3 docs: add validators blog post (#357) 2024-11-25 08:49:27 -08:00
Jeff McCune
77836be250 docs: update readme 2024-11-25 08:10:11 -08:00
Jeff McCune
4db670b854 docs: add validators tutorial (#357)
Add a tutorial page on validators.
2024-11-24 21:34:09 -08:00
Jeff McCune
d87c919519 docs: redirect /docs/topics to structures
/docs/v1alpha5/api/author/ links to it in the opening paragraphs.
2024-11-24 19:38:30 -08:00
Jeff McCune
2184bda2a1 v1alpha5: add validators (#357) 2024-11-24 17:40:41 -08:00
Jeff McCune
a8ab4dabaa cue: fix holos cue vet exit code (#358)
Without this patch `holos cue vet` always returns exit code 0, even when
there are errors.

This patch fixes the problem by catching the error and returning it to
our own top level error handler.  Note the final error, "could not run:
terminating because of errors" which wraps the generic error reported by
cue in the presence of multiple errors.

Result:

```
❯ holos cue vet ./policy --path 'strings.ToLower(kind)' /tmp/podinfo.gen.yaml
deployment.kind: conflicting values "Forbidden" and "Deployment":
    ./policy/validations.cue:18:8
    ../../../../../tmp/podinfo.gen.yaml:25:7
deployment.spec.template.spec.containers.0.resources.limits: conflicting values null and {[string]:"k8s.io/apimachinery/pkg/api/resource".#Quantity} (mismatched types null and struct):
    ./cue.mod/gen/k8s.io/api/apps/v1/types_go_gen.cue:355:9
    ./cue.mod/gen/k8s.io/api/apps/v1/types_go_gen.cue:376:12
    ./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:2840:11
    ./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:2968:14
    ./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:3882:15
    ./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:3882:18
    ./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:5027:9
    ./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:6407:16
    ./policy/validations.cue:17:13
    ../../../../../tmp/podinfo.gen.yaml:104:19
could not run: terminating because of errors
```
2024-11-24 16:39:56 -08:00
Jeff McCune
7175950ce0 docs: redirect /docs/overview permalink 2024-11-24 08:58:08 -08:00
Jeff McCune
e186c1be37 docs: edits for grammar 2024-11-21 15:55:14 -08:00
Jeff McCune
a998513e34 docs: edit for grammar and make concise 2024-11-21 15:26:20 -08:00
Jeff McCune
2f821ec33c docs: add environments topic page (#354)
Similar to the Clusters topic, add a topic about configuring multiple
environments.  This likely needs some work, the example is a bit
contrivied but at least shows how we can look up attributes, then use
those attributes to look up additional configuration from platform-wide
configuration data.
2024-11-21 15:07:46 -08:00
Jeff McCune
4d54190f2e docs: remove empty topic pages
We'll add them back in as we write them.
2024-11-21 14:40:39 -08:00
Gary Larizza
b466ec3457 Merge pull request #353 from holos-run/gl/fix-platform-args
Remove deprecated argument to 'holos render platform'
2024-11-21 13:04:17 -08:00
Gary Larizza
45120797b9 Remove deprecated argument to 'holos render platform'
This commit removes the extra `./platform` argument from any of the
current tutorial/topic docs to reflect the change that was made to
`holos render platform` in version `0.100.0`.
2024-11-21 11:49:44 -08:00
Jeff McCune
e6d25bf5eb holos: improve OrderedEncoder error handling
If someone accidentally provides the same index multiple times, or
indexes less than the next expected, the program would silently discard
the data.  This would be difficult to troubleshoot since an
OrderedEncoder is usually used with concurrent go routines, which would
likely mislead the investigator.

Better to just fail hard with an error indicating the caller in these
situations.
2024-11-20 17:02:16 -08:00
Jeff McCune
3ad6e69336 docs: update report issue template
Show how we can copy and paste directly from the issue into testscript.
2024-11-20 15:58:02 -08:00
Gary Larizza
9b10e23e43 Redirect docs to current version (#352)
* Redirect docs to current version

PROBLEM:

See #350 for context. There's a GitHub issue open for this on the
facebook docusaurus repo:
https://github.com/facebook/docusaurus/issues/9049

SOLUTION:

Use the redirect workaround for the time being.

OUTCOME:

https://holos.run/docs will link to the most recent version of our docs
site.

* Update doc/website/static/_redirects

Signed-off-by: Jeff McCune <jeff@openinfrastructure.co>

---------

Signed-off-by: Jeff McCune <jeff@openinfrastructure.co>
Co-authored-by: Jeff McCune <jeff@openinfrastructure.co>
2024-11-20 15:33:14 -08:00
Jeff McCune
b19022d3ff tests: fill in the buildplan.yaml for better coverage (#348)
Assert against the complete build plan so we know if we change the
output format in the future.

It's easy to update if so:

  HOLOS_UPDATE_SCRIPTS=1 go test github.com/holos-run/holos/cmd/holos
2024-11-20 15:21:41 -08:00
Jeff McCune
3fd06f594b schemas: fix kustomize patch name field (#348)
Without this patch trying to use a Kustomize patch with the optional
name field omitted results in the following error:

  could not run: holos.spec.artifacts.0.transformers.0.kustomize.kustomization.patches.0.target.name: cannot convert non-concrete value string at builder/v1alpha5/builder.go:218
  holos.spec.artifacts.0.transformers.0.kustomize.kustomization.patches.0.target.name: cannot convert non-concrete value string:
      $WORK/cue.mod/gen/sigs.k8s.io/kustomize/api/types/var_go_gen.cue:33:2

This patch fixes the problem by providing a default value for the name
field matching the Go zero value for a string.
2024-11-20 15:08:44 -08:00
Jeff McCune
a75338f21c docs: add bug report issue template, disable dev deploy 2024-11-20 14:50:25 -08:00
53 changed files with 2275 additions and 851 deletions

View File

@@ -309,6 +309,7 @@
"traefik",
"transactionhistory",
"tsdb",
"txtar",
"typemeta",
"udev",
"uibutton",

131
.github/ISSUE_TEMPLATE/bug-report.md vendored Normal file
View File

@@ -0,0 +1,131 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: NeedsInvestigation, Triage
assignees: ''
---
<!--
Please answer these questions before submitting your issue. Thanks!
To ask questions, see https://github.com/holos-run/holos/discussions
-->
### What version of holos are you using (`holos --version`)?
```
0.0.0
```
### Does this issue reproduce with the latest release?
<!--
Get the latest release with:
brew install holos-run/tap/holos
Or see https://holos.run/docs/v1alpha5/tutorial/setup/
-->
### What did you do?
<!--
Please provide a testscript that should pass, but does not because of the bug.
See the below example.
You can create a txtar from a directory with:
holos txtar ./path/to/dir
Refer to: https://github.com/rogpeppe/go-internal/tree/master/cmd/testscript
-->
Steps to reproduce:
```shell
testscript -v -continue <<EOF
```
```txtar
# Have: an error related to the imported Kustomize schemas.
# Want: holos show buildplans to work.
exec holos --version
exec holos init platform v1alpha5 --force
# remove the fix to trigger the bug
rm cue.mod/pkg/sigs.k8s.io/kustomize/api/types/var.cue
# want a BuildPlan shown
exec holos show buildplans
cmp stdout buildplan.yaml
# want this error to go away
! stderr 'cannot convert non-concrete value string'
-- buildplan.yaml --
kind: BuildPlan
-- platform/example.cue --
package holos
Platform: Components: example: {
name: "example"
path: "components/example"
}
-- components/example/example.cue --
package holos
import "encoding/yaml"
holos: Component.BuildPlan
Component: #Kustomize & {
KustomizeConfig: Kustomization: patches: [
{
target: kind: "CustomResourceDefinition"
patch: yaml.Marshal([{
op: "add"
path: "/metadata/annotations/example"
value: "example-value"
}])
},
]
}
```
```shell
EOF
```
### What did you expect to see?
The testscript should pass.
### What did you see instead?
The testscript fails because of the bug.
```txt
# Have: an error related to the imported Kustomize schemas.
# Want: holos show buildplans to work. (0.168s)
> exec holos --version
[stdout]
0.100.1-2-g9b10e23-dirty
> exec holos init platform v1alpha5 --force
# remove the fix to trigger the bug (0.000s)
> rm cue.mod/pkg/sigs.k8s.io/kustomize/api/types/var.cue
# want a BuildPlan shown (0.091s)
> exec holos show buildplans
[stderr]
could not run: holos.spec.artifacts.0.transformers.0.kustomize.kustomization.patches.0.target.name: cannot convert non-concrete value string at builder/v1alpha5/builder.go:218
holos.spec.artifacts.0.transformers.0.kustomize.kustomization.patches.0.target.name: cannot convert non-concrete value string:
$WORK/cue.mod/gen/sigs.k8s.io/kustomize/api/types/var_go_gen.cue:33:2
[exit status 1]
FAIL: <stdin>:8: unexpected command failure
> cmp stdout buildplan.yaml
diff stdout buildplan.yaml
--- stdout
+++ buildplan.yaml
@@ -0,0 +1,1 @@
+kind: BuildPlan
FAIL: <stdin>:9: stdout and buildplan.yaml differ
# want this error to go away (0.000s)
> ! stderr 'cannot convert non-concrete value string'
FAIL: <stdin>:11: unexpected match for `cannot convert non-concrete value string` found in stderr: cannot convert non-concrete value string
failed run
```

View File

@@ -2,7 +2,7 @@ name: Dev Deploy
on:
push:
branches: ['main', 'dev-deploy']
branches: ['dev-deploy']
jobs:
deploy:

129
README.md
View File

@@ -1,35 +1,130 @@
## Holos - A Holistic Development Platform
# Holos
<img width="50%"
align="right"
style="display: block; margin: 40px auto;"
src="https://openinfrastructure.co/blog/2016/02/27/logo/logorectangle.png">
Building and maintaining a software development platform is a complex and time
consuming endeavour. Organizations often dedicate a team of 3-4 who need 6-12
months to build the platform.
[Holos] is a configuration management tool for Kubernetes implementing the
[rendered manifests pattern]. It handles configurations ranging from single
resources to multi-cluster platforms across regions.
Holos is a tool and a reference platform to reduce the complexity and speed up
the process of building a modern, cloud native software development platform.
Key components:
- Platform schemas defining component integration
- Building blocks unifying Helm, Kustomize and Kubernetes configs with CUE
- BuildPlan pipeline for generating, transforming and validating manifests
- **Accelerate new projects** - Reduce time to market and operational complexity by starting your new project on top of the Holos reference platform.
- **Modernize existing projects** - Incrementally incorporate your existing platform services into Holos for simpler integration.
- **Unified configuration model** - Increase safety and reduce the risk of config changes with CUE.
- **First class Helm and Kustomize support** - Leverage and reuse your existing investment in existing configuration tools such as Helm and Kustomize.
- **Modern Authentication and Authorization** - Holos seamlessly integrates platform identity and access management with zero-trust beyond corp style authorization policy.
```mermaid
---
title: Rendering Overview
---
graph LR
Platform[<a href="https://holos.run/docs/v1alpha5/api/author/#Platform">Platform</a>]
Component[<a href="https://holos.run/docs/v1alpha5/api/author/#ComponentConfig">Components</a>]
## Quick Installation
Helm[<a href="https://holos.run/docs/v1alpha5/api/author/#Helm">Helm</a>]
Kustomize[<a href="https://holos.run/docs/v1alpha5/api/author/#Kustomize">Kustomize</a>]
Kubernetes[<a href="https://holos.run/docs/v1alpha5/api/author/#Kubernetes">Kubernetes</a>]
```console
go install github.com/holos-run/holos/cmd/holos@latest
BuildPlan[<a href="https://holos.run/docs/v1alpha5/api/core/#BuildPlan">BuildPlan</a>]
ResourcesArtifact[<a href="https://holos.run/docs/v1alpha5/api/core/#Artifact">Resources<br/>Artifact</a>]
GitOpsArtifact[<a href="https://holos.run/docs/v1alpha5/api/core/#Artifact">GitOps<br/>Artifact</a>]
Generators[<a href="https://holos.run/docs/v1alpha5/api/core/#Generator">Generators</a>]
Transformers[<a href="https://holos.run/docs/v1alpha5/api/core/#Transformer">Transformers</a>]
Validators[<a href="https://holos.run/docs/v1alpha5/api/core/#Validator">Validators</a>]
Files[Manifest<br/>Files]
Platform --> Component
Component --> Helm --> BuildPlan
Component --> Kubernetes --> BuildPlan
Component --> Kustomize --> BuildPlan
BuildPlan --> ResourcesArtifact --> Generators
BuildPlan --> GitOpsArtifact --> Generators
Generators --> Transformers --> Validators --> Files
```
## Docs and Support
## Setup
The documentation for developing and using Holos is available at: https://holos.run
```shell
brew install holos-run/tap/holos
```
For discussion and support, [open a discussion](https://github.com/orgs/holos-run/discussions/new/choose).
Refer to [setup] for other installation methods and dependencies.
## Example
See our [tutorial] for a complete hello world example.
```cue showLineNumbers
package holos
holos: Component.BuildPlan
Component: #Helm & {
Name: "podinfo"
Chart: {
version: "6.6.2"
repository: {
name: "podinfo"
url: "https://stefanprodan.github.io/podinfo"
}
}
Values: ui: {
message: string | *"Hello World" @tag(message, type=string)
}
}
```
## Organizational Role
Platform engineers use Holos to generate Kubernetes manifests, both locally and
in CI pipelines. The manifests are committed to version control and deployed via
GitOps tools like ArgoCD or Flux.
Holos integrates seamlessly with existing Helm charts, Kustomize bases, and
other version-controlled configurations.
## Advantages of Holos
### Safe
Holos leverages [CUE] for strong typing and validation of configuration data,
ensuring consistent output from Helm and other tools.
### Consistent
A unified pipeline processes all configurations - whether from CUE, Helm, or
Kustomize - through the same well-defined stages.
### Flexible
Composable building blocks for generation, transformation, validation and
integration let teams assemble workflows that match their needs.
The core is intentionally unopinionated about platform configuration patterns.
Common needs like environments and clusters are provided as customizable
[topics] recipes rather than enforced structures.
## Getting Help
Get support through our [Discord] channel or [GitHub discussions]. Configuration
challenges arise at all experience levels - we welcome your questions and are
here to help.
## License
Holos is licensed under Apache 2.0 as found in the [LICENSE file](LICENSE).
[Holos]: https://holos.run
[rendered manifests pattern]: https://akuity.io/blog/the-rendered-manifests-pattern
[CUE]: https://cuelang.org/
[Discord]: https://discord.gg/JgDVbNpye7
[GitHub discussions]: https://github.com/holos-run/holos/discussions
[Why CUE for Configuration]: https://holos.run/blog/why-cue-for-configuration/
[topics]: https://holos.run/docs/topics/
[setup]: https://holos.run/docs/setup/
[tutorial]: https://holos.run/docs/tutorial/

View File

@@ -65,8 +65,10 @@ type ComponentConfig struct {
// Resources represents kubernetes resources mixed into the rendered manifest.
Resources core.Resources
// KustomizeConfig represents the configuration kustomize.
// KustomizeConfig represents the kustomize configuration.
KustomizeConfig KustomizeConfig
// Validators represent checks that must pass for output to be written.
Validators map[NameLabel]core.Validator
// Artifacts represents additional artifacts to mix in. Useful for adding
// GitOps resources. Each Artifact is unified without modification into the
// BuildPlan.

View File

@@ -40,14 +40,6 @@ type BuildPlanSpec struct {
Disabled bool `json:"disabled,omitempty" yaml:"disabled,omitempty"`
}
// BuildPlanSource reflects the origin of a [BuildPlan]. Useful to save a build
// plan to a file, then re-generate it without needing to process a [Platform]
// component collection.
type BuildPlanSource struct {
// Component reflects the component that produced the build plan.
Component Component `json:"component,omitempty" yaml:"component,omitempty"`
}
// Artifact represents one fully rendered manifest produced by a [Transformer]
// sequence, which transforms a [Generator] collection. A [BuildPlan] produces
// an [Artifact] collection.
@@ -72,6 +64,7 @@ type Artifact struct {
Artifact FilePath `json:"artifact,omitempty" yaml:"artifact,omitempty"`
Generators []Generator `json:"generators,omitempty" yaml:"generators,omitempty"`
Transformers []Transformer `json:"transformers,omitempty" yaml:"transformers,omitempty"`
Validators []Validator `json:"validators,omitempty" yaml:"validators,omitempty"`
Skip bool `json:"skip,omitempty" yaml:"skip,omitempty"`
}
@@ -206,15 +199,37 @@ type Kustomize struct {
// is expected to happen in CUE against the kubectl version the user prefers.
type Kustomization map[string]any
// FileContent represents file contents.
type FileContent string
// FileContentMap represents a mapping of file paths to file contents.
type FileContentMap map[FilePath]FileContent
// FilePath represents a file path.
type FilePath string
// FileContent represents file contents.
type FileContent string
// Validator validates files. Useful to validate an [Artifact] prior to writing
// it out to the final destination. Holos may execute validators concurrently.
// See the [validators] tutorial for an end to end example.
//
// [validators]: https://holos.run/docs/v1alpha5/tutorial/validators/
type Validator struct {
// Kind represents the kind of validator. Must be Command.
Kind string `json:"kind" yaml:"kind" cue:"\"Command\""`
// Inputs represents the files to validate. Usually the final Artifact.
Inputs []FilePath `json:"inputs" yaml:"inputs"`
// Command represents a validation command. Ignored unless kind is Command.
Command Command `json:"command,omitempty" yaml:"command,omitempty"`
}
// Command represents a command vetting one or more artifacts. Holos appends
// fully qualified input file paths to the end of the args list, then executes
// the command. Inputs are written into a temporary directory prior to
// executing the command and removed afterwards.
type Command struct {
Args []string `json:"args,omitempty" yaml:"args,omitempty"`
}
// InternalLabel is an arbitrary unique identifier internal to holos itself.
// The holos cli is expected to never write a InternalLabel value to rendered
// output files, therefore use a InternalLabel when the identifier must be

View File

@@ -0,0 +1,12 @@
# https://github.com/holos-run/holos/issues/358
# holos cue vet should fail verifications with exit code 1
! exec holos cue vet ./policy --path strings.ToLower(kind) ./data/secret.yaml
# holos cue vet should report validation errors to stderr
stderr 'Forbidden. Use an ExternalSecret instead.'
-- data/secret.yaml --
kind: Secret
-- policy/validators.cue --
package policy
secret: kind: "Forbidden. Use an ExternalSecret instead."

View File

@@ -7,11 +7,11 @@ cd $WORK
exec holos generate platform v1alpha5 --force
# Platforms are empty by default.
exec holos render platform ./platform
exec holos render platform
stderr -count=1 '^rendered platform'
# Holos uses CUE to build a platform specification.
exec cue export --expression holos --out=yaml ./platform
exec holos show platform
cmp stdout want/1.platform_spec.yaml
# Define the host and port in projects/blackbox.schema.cue
@@ -22,7 +22,7 @@ mv projects/platform/components/prometheus/prometheus.cue.disabled projects/plat
mv platform/prometheus.cue.disabled platform/prometheus.cue
# Render the platform to render the prometheus chart.
exec holos render platform ./platform
exec holos render platform
stderr -count=1 '^rendered prometheus'
stderr -count=1 '^rendered platform'
cmp deploy/components/prometheus/prometheus.gen.yaml want/1.prometheus.gen.yaml
@@ -73,8 +73,8 @@ core.#BuildPlan & {
metadata: name: _Tags.component.name
}
-- want/1.platform_spec.yaml --
kind: Platform
apiVersion: v1alpha5
kind: Platform
metadata:
name: default
spec:

View File

@@ -106,9 +106,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -135,9 +132,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -164,9 +158,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -193,9 +184,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
-- want/buildplans.1.yaml --
@@ -222,9 +210,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
-- want/buildplans.2.yaml --
@@ -251,9 +236,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
-- want/buildplans.3.yaml --
@@ -280,9 +262,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -309,9 +288,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -338,9 +314,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
-- want/buildplans.4.yaml --
@@ -367,9 +340,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -396,9 +366,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -425,9 +392,6 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
---
@@ -454,8 +418,5 @@ spec:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml

View File

@@ -0,0 +1,64 @@
# https://github.com/holos-run/holos/issues/348
# when the optional kustomize patch name field is omitted
exec holos init platform v1alpha5 --force
# want a buildplan shown
exec holos show buildplans
cmp stdout buildplan.yaml
# want this error to go away
! stderr 'cannot convert non-concrete value string'
-- platform/example.cue --
package holos
Platform: Components: example: {
name: "example"
path: "components/example"
}
-- components/example/example.cue --
package holos
import "encoding/yaml"
holos: Component.BuildPlan
Component: #Kustomize & {
KustomizeConfig: Kustomization: patches: [
{
target: kind: "CustomResourceDefinition"
patch: yaml.Marshal([{
op: "add"
path: "/metadata/annotations/example"
value: "example-value"
}])
},
]
}
-- buildplan.yaml --
kind: BuildPlan
apiVersion: v1alpha5
metadata:
name: example
spec:
artifacts:
- artifact: components/example/example.gen.yaml
generators:
- kind: Resources
output: resources.gen.yaml
transformers:
- kind: Kustomize
inputs:
- resources.gen.yaml
output: components/example/example.gen.yaml
kustomize:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
patches:
- patch: |
- op: add
path: /metadata/annotations/example
value: example-value
target:
kind: CustomResourceDefinition
name: ""
resources:
- resources.gen.yaml

View File

@@ -0,0 +1,50 @@
# https://github.com/holos-run/holos/issues/366
# Build tags conditionally include CUE files.
env HOME=$WORK
exec holos init platform v1alpha5 --force
exec holos show platform
cmp stdout want/empty.yaml
exec holos show platform -t foo
cmp stdout want/foo.yaml
-- platform/empty.cue --
@if(foo)
package holos
Platform: Components: foo: _
-- platform/metadata.cue --
package holos
Platform: Components: [NAME=string]: {
name: NAME
path: "components/empty"
labels: "app.holos.run/name": NAME
annotations: "app.holos.run/description": "\(NAME) empty test case"
}
-- components/empty/empty.cue --
package holos
Component: #Kubernetes & {}
holos: Component.BuildPlan
-- want/empty.yaml --
apiVersion: v1alpha5
kind: Platform
metadata:
name: default
spec:
components: []
-- want/foo.yaml --
apiVersion: v1alpha5
kind: Platform
metadata:
name: default
spec:
components:
- annotations:
app.holos.run/description: foo empty test case
labels:
app.holos.run/name: foo
name: foo
path: components/empty

View File

@@ -31,6 +31,7 @@ spec:
- kind: Resources
output: resources.gen.yaml
resources: {}
validators: []
transformers:
- kind: Kustomize
inputs:
@@ -38,9 +39,6 @@ spec:
output: components/no-name/no-name.gen.yaml
kustomize:
kustomization:
labels:
- includeSelectors: false
pairs: {}
resources:
- resources.gen.yaml
kind: Kustomization

View File

@@ -0,0 +1,37 @@
# https://github.com/holos-run/holos/issues/357
exec holos init platform v1alpha5 --force
! exec holos render platform
stderr 'secret.kind: conflicting values "Forbidden. Use an ExternalSecret instead." and "Secret"'
-- validators.cue --
package holos
import "github.com/holos-run/holos/api/author/v1alpha5:author"
#ComponentConfig: author.#ComponentConfig & {
Validators: cue: {
kind: "Command"
command: args: ["holos", "cue", "vet", "./policy", "--path", "strings.ToLower(kind)"]
}
}
-- policy/validations.cue --
package validations
secret: kind: "Forbidden. Use an ExternalSecret instead."
-- platform/example.cue --
package holos
Platform: Components: example: {
name: "example"
path: "components/example"
}
-- components/example/secret.cue --
package holos
holos: Component.BuildPlan
Component: #Kubernetes & {
Resources: Secret: test: {
metadata: name: "test"
}
}

View File

@@ -62,8 +62,10 @@ type ComponentConfig struct {
// Resources represents kubernetes resources mixed into the rendered manifest.
Resources core.Resources
// KustomizeConfig represents the configuration kustomize.
// KustomizeConfig represents the kustomize configuration.
KustomizeConfig KustomizeConfig
// Validators represent checks that must pass for output to be written.
Validators map[NameLabel]core.Validator
// Artifacts represents additional artifacts to mix in. Useful for adding
// GitOps resources. Each Artifact is unified without modification into the
// BuildPlan.

View File

@@ -16,9 +16,9 @@ Package core contains schemas for a [Platform](<#Platform>) and [BuildPlan](<#Bu
- [type Artifact](<#Artifact>)
- [type BuildPlan](<#BuildPlan>)
- [type BuildPlanSource](<#BuildPlanSource>)
- [type BuildPlanSpec](<#BuildPlanSpec>)
- [type Chart](<#Chart>)
- [type Command](<#Command>)
- [type Component](<#Component>)
- [type File](<#File>)
- [type FileContent](<#FileContent>)
@@ -38,6 +38,7 @@ Package core contains schemas for a [Platform](<#Platform>) and [BuildPlan](<#Bu
- [type Resource](<#Resource>)
- [type Resources](<#Resources>)
- [type Transformer](<#Transformer>)
- [type Validator](<#Validator>)
- [type Values](<#Values>)
@@ -59,6 +60,7 @@ type Artifact struct {
Artifact FilePath `json:"artifact,omitempty" yaml:"artifact,omitempty"`
Generators []Generator `json:"generators,omitempty" yaml:"generators,omitempty"`
Transformers []Transformer `json:"transformers,omitempty" yaml:"transformers,omitempty"`
Validators []Validator `json:"validators,omitempty" yaml:"validators,omitempty"`
Skip bool `json:"skip,omitempty" yaml:"skip,omitempty"`
}
```
@@ -85,18 +87,6 @@ type BuildPlan struct {
}
```
<a name="BuildPlanSource"></a>
## type BuildPlanSource {#BuildPlanSource}
BuildPlanSource reflects the origin of a [BuildPlan](<#BuildPlan>). Useful to save a build plan to a file, then re\-generate it without needing to process a [Platform](<#Platform>) component collection.
```go
type BuildPlanSource struct {
// Component reflects the component that produced the build plan.
Component Component `json:"component,omitempty" yaml:"component,omitempty"`
}
```
<a name="BuildPlanSpec"></a>
## type BuildPlanSpec {#BuildPlanSpec}
@@ -129,6 +119,17 @@ type Chart struct {
}
```
<a name="Command"></a>
## type Command {#Command}
Command represents a command vetting one or more artifacts. Holos appends fully qualified input file paths to the end of the args list, then executes the command. Inputs are written into a temporary directory prior to executing the command and removed afterwards.
```go
type Command struct {
Args []string `json:"args,omitempty" yaml:"args,omitempty"`
}
```
<a name="Component"></a>
## type Component {#Component}
@@ -414,6 +415,22 @@ type Transformer struct {
}
```
<a name="Validator"></a>
## type Validator {#Validator}
Validator validates files. Useful to validate an [Artifact](<#Artifact>) prior to writing it out to the final destination. Holos may execute validators concurrently. See the [validators](<https://holos.run/docs/v1alpha5/tutorial/validators/>) tutorial for an end to end example.
```go
type Validator struct {
// Kind represents the kind of validator. Must be Command.
Kind string `json:"kind" yaml:"kind" cue:"\"Command\""`
// Inputs represents the files to validate. Usually the final Artifact.
Inputs []FilePath `json:"inputs" yaml:"inputs"`
// Command represents a validation command. Ignored unless kind is Command.
Command Command `json:"command,omitempty" yaml:"command,omitempty"`
}
```
<a name="Values"></a>
## type Values {#Values}

View File

@@ -1,12 +0,0 @@
---
description: Re-use components by passing in parameters.
slug: component-parameters
sidebar_position: 400
---
# Component Parameters
Key points:
1. Components can be reused.
2. The Platform spec can pass user defined parameters to a component.

View File

@@ -1,12 +0,0 @@
---
description: Organize clusters into fleets.
slug: fleets
sidebar_position: 300
---
# Fleets
Key points:
1. Workload fleet.
2. Management fleet.

View File

@@ -177,7 +177,7 @@ source:
<Tabs groupId="E150C802-7162-4FBF-82A7-77D9ADAEE847">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -218,7 +218,6 @@ spec:
[CUE Module]: https://cuelang.org/docs/reference/modules/
[CUE Tags]: https://cuelang.org/docs/howto/inject-value-into-evaluation-using-tag-attribute/
[Application]: https://argo-cd.readthedocs.io/en/stable/user-guide/application-specification/
[Component Parameters]: ../component-parameters.mdx
[Platform]: ../../api/author.md#Platform
[ComponentConfig]: ../../api/author.md#ComponentConfig
[Artifact]: ../../api/core.md#Artifact

View File

@@ -1,15 +0,0 @@
---
description: Management Cluster
slug: management-cluster
sidebar_position: 200
---
# Management Cluster
Key points:
1. Namespaces
2. Certificates
3. Secrets
4. CronJobs
5. GKE autopilot

View File

@@ -1,12 +0,0 @@
---
description: Secrets Management
slug: secrets-management
sidebar_position: 150
---
# Secrets Management
Key points:
1. Namespaces
2. ExternalSecrets

View File

@@ -39,11 +39,12 @@ mkdir holos-multiple-clusters && cd holos-multiple-clusters
holos init platform v1alpha5
```
### Example Component
### Using an example Component
<CommonComponent />
We'll integrate the component with the platform after we define clusters.
We'll integrate the component with the platform after we define the
configuration structures.
## Defining Clusters
@@ -320,7 +321,7 @@ Render the platform to configure `podinfo` on each cluster.
<Tabs groupId="34A2D80B-0E86-4142-B65B-7DF70C47E1D2">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">

View File

@@ -7,23 +7,479 @@ sidebar_position: 130
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import CommonComponent from '../../common/example-component.mdx';
# Environments
## Overview
This topic covers how to model environments in Holos. We'll define schemas for
`#Environment` and `#Environments` to represent one environment and a
collection. The `Environments: #Environments` struct maps environment names to
configurations.
:::note
This approach unifies the component definition with the overall platform
configuration, creating a tight coupling between the two.
:::
This tight coupling is appropriate when you're configuring your own platform.
For example:
1. When you're integrating third party software into your own platform.
2. When you're integrating first-party, in-house software into your own platform.
This approach is not well suited to writing a component to share outside of your
own organization, which we can think of as configuring someone else's platform.
## The Code
### Generating the structure
Use `holos init platform` to generate a minimal platform structure:
```shell
mkdir holos-environments-tutorial && cd holos-environments-tutorial
holos init platform v1alpha5
```
### Using an example Component
<CommonComponent />
We'll integrate the component with the platform after we define the
configuration structures.
## Defining Environments
### Defining one Environment
We'll define an `#Environment` schema and an `#Environments` collection. We'll
use these schemas to define an `Environments` struct of concrete configuration
values.
### Defining a collection of Environments
### Assumptions
## Rendering manifests
There are two tiers of environments: prod and nonprod. Prod environments are
organized along broad jurisdictions, for example US and EU. Nonprod
environments are organized by purpose: dev, test, and stage.
## Reviewing the Manifests
### Prototyping the data
Before we define the schema, let's prototype the data structure we want to work
with from the perspective of each component.
Let's imagine we're configuring `podinfo` to comply with regulations. When
podinfo is deployed to production in the EU, we'll configure opt-in behavior.
In the US we'll configure opt-out behavior.
We'll pass the environment name as a component parameter. The component
definition can then look up the jurisdiction to determine the appropriate
configuration values.
```shell
holos cue export --out=yaml --expression Environments
```
```yaml showLineNumbers
prod-pdx:
name: prod-pdx
tier: prod
jurisdiction: us
state: oregon
prod-cmh:
name: prod-cmh
tier: prod
jurisdiction: us
state: ohio
prod-ams:
name: prod-ams
tier: prod
jurisdiction: eu
state: netherlands
dev:
name: dev
tier: nonprod
jurisdiction: us
state: oregon
test:
name: test
tier: nonprod
jurisdiction: us
state: oregon
stage:
name: stage
tier: nonprod
jurisdiction: us
state: oregon
```
### Defining the schema
Given the example structure, we can write a schema to define and validate the
data.
```shell
cat <<EOF > environments.schema.cue
```
```cue showLineNumbers
package holos
#Environment: {
name: string
tier: "prod" | "nonprod"
jurisdiction: "us" | "eu" | "uk" | "global"
state: "oregon" | "ohio" | "germany" | "netherlands" | "england" | "global"
// Prod environment names must be prefixed with prod for clarity.
if tier == "prod" {
name: "prod" | =~"^prod-"
}
}
#Environments: {
[NAME=string]: #Environment & {
name: NAME
}
}
```
```shell
EOF
```
### Adding configuration
With a schema defined, we can fill in the concrete values.
```shell
cat <<EOF > environments.cue
```
```cue showLineNumbers
package holos
// Injected from Platform.spec.components.parameters.EnvironmentName
EnvironmentName: string @tag(EnvironmentName)
Environments: #Environments & {
"prod-pdx": {
tier: "prod"
jurisdiction: "us"
state: "oregon"
}
"prod-cmh": {
tier: "prod"
jurisdiction: "us"
state: "ohio"
}
"prod-ams": {
tier: "prod"
jurisdiction: "eu"
state: "netherlands"
}
// Nonprod environments are colocated together.
_nonprod: {
tier: "nonprod"
jurisdiction: "us"
state: "oregon"
}
dev: _nonprod
test: _nonprod
stage: _nonprod
}
```
```shell
EOF
```
### Inspecting the configuration
Inspect the `Environments` data structure to verify the schema and concrete
values are what we want.
<Tabs groupId="FF820F5A-A85F-464D-B299-39CAAFFCE5C6">
<TabItem value="command" label="Command">
```bash
holos cue export --out=yaml --expression Environments
```
</TabItem>
<TabItem value="output" label="Output">
```yaml showLineNumbers
prod-pdx:
name: prod-pdx
tier: prod
jurisdiction: us
state: oregon
prod-cmh:
name: prod-cmh
tier: prod
jurisdiction: us
state: ohio
prod-ams:
name: prod-ams
tier: prod
jurisdiction: eu
state: netherlands
dev:
name: dev
tier: nonprod
jurisdiction: us
state: oregon
test:
name: test
tier: nonprod
jurisdiction: us
state: oregon
stage:
name: stage
tier: nonprod
jurisdiction: us
state: oregon
```
</TabItem>
</Tabs>
This looks like our prototype, so we're confident we can iterate over each
environment and get a handle on the configuration values we need.
## Integrating components
The `Environments` data structure unlocks the capability to look up concrete
values specific to a named environment. We'll use this capability to configure
the `podinfo` component in compliance with the regulations of the jurisdiction.
### Configuring the environment
Inject the environment name when we integrate `podinfo` with the platform.
```shell
cat <<EOF > platform/podinfo.cue
```
```cue showLineNumbers
package holos
Platform: Components: {
podinfoPDX: ProdPodinfo & {_city: "pdx"}
podinfoCMH: ProdPodinfo & {_city: "cmh"}
podinfoAMS: ProdPodinfo & {_city: "ams"}
podinfoDEV: {
name: "podinfo-dev"
path: "components/podinfo"
labels: "app.holos.run/component": "podinfo"
parameters: EnvironmentName: "dev"
}
}
let ProdPodinfo = {
_city: string
name: "podinfo-\(_city)"
path: "components/podinfo"
labels: "app.holos.run/component": "podinfo"
labels: "app.holos.run/tier": "prod"
labels: "app.holos.run/city": _city
parameters: EnvironmentName: "prod-\(_city)"
}
```
### Using the environment
Now we can configure `podinfo` based on the jurisdiction of the environment.
```shell
cat <<EOF > components/podinfo/cookie-consent.cue
```
```cue showLineNumbers
package holos
// Schema definition for our configuration.
#Values: {
ui: enableCookieConsent: *true | false
ui: message: string
}
// Map jurisdiction to helm values
JurisdictionValues: {
// Enable cookie consent by default in any jurisdiction.
[_]: #Values
// Disable in the US.
us: ui: enableCookieConsent: false
eu: ui: enableCookieConsent: true
}
// Look up the configuration values associated with the environment name.
Component: Values: JurisdictionValues[Environments[EnvironmentName].jurisdiction]
```
```shell
EOF
```
### Inspecting the BuildPlans
With the above configuration, we can inspect the buildplans for this component.
The prod environment in Amsterdam has cookie consent enabled on line 26.
<Tabs groupId="6EC991F3-F78C-43F1-8A6D-E68D8BDAF58B">
<TabItem value="command" label="Command">
```bash
holos show buildplans --selector app.holos.run/city=ams
```
</TabItem>
<TabItem value="output" label="Output">
```yaml showLineNumbers
kind: BuildPlan
apiVersion: v1alpha5
metadata:
name: podinfo-ams
labels:
app.holos.run/city: ams
app.holos.run/component: podinfo
app.holos.run/name: podinfo-ams
app.holos.run/tier: prod
spec:
artifacts:
- artifact: components/podinfo-ams/podinfo-ams.gen.yaml
generators:
- kind: Helm
output: helm.gen.yaml
helm:
chart:
name: podinfo
version: 6.6.2
release: podinfo
repository:
name: podinfo
url: https://stefanprodan.github.io/podinfo
values:
ui:
# highlight-next-line
enableCookieConsent: true
message: Hello World
- kind: Resources
output: resources.gen.yaml
transformers:
- kind: Kustomize
inputs:
- helm.gen.yaml
- resources.gen.yaml
output: components/podinfo-ams/podinfo-ams.gen.yaml
kustomize:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- helm.gen.yaml
- resources.gen.yaml
- artifact: gitops/podinfo-ams.application.gen.yaml
generators:
- kind: Resources
output: gitops/podinfo-ams.application.gen.yaml
resources:
Application:
podinfo-ams:
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: podinfo-ams
namespace: argocd
spec:
destination:
server: https://kubernetes.default.svc
project: default
source:
path: deploy/components/podinfo-ams
repoURL: https://github.com/brenix/holos-demo.git
targetRevision: main
```
</TabItem>
</Tabs>
In Portland cookie consent is disabled.
<Tabs groupId="3438335B-1FFC-4838-B8DE-C54B8346CDB4">
<TabItem value="command" label="Command">
```bash
holos show buildplans --selector app.holos.run/city=pdx
```
</TabItem>
<TabItem value="output" label="Output">
```yaml showLineNumbers
kind: BuildPlan
apiVersion: v1alpha5
metadata:
name: podinfo-pdx
labels:
app.holos.run/city: pdx
app.holos.run/component: podinfo
app.holos.run/name: podinfo-pdx
app.holos.run/tier: prod
spec:
artifacts:
- artifact: components/podinfo-pdx/podinfo-pdx.gen.yaml
generators:
- kind: Helm
output: helm.gen.yaml
helm:
chart:
name: podinfo
version: 6.6.2
release: podinfo
repository:
name: podinfo
url: https://stefanprodan.github.io/podinfo
values:
ui:
# highlight-next-line
enableCookieConsent: false
message: Hello World
- kind: Resources
output: resources.gen.yaml
transformers:
- kind: Kustomize
inputs:
- helm.gen.yaml
- resources.gen.yaml
output: components/podinfo-pdx/podinfo-pdx.gen.yaml
kustomize:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
labels:
- includeSelectors: false
pairs: {}
resources:
- helm.gen.yaml
- resources.gen.yaml
- artifact: gitops/podinfo-pdx.application.gen.yaml
generators:
- kind: Resources
output: gitops/podinfo-pdx.application.gen.yaml
resources:
Application:
podinfo-pdx:
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: podinfo-pdx
namespace: argocd
spec:
destination:
server: https://kubernetes.default.svc
project: default
source:
path: deploy/components/podinfo-pdx
repoURL: https://github.com/brenix/holos-demo.git
targetRevision: main
```
</TabItem>
</Tabs>
## Concluding Remarks
In this topic we covered how to use a CUE structure to define attributes of prod
and nonprod environments.
1. We passed the environment name as a parameter to each component using a CUE `@tag`.
2. The component definition uses the environment name as a key to get a handle
on attributes. For example, the jurisdiction a service operates within.
3. The example podinfo component uses an additional structure to map
jurisdictions to concrete configuration values.

View File

@@ -1,29 +0,0 @@
---
slug: owners
title: Owners
description: Managing and mapping projects to owners.
sidebar_position: 150
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# Owners
## Overview
## The Code
### Generating the structure
### Using an example Component
## Defining Owners
### Defining one Owner
### Defining a collection of Owners
## Rendering manifests
## Reviewing the Manifests

View File

@@ -1,29 +0,0 @@
---
slug: projects
title: Projects
description: Managing components organizing them into projects.
sidebar_position: 140
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# Projects
## Overview
## The Code
### Generating the structure
### Using an example Component
## Defining Projects
### Defining one Project
### Defining a collection of Projects
## Rendering manifests
## Reviewing the Manifests

View File

@@ -1,12 +0,0 @@
---
description: Workload Cluster
slug: workload-cluster
sidebar_position: 250
---
# Workload Cluster
Key points:
1. Namespaces
2. ExternalSecrets

View File

@@ -12,34 +12,34 @@ import TabItem from '@theme/TabItem';
## Overview
This tutorial demonstrates mixing additional resources into a component using
CUE. Holos components frequently mix in resources so we don't need to modify
existing charts or manifests. We'll add an [ExternalSecret] resource to the
podinfo Helm chart we configured in the [Hello Holos] tutorial.
This tutorial demonstrates how to add additional resources to a component using
CUE. Holos components often mix in resources, eliminating the need to modify
existing charts or manifests. In this tutorial, we'll add an [ExternalSecret]
resource to the podinfo Helm chart configured in the [Hello Holos] tutorial.
Key concepts:
1. Resources are validated against `#Resources` defined at the root.
2. Custom Resource Definitions need to be imported with timoni.
3. Helm, Kustomize, and CUE can be mixed in together in the same component.
2. Custom Resource Definitions need to be imported using Timoni.
3. Helm, Kustomize, and CUE can be mixed together within the same component.
## The Code
### Generating the structure
### Generating the Structure
Use `holos` to generate a minimal platform directory structure. First, create
and cd into a blank directory. Then use the `holos generate platform` command to
generate a minimal platform.
Use `holos` to generate a minimal platform directory structure. First, create
and navigate into a blank directory. Then, use the `holos generate platform`
command to generate a minimal platform.
```shell
mkdir holos-cue-tutorial && cd holos-cue-tutorial
holos init platform v1alpha5
```
### Creating the component
### Creating the Component
Create the directory for the `podinfo` component. Create an empty file and then
add the following CUE configuration to it.
Create the directory for the `podinfo` component. Create an empty file, then add
the following CUE configuration to it.
```bash
mkdir -p components/podinfo
@@ -72,7 +72,7 @@ Component: #Helm & {
EOF
```
Integrate the component with the platform.
Register the component with the platform.
```bash
cat <<EOF > platform/podinfo.cue
@@ -94,7 +94,7 @@ Render the platform.
<Tabs groupId="tutorial-hello-render-manifests">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -161,12 +161,13 @@ field validates against the `#Resources` definition in [resources.cue].
### Importing CRDs
Holos includes CUE schema definitions of the ExternalSecret custom resource
definition (CRD). These schemas are located in the `cue.mod` directory, written by
the `holos init platform` command we executed at the start of this tutorial.
Holos includes CUE schema definitions for the ExternalSecret Custom Resource
Definition (CRD). These schemas are located in the `cue.mod` directory,
generated by the `holos init platform` command executed at the start of this
tutorial.
Import your own custom resource definitions using [timoni]. We imported the
ExternalSecret CRDs embedded into `holos` with the following command.
To import your own custom resource definitions, use [Timoni]. We imported the
ExternalSecret CRDs embedded in `holos` using the following command.
<Tabs groupId="35B1A1A1-D7DF-4D27-A575-28556E182096">
<TabItem value="command" label="Command">
@@ -203,16 +204,16 @@ Take a look at
the imported definitions.
:::
Once imported, the last step is to add the resource kind to the `#Resources`
struct. This is most often accomplished by adding a new file which cue unifies
with the existing [resources.cue] file.
Once imported, the final step is to add the resource kind to the `#Resources`
struct. Typically, this is done by creating a new file that CUE unifies with the
existing [resources.cue] file.
## Reviewing Changes
Render the platform with the `ExternalSecret` mixed into the podinfo component.
```shell
holos render platform ./platform
holos render platform
```
Take a look at the diff to see the mixed in `ExternalSecret`.
@@ -249,22 +250,20 @@ index 6e4aec0..f79e9d0 100644
```
We saw how to mix in resources using the `Resources` field of the
[ComponentConfig]. This technique approach works for every kind of component in
Holos. We did this without needing to fork the upstream Helm chart so we can
easily update to new podinfo versions as they're released.
[ComponentConfig]. This approach works for every kind of component in Holos,
allowing seamless integration without needing to fork the upstream Helm chart.
This way, we can easily update to new podinfo versions as they're released.
## Trying Locally
Optionally apply the manifests Holos rendered to a [Local Cluster].
Optionally, apply the manifests rendered by Holos to a [Local Cluster] for
testing.
## Next Steps
This tutorial uses the `#Resources` structure to map resource kinds to their
schema definitions in CUE. This structure is defined in `resources.cue` at the
root of the tree. Take a look at [resources.cue] to see this mapping structure.
Continue to the next tutorial to learn how to define your own data structures
similar to this `#Resources` structure.
schema definitions in CUE. This structure is defined in `resources.cue` at the
root of the tree. Take a look at [resources.cue] to see this mapping structure.
[Local Cluster]: ../topics/local-cluster.mdx
[ExternalSecret]: https://external-secrets.io/latest/api/externalsecret/

View File

@@ -15,32 +15,22 @@ import ComponentSequence from '@site/src/diagrams/render-component-sequence.mdx'
## Overview
One of the first exercises we do when learning a new programming language is
printing out a "Hello World!" greeting. Hello Holos configures the [podinfo Helm
chart][podinfo] producing a similar greeting from a Kubernetes Service.
Like a traditional "Hello World" program, we'll start by configuring the
[podinfo Helm chart][podinfo] to output a greeting from a Kubernetes Service.
This introduces the core concept of wrapping Helm charts as Holos Components.
By the end of this tutorial you have an understanding of how to wrap a Helm
Chart as a Holos Component.
## Implementation
## The Code
### Initialize Platform Structure
### Generating the structure
Use `holos` to generate a minimal platform directory structure. Start by
creating a blank directory to hold the platform configuration.
Create and initialize a minimal platform:
```shell
mkdir holos-tutorial && cd holos-tutorial
```
Use the `holos init platform` command to initialize a minimal platform in the
blank directory.
```shell
holos init platform v1alpha5
```
Here's the filesystem tree we'll build in this tutorial.
The resulting directory structure:
<Tabs groupId="80D04C6A-BC83-44D0-95CC-CE01B439B159">
<TabItem value="tree" label="Tree">
@@ -104,10 +94,9 @@ initialization.
</TabItem>
</Tabs>
### Creating a component
### Create the Component
Start by creating a directory for the `podinfo` component. Create an empty file
and then add the following CUE configuration to it.
Configure the `podinfo` component:
```bash
mkdir -p components/podinfo
@@ -145,20 +134,19 @@ EOF
```
:::important
CUE loads all of `*.cue` files in the component directory to define component,
similar to Go packages.
Like Go packages, CUE loads all `*.cue` files in the component directory to
define the component.
:::
:::note
CUE _also_ loads all `*.cue` files from the component leaf directory to the
platform root directory. In this example, `#Helm` on line 6 is defined in
`schema.cue` at the root.
CUE recursively loads `*.cue` files from the component directory up to the
platform root. For example, `#Helm` referenced on line 6 is defined in
root-level `schema.cue`.
:::
### Integrating the component
### Add to Platform
Integrate the `podinfo` component into the platform by creating a new CUE file
in the `platform` directory with the following content.
Register the `podinfo` component in `platform/podinfo.cue`:
```bash
cat <<EOF > platform/podinfo.cue
@@ -178,19 +166,17 @@ EOF
```
:::tip
Component parameters may have any name as long as they don't start with
`holos_`.
Parameter names are unrestricted, except for the reserved `holos_` prefix.
:::
## Rendering manifests
## Generate Manifests
Render a manifest for `podinfo` using the `holos render platform ./platform`
command. The `platform/` directory is the main entrypoint for this command.
Render the `podinfo` configuration:
<Tabs groupId="E150C802-7162-4FBF-82A7-77D9ADAEE847">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -202,10 +188,7 @@ rendered platform in 1.938759417s
</TabItem>
</Tabs>
:::important
Holos rendered the following manifest file by executing `helm template` after
caching `podinfo` locally.
:::
Holos executes `helm template` with locally cached charts to generate:
```txt
deploy/components/podinfo/podinfo.gen.yaml
@@ -348,21 +331,24 @@ grep -B2 Hello deploy/components/podinfo/podinfo.gen.yaml
## Breaking it down
We run `holos render platform ./platform` because the CUE files in the platform
directory export a [Platform] resource to `holos`. The platform directory is
the entrypoint to the platform rendering process.
We run `holos render platform` because the CUE files in the platform directory
export a [Platform] resource to `holos`.
Components are the building blocks for a Platform. The `platform/podinfo.cue`
file integrates the `podinfo` Component with the Platform.
:::important
The `platform/` directory is the default entry point to the platform rendering
process. Override with `--platform <dir>`.
:::
Holos requires two fields to integrate a component with the platform.
Components are the building blocks of a Platform. The `platform/podinfo.cue`
file integrates the `podinfo` component with the Platform.
Holos requires two fields to integrate a component with the platform:
1. A unique name for the component.
2. The component path to the directory containing the CUE files exporting a
2. The component path to the directory containing the CUE files that export a
`BuildPlan` defining the component.
Component parameters are optional. They allow re-use of the same component.
Refer to the [Component Parameters] topic for more information.
Component parameters are optional and allow re-use of the same component.
<Tabs groupId="67C1EE71-3EA8-4568-9F6D-0072BA09FF12">
<TabItem value="overview" label="Rendering Overview">
@@ -379,12 +365,12 @@ Refer to the [Component Parameters] topic for more information.
## Next Steps
We've shown how to integrate one Helm chart to the Platform, but we haven't yet
covered multiple Helm charts. Continue on with the next tutorial to learn how
Holos makes it easy to inject values into multiple components safely and easily.
We've shown how to integrate one Helm chart into the Platform, but we haven't
yet covered multiple Helm charts. Continue with the next tutorial to learn how
Holos makes it easy to inject values into multiple components safely and
efficiently.
[podinfo]: https://github.com/stefanprodan/podinfo
[CUE Module]: https://cuelang.org/docs/reference/modules/
[CUE Tags]: https://cuelang.org/docs/howto/inject-value-into-evaluation-using-tag-attribute/
[Platform]: ../api/author.md#Platform
[Component Parameters]: ../topics/component-parameters.mdx

View File

@@ -17,28 +17,23 @@ import TabItem from '@theme/TabItem';
## Overview
Holos makes it easier to integrate multiple Helm charts together. Holos adds
valuable capabilities to Helm and Kustomize.
Holos simplifies integrating multiple Helm charts by adding valuable
capabilities to Helm and Kustomize:
1. Inject the same value into two or more charts to integrate them safer than Helm alone.
2. Add strong type checking and validation of constraints for Helm input values.
3. Implementation of the [rendered manifests pattern].
1. Inject the same value into multiple charts more safely than using Helm alone.
2. Add strong type checking and validation for Helm input values.
3. Implement the [rendered manifests pattern].
We'll manage the [prometheus] and [blackbox] Helm Charts in this tutorial.
Unfortunately, the upstream `values.yaml` files are misconfigured by default.
Prometheus tries to connect to Blackbox at the wrong host and port.
:::tip
Holos and CUE makes it easy to fix this problem by integrating them correctly.
The integrated charts will stay in lock step over time.
:::
In this tutorial, we'll manage the [prometheus] and [blackbox] Helm charts. By
default, the upstream `values.yaml` files are misconfigured, causing Prometheus
to connect to Blackbox at the wrong host and port.
## The Code
### Generating the structure
Use `holos` to generate a minimal platform directory structure. First, create
and cd into a blank directory. Then use the `holos init platform` command.
Use `holos` to generate a minimal platform directory structure. First, create
and navigate into a blank directory, then use the `holos init platform` command:
```shell
mkdir holos-helm-values-tutorial
@@ -46,10 +41,10 @@ cd holos-helm-values-tutorial
holos init platform v1alpha5
```
Make a commit to track changes.
Make an initial commit to track changes:
```bash
git init . && git add . && git commit -m initial
git init . && git add . && git commit -m "initial commit"
```
### Managing the Components
@@ -114,10 +109,9 @@ EOF
</TabItem>
</Tabs>
### Integrating the Components
### Register the Components
Integrate the components with the platform by adding the following file to the
platform directory.
Register the components with the platform by adding the following file to the platform directory.
```bash
cat <<EOF > platform/prometheus.cue
@@ -145,7 +139,7 @@ Render the platform.
<Tabs groupId="33D6BFED-62D8-4A42-A26A-F3121D57C4E5">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -182,9 +176,8 @@ git add . && git commit -m 'add blackbox and prometheus'
### Importing Helm Values
Holos renders the helm charts with their default values. We can import these
default values into CUE so we can easily work with them as structured data
instead of text markup.
Holos renders Helm charts with their default values. We can import these default
values into CUE to work with them as structured data instead of text markup.
```bash
holos cue import \
@@ -206,15 +199,15 @@ These commands convert the YAML data into CUE code and nest the values under the
`Values` field of the `Helm` struct.
:::important
CUE unifies `values.cue` with the other `*.cue` files in the same directory.
CUE unifies `values.cue` with the other `*.cue` files in the same directory.
:::
Render the platform and commit the results.
Render the platform using `holos render platform` and commit the results.
<Tabs groupId="BDDCD65A-2E9D-4BA6-AAE2-8099494D5E4B">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -244,9 +237,10 @@ git add . && git commit -m 'import values'
### Managing Common Configuration
Define a structure to hold the configuration values we want both helm charts to
use. We add this configuration to the `components` directory so it's in scope
for all components.
To manage shared configuration for both Helm charts, define a structure that
holds the common configuration values. Place this configuration in the
`components` directory to ensure it is accessible to all components.
```bash
cat <<EOF > components/blackbox.cue
@@ -296,13 +290,13 @@ git add . && git commit -m 'add blackbox configuration'
</TabItem>
</Tabs>
### Referring to Common Configuration
### Using Common Configuration Across Components
Referencing common configuration from multiple components is easy and safe with
Holos and CUE.
Referencing common configuration across multiple components is straightforward
and reliable using Holos and CUE.
Patch the two `values.cue` files, or edit them by hand, to reference
`Blackbox.host` and `Blackbox.port`.
To apply the common configuration, patch the two `values.cue` files, or manually
edit them to reference `Blackbox.host` and `Blackbox.port`.
<Tabs groupId="5FFCE892-B8D4-4F5B-B2E2-39EC9E9F87A4">
<TabItem value="command" label="Command">
@@ -379,13 +373,13 @@ git add . && git commit -m 'integrate blackbox and prometheus together'
## Reviewing Changes
Holos makes it easy to see and review platform wide changes. Render the
platform to see how both prometheus and blackbox change in lock step.
Holos makes it easy to view and review platform-wide changes. Render the
platform to observe how both Prometheus and Blackbox update in sync.
<Tabs groupId="E7F6D8B1-22FA-4075-9B44-D9F2815FE0D3">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -397,7 +391,7 @@ rendered platform in 383.270625ms
</TabItem>
</Tabs>
Changes are easily visible with version control.
Changes are easily visible in version control.
<Tabs groupId="9789A0EF-24D4-4FB9-978A-3895C2778789">
<TabItem value="command" label="Command">
@@ -476,19 +470,18 @@ index 9e02bce..ab638f0 100644
</TabItem>
</Tabs>
From the diff we know this change will:
From the diff, we can see this change will:
1. Reconfigure the blackbox exporter host from `prometheus-blackbox-exporter` to `blackbox`.
2. Have no effect on the blackbox service port. It was already using the default 9115.
3. Reconfigure prometheus to query the blackbox exporter at the correct host and
port, `blackbox:9115`.
1. Reconfigure the Blackbox Exporter host from `prometheus-blackbox-exporter` to `blackbox`.
2. Have no effect on the Blackbox service port, as it was already using the default `9115`.
3. Reconfigure Prometheus to query the Blackbox Exporter at the correct host and port, `blackbox:9115`.
Without this change prometheus incorrectly assumed blackbox was listening at
Without this change, Prometheus incorrectly assumed Blackbox was listening at
`blackbox` on port `80` when it was actually listening at
`prometheus-blackbox-exporter` port `9115`. Going forward, changing the
blackbox host or port will reconfigure both charts correctly.
`prometheus-blackbox-exporter` on port `9115`. Going forward, changing the
Blackbox host or port will reconfigure both charts correctly.
Commit the changes and move on to deploying them.
Commit the changes and proceed to deploy them.
<Tabs groupId="F8C9A98D-DE1E-4EF6-92C1-017A9166F6C7">
<TabItem value="command" label="Command">
@@ -506,14 +499,15 @@ git add . && git commit -m 'render integrated blackbox and prometheus manifests'
## Trying Locally
Optionally apply the manifests Holos rendered to a [Local Cluster].
Optionally, apply the manifests rendered by Holos to a [Local Cluster].
## Next Steps
In this tutorial we learned how Holos makes it easier to holistically integrate
the [prometheus] and [blackbox] charts so they're configured in lock step with
each other. If we relied on Helm alone, there is no good way to configure both
charts to use the same service endpoint.
In this tutorial, we learned how Holos simplifies the holistic integration of
the [prometheus] and [blackbox] charts, ensuring they are configured
consistently. By using Holos, we overcome the limitations of relying solely on
Helm, which lacks an effective method to configure both charts to use the same
service endpoint.
[rendered manifests pattern]: https://akuity.io/blog/the-rendered-manifests-pattern
[prometheus]: https://github.com/prometheus-community/helm-charts/tree/prometheus-25.27.0/charts/prometheus

View File

@@ -12,14 +12,14 @@ import TabItem from '@theme/TabItem';
## Overview
In the previous tutorial we learned how Holos makes it easier to holistically
integrate the [prometheus] and [blackbox] charts so they're configured in lock
step with each other.
In the previous tutorial, we learned how Holos simplifies the holistic
integration of the [prometheus] and [blackbox] charts, ensuring they are
configured in sync.
This tutorial goes further by integrating the [httpbin] service with prometheus
and blackbox to automatically probe for availability.
In this tutorial, we'll go a step further by integrating the [httpbin] service
with Prometheus and Blackbox to automatically probe for availability.
We'll explore how Holos manages [kustomize] bases similar to the Helm kind
We'll also explore how Holos manages [kustomize] bases, similar to the Helm kind
covered in the [Helm Values] tutorial.
## The Code
@@ -34,8 +34,10 @@ Otherwise click the **Generate** tab to generate a blank platform now.
:::
</TabItem>
<TabItem value="generate" label="Generate">
Use `holos` to generate a minimal platform directory structure. First, create
and cd into a blank directory. Then use the `holos init platform` command.
Use `holos` to generate a minimal platform directory structure. First, create
and navigate into a blank directory. Then, run the `holos init platform`
command.
```shell
mkdir holos-kustomize-tutorial
@@ -54,8 +56,8 @@ git init . && git add . && git commit -m initial
### Managing the Component
Create the `httpbin` component directory and add the `httpbin.cue` and
`httpbin.yaml` files to it.
Create the `httpbin` component directory, and add the `httpbin.cue` and
`httpbin.yaml` files to it for configuration and setup.
<Tabs groupId="800C3AE7-E7F8-4AFC-ABF1-6AFECD945958">
<TabItem value="setup" label="Setup">
@@ -154,9 +156,9 @@ EOF
Holos knows the `httpbin.yaml` file is part of the BuildPlan because of the
`KustomizeConfig: Files: "httpbin.yaml": _` line in the `httpbin.cue`.
### Integrating the Components
### Register the Components
Integrate `httpbin` with the platform by adding the following file to the
Register `httpbin` with the platform by adding the following file to the
platform directory.
```bash
@@ -181,7 +183,7 @@ Render the platform.
<Tabs groupId="B120D5D1-0EAB-41E0-AD21-15526EBDD53D">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -212,10 +214,10 @@ git add . && git commit -m 'add httpbin'
</TabItem>
</Tabs>
### Inspecting the BuildPlan
### Inspecting the Build Plan
We can see the [BuildPlan] exported to `holos` by the `holos:
Kustomize.BuildPlan` line in `httpbin.cue`. Holos processes this build plan to
Kustomize.BuildPlan` line in `httpbin.cue`. Holos processes this build plan to
produce the fully rendered manifests.
<Tabs groupId="DD697D65-5BEC-4B92-BB33-59BE4FEC112F">
@@ -270,19 +272,21 @@ source:
</TabItem>
</Tabs>
### Transforming manifests
### Transforming Manifests
Reviewing the BuildPlan exported in the previous command:
Review the BuildPlan exported in the previous command:
1. The [File Generator] copies the plain `httpbin.yaml` file into the build.
2. The [Kustomize Transformer] uses `httpbin.yaml` as an input resource.
3. The final artifact is the output from Kustomize.
This BuildPlan transforms the raw yaml by labeling all of the resources with
This BuildPlan transforms the raw YAML by labeling all of the resources with
`"app.kubernetes.io/name": "httpbin"` using the [KustomizeConfig] `CommonLabels`
field. We still need to integrate `httpbin` with `prometheus`. Annotate the
Service with `prometheus.io/probe: "true"` to complete the integration. Holos
makes this easier with CUE. We don't need to edit any yaml files.
field.
To complete the integration with Prometheus, annotate the Service with
`prometheus.io/probe: "true"`. Holos makes this easier with CUE, so there's no
need to edit any YAML files manually.
Add a new `patches.cue` file to the `httpbin` component with the following
content.
@@ -324,7 +328,7 @@ Render the platform to see the result of the kustomization patch.
<Tabs groupId="5D1812DD-8E7B-4F97-B349-275214F38B6E">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
@@ -382,17 +386,19 @@ git add . && git commit -m 'annotate httpbin for prometheus probes'
## Trying Locally
Optionally apply the manifests Holos rendered to a [Local Cluster].
Optionally, apply the manifests rendered by Holos to a [Local Cluster] for
testing and validation.
## Next Steps
We learned how Holos makes it easier to manage [httpbin], distributed as a
Kustomize base, using a kustomize Component similar to the helm component we saw
previously. Holos offers a clear way to kustomize any component, patching an
annotation onto the `httpbin` Service in this example.
In this tutorial, we learned how Holos simplifies managing [httpbin], which is
distributed as a Kustomize base. We used a Kustomize component similar to the
Helm component covered previously. Holos provides a straightforward way to
customize any component, demonstrated by patching an annotation onto the
`httpbin` Service.
Continue on with the tutorial to explore how Holos makes it easier to manage
certificates and make services accessible outside of a cluster.
Continue with the tutorial to learn how Holos facilitates certificate management
and makes services accessible outside of a cluster.
[httpbin]: https://github.com/mccutchen/go-httpbin/tree/v2.15.0
[prometheus]: https://github.com/prometheus-community/helm-charts/tree/prometheus-25.27.0/charts/prometheus

View File

@@ -11,19 +11,16 @@ import RenderingOverview from '@site/src/diagrams/rendering-overview.mdx';
## Overview
Holos is a configuration management tool for Kubernetes resources. It provides
the building blocks needed for implementing the [rendered manifests pattern].
It gives the flexibility to manage a wide range of configurations, from large
software delivery platforms spanning multiple clusters and regions to generating
a single resource on your local device.
Holos is a configuration management tool for Kubernetes implementing the
[rendered manifests pattern]. It handles configurations ranging from single
resources to multi-cluster platforms across regions.
{/* truncate */}
At a high level, Holos provides a few major components:
- A Platform schema for specifying how components integrate together into a platform.
- Component building blocks for Helm, Kustomize, and Kubernetes to unify configuration with CUE.
- A BuildPlan orchestrating generators, transformers, and validators to produce manifest files.
Key components:
- Platform schemas defining component integration
- Building blocks unifying Helm, Kustomize and Kubernetes configs with CUE
- BuildPlan pipeline for generating, transforming and validating manifests
<RenderingOverview />
@@ -31,43 +28,39 @@ At a high level, Holos provides a few major components:
## Holos' role in your organization
Platform engineers run the `holos render platform` command locally and in CI to
produce Kubernetes manifests which are committed to version control. GitOps
tools like ArgoCD or Flux deploy the manifests produced by Holos.
Platform engineers use Holos to generate Kubernetes manifests, both locally and
in CI pipelines. The manifests are committed to version control and deployed via
GitOps tools like ArgoCD or Flux.
Holos works well with your existing Helm charts, kustomize bases, and any other
configuration data you currently store in version control.
Holos integrates seamlessly with existing Helm charts, Kustomize bases, and
other version-controlled configurations.
## Advantages of Holos
### Safe
Holos uses [CUE] to provide strong typing and constraints to configuration data.
Additionally, holos adds strong validation to verify the output produced by Helm
and other tools.
Holos leverages [CUE] for strong typing and validation of configuration data,
ensuring consistent output from Helm and other tools.
### Consistent
Holos offers a consistent way to incorporate a wide variety of tools into a well
defined data pipeline. Configuration produced from CUE, Helm, and Kustomize are
all handled with the same consistent process.
A unified pipeline processes all configurations - whether from CUE, Helm, or
Kustomize - through the same well-defined stages.
### Flexible
Holos is designed to be flexible. Holos offers flexible building blocks for
data generation, transformation, validation, and integration. Find the perfect
fit for your team by assembling these building blocks to your unique needs.
Composable building blocks for generation, transformation, validation and
integration let teams assemble workflows that match their needs.
Holos does not have an opinion on many common aspects of platform configuration.
For example, environments and clusters are explicitly kept out of the core and
instead are provided as flexible, user-customizable [topics] organized as a
recipes.
The core is intentionally unopinionated about platform configuration patterns.
Common needs like environments and clusters are provided as customizable
[topics] recipes rather than enforced structures.
## Getting Help
If you get stuck, you can get help on [Discord] or [GitHub discussions]. Don't
worry about asking "beginner" questions, configuration is often complex even for
the most experienced among us. We all start somewhere and are happy to help.
Get support through our [Discord] channel or [GitHub discussions]. Configuration
challenges arise at all experience levels - we welcome your questions and are
here to help.
[rendered manifests pattern]: https://akuity.io/blog/the-rendered-manifests-pattern
[CUE]: https://cuelang.org/

View File

@@ -11,22 +11,23 @@ import RenderPlatformDiagram from '@site/src/diagrams/render-platform-sequence.m
# Setup
## Overview
This tutorial will guide you through the installation of Holos and its
dependencies, as well as the initialization of a minimal Platform that you can
extend to meet your specific needs.
## Installing
Holos is distributed as a single file executable that can be installed in a
couple of ways.
Holos is a single executable that can be installed via:
<Tabs groupId="FE2C74C8-B3A3-4AEA-BBD3-F57FAA654B6F">
<TabItem value="brew" label="Install with brew">
<TabItem value="brew" label="macOS">
```bash
brew install holos-run/tap/holos
```
</TabItem>
<TabItem value="linux" label="Linux">
Download holos from the [releases] page and place the executable into your shell
path.
</TabItem>
<TabItem value="win" label="Windows">
Download holos from the [releases] page and place the executable into your shell
path.
</TabItem>
<TabItem value="go" label="Go">
```bash
@@ -60,31 +61,22 @@ holos completion powershell | Invoke-Expression
</TabItem>
</Tabs>
### Releases
Download `holos` from the [releases] page and place the executable into your
shell path.
### Dependencies
Holos integrates with the following tools that should be installed to enable
their functionality.
Install these tools to use Holos's full capabilities:
- [Helm] to fetch and render Helm chart Components.
- [Kubectl] to [kustomize] components.
- [Helm] for chart management and rendering
- [Kubectl] for [kustomize] operations
:::note
Holos is tested with Helm version `v3.16.2`.
Holos is tested with Helm `v3.16.2`. If you see `Error: chart requires
kubeVersion` errors, try upgrading Helm.
:::
Please try upgrading helm if you encounter `Error: chart requires kubeVersion
...` errors.
## Next Steps
You've got the structure of your platform configuration in place. Continue on to
[Hello Holos] where you'll learn how easy it is to manage a Helm chart with
holos.
With your platform structure initialized, proceed to [Hello Holos] to learn Helm
chart management.
[Helm]: https://github.com/helm/helm/releases
[Kubectl]: https://kubernetes.io/docs/tasks/tools/

View File

@@ -0,0 +1,428 @@
---
slug: validators
title: Validators
description: Validate rendered manifests against policy definitions.
sidebar_position: 60
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import RenderingOverview from '@site/src/diagrams/rendering-overview.mdx';
# Validators
## Overview
Sometimes Helm charts render Secrets we do not want committed to version
control for security reasons. Helm charts also often render incorrect
manifests, even if they're accepted by the API server — for example, passing
`null` to collection fields. We'll solve both of these issues using a
[Validator] to block artifacts containing a Secret resource and to verify the
artifact against Kubernetes type definitions.
1. If a Helm chart renders a Secret, Holos errors before writing the artifact
and suggests an ExternalSecret instead.
2. Each resource is validated against a field named by the value of the kind
field. For example, a `kind: Secret` resource validates against `secret: {}` in
CUE. `kind: Deployment` validates against `deployment: {}` in CUE.
3. The final artifact is validated, covering the output of all generators and
transformers.
<RenderingOverview />
## The Code
### Generating the Structure
Use `holos` to generate a minimal platform directory structure. First, create
and navigate into a blank directory. Then, use the `holos init platform`
command to generate a minimal platform.
```shell
mkdir holos-validators-tutorial && cd holos-validators-tutorial
holos init platform v1alpha5
```
### Creating the Component
Create the directory for the `podinfo` component. Create an empty file, then add
the following CUE configuration to it.
```bash
mkdir -p components/podinfo
```
```bash
cat <<EOF > components/podinfo/podinfo.cue
```
```cue showLineNumbers
package holos
// export the component build plan to holos
holos: Component.BuildPlan
// Component is a Helm chart
Component: #Helm & {
Name: "podinfo"
Namespace: "default"
// Add metadata.namespace to all resources with kustomize.
KustomizeConfig: Kustomization: namespace: Namespace
Chart: {
version: "6.6.2"
repository: {
name: "podinfo"
url: "https://stefanprodan.github.io/podinfo"
}
}
}
```
```bash
EOF
```
Register the component with the platform.
```bash
cat <<EOF > platform/podinfo.cue
```
```cue showLineNumbers
package holos
Platform: Components: podinfo: {
name: "podinfo"
path: "components/podinfo"
}
```
```bash
EOF
```
Render the platform.
<Tabs groupId="tutorial-hello-render-manifests">
<TabItem value="command" label="Command">
```bash
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
```
cached podinfo 6.6.2
rendered podinfo in 1.938665041s
rendered platform in 1.938759417s
```
</TabItem>
</Tabs>
Add and commit the initial configuration.
```bash
git init . && git add . && git commit -m initial
```
### Define the Valid Schema
We'll use a CUE package named `policy` so the entire platform configuration in
package `holos` isn't loaded every time we validate an artifact.
Create `policy/validation-schema.cue` with the following content.
```shell
mkdir -p policy
cat <<EOF > policy/validation-schema.cue
```
```cue showLineNumbers
package policy
import apps "k8s.io/api/apps/v1"
// Organize by kind then name to avoid conflicts.
kind: [KIND=string]: [NAME=string]: {...}
// Useful when one component manages the same resource kind and name across
// multiple namespaces.
let KIND = kind
namespace: [NS=string]: KIND
// Block Secret resources. kind will not unify with "Secret"
kind: secret: [NAME=string]: kind: "Use an ExternalSecret instead. Forbidden by security policy. secret/\(NAME)"
// Validate Deployment against Kubernetes type definitions.
kind: deployment: [_]: apps.#Deployment
```
```shell
EOF
```
### Configuring Validators
Set the Validators [ComponentConfig] field so that each [BuildPlan]
validates the rendered [Artifact] files.
```shell
cat <<EOF > validators.cue
```
```cue showLineNumbers
package holos
// Configure all component kinds to validate against the policy directory.
#ComponentConfig: Validators: cue: {
kind: "Command"
// Note --path maps each resource to a top level field named by the kind.
command: args: [
"holos",
"cue",
"vet",
"./policy",
"--path=\"namespace\"",
"--path=metadata.namespace",
"--path=strings.ToLower(kind)",
"--path=metadata.name",
]
}
```
```shell
EOF
```
### Patching Errors
Render the platform to see validation fail. The podinfo chart has no Secret,
but it produces an invalid Deployment because it sets the container resource
limits field to `null`.
```shell
holos render platform
```
```txt
deployment.spec.template.spec.containers.0.resources.limits: conflicting values null and {[string]:"k8s.io/apimachinery/pkg/api/resource".#Quantity} (mismatched types null and struct):
./cue.mod/gen/k8s.io/api/apps/v1/types_go_gen.cue:355:9
./cue.mod/gen/k8s.io/api/apps/v1/types_go_gen.cue:376:12
./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:2840:11
./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:2968:14
./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:3882:15
./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:3882:18
./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:5027:9
./cue.mod/gen/k8s.io/api/core/v1/types_go_gen.cue:6407:16
./policy/validation-schema.cue:9:13
../../../../../var/folders/22/T/holos.validate1636392304/components/podinfo/podinfo.gen.yaml:104:19
could not run: terminating because of errors
could not run: could not validate podinfo path ./components/podinfo: could not run command: holos cue vet ./policy --path strings.ToLower(kind) /var/folders/22/T/holos.validate1636392304/components/podinfo/podinfo.gen.yaml: exit status 1 at builder/v1alpha5/builder.go:411
could not run: could not render component: could not run command: holos --log-level info --log-format console render component --inject holos_component_name=podinfo --inject holos_component_path=components/podinfo ./components/podinfo: exit status 1 at cli/render/render.go:155
```
We'll use a [Kustomize] patch [Transformer] to replace the `null` limits field
with a valid equivalent value.
:::important
This configuration is defined in CUE, not YAML, even though we're configuring a
Kustomize patch transformer. CUE gives us access to the unified platform
configuration.
:::
```shell
cat <<EOF > components/podinfo/patch.cue
```
```cue showLineNumbers
package holos
import "encoding/yaml"
Component: KustomizeConfig: Kustomization: {
_patches: limits: {
target: kind: "Deployment"
patch: yaml.Marshal([{
op: "test"
path: "/spec/template/spec/containers/0/resources/limits"
value: null
}, {
op: "replace"
path: "/spec/template/spec/containers/0/resources/limits"
value: {}
}])
}
patches: [for x in _patches {x}]
}
```
```shell
EOF
```
Now the platform renders.
<Tabs groupId="3A050092-8E56-49D4-84A9-71E544A21276">
<TabItem value="command" label="Command">
```bash
holos render platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt
rendered podinfo in 181.875083ms
rendered platform in 181.975833ms
```
</TabItem>
</Tabs>
## Inspecting the BuildPlan
The BuildPlan patches the output of the upstream Helm chart without modifying
the chart itself, then validates the artifact against the Kubernetes type
definitions.
<Tabs groupId="1DAB4C46-0793-4CCA-8930-7B2E60BDA1BE">
<TabItem value="command" label="Command">
```bash
holos show buildplans
```
</TabItem>
<TabItem value="output" label="Output">
```yaml showLineNumbers
kind: BuildPlan
apiVersion: v1alpha5
metadata:
name: podinfo
spec:
artifacts:
- artifact: components/podinfo/podinfo.gen.yaml
generators:
- kind: Helm
output: helm.gen.yaml
helm:
chart:
name: podinfo
version: 6.6.2
release: podinfo
repository:
name: podinfo
url: https://stefanprodan.github.io/podinfo
values: {}
namespace: default
- kind: Resources
output: resources.gen.yaml
transformers:
- kind: Kustomize
inputs:
- helm.gen.yaml
- resources.gen.yaml
output: components/podinfo/podinfo.gen.yaml
kustomize:
kustomization:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
patches:
- patch: |
- op: test
path: /spec/template/spec/containers/0/resources/limits
value: null
- op: replace
path: /spec/template/spec/containers/0/resources/limits
value: {}
target:
kind: Deployment
name: ""
resources:
- helm.gen.yaml
- resources.gen.yaml
validators:
- kind: Command
inputs:
- components/podinfo/podinfo.gen.yaml
command:
args:
- holos
- cue
- vet
- ./policy
- --path
- strings.ToLower(kind)
```
</TabItem>
</Tabs>
## Catching Mistakes
Suppose a teammate downloads a Helm chart that, unbeknownst to them, includes
a Secret. Holos catches the problem and suggests an ExternalSecret instead.
Mix in a Secret to see what happens.
```shell
cat <<EOF > components/podinfo/secret.cue
```
```cue showLineNumbers
package holos
Component: Resources: Secret: example: metadata: name: "example"
```
```shell
EOF
```
Render the platform to see the error.
```shell
holos render platform
```
```txt
secret.kind: conflicting values "Use an ExternalSecret instead. Forbidden by security policy." and "Secret":
./policy/validation-schema.cue:6:15
../../../../../var/folders/22/T/holos.validate2549739170/components/podinfo/podinfo.gen.yaml:1:7
could not run: terminating because of errors
could not run: could not validate podinfo path ./components/podinfo: could not run command: holos cue vet ./policy --path strings.ToLower(kind) /var/folders/22/T/holos.validate2549739170/components/podinfo/podinfo.gen.yaml: exit status 1 at builder/v1alpha5/builder.go:411
could not run: could not render component: could not run command: holos --log-level info --log-format console render component --inject holos_component_name=podinfo --inject holos_component_path=components/podinfo ./components/podinfo: exit status 1 at cli/render/render.go:155
```
:::important
Holos quickly returns an error if validated artifacts have a Secret.
:::
Remove the secret to resolve the issue.
```shell
rm components/podinfo/secret.cue
```
## Inspecting the diff
The validation and patch result in a correct Deployment, verified against the
Kubernetes type definitions.
```shell
git diff
```
```diff
diff --git a/deploy/components/podinfo/podinfo.gen.yaml b/deploy/components/podinfo/podinfo.gen.yaml
index 6e4aec0..a145e3f 100644
--- a/deploy/components/podinfo/podinfo.gen.yaml
+++ b/deploy/components/podinfo/podinfo.gen.yaml
@@ -101,7 +101,7 @@ spec:
successThreshold: 1
timeoutSeconds: 5
resources:
- limits: null
+ limits: {}
requests:
cpu: 1m
memory: 16Mi
```
## Trying Locally
Optionally, apply the manifests rendered by Holos to a [Local Cluster] for
testing.
[Local Cluster]: ../topics/local-cluster.mdx
[ExternalSecret]: https://external-secrets.io/latest/api/externalsecret/
[Artifact]: ../api/core.md#Artifact
[BuildPlan]: ../api/core.md#BuildPlan
[Resources]: ../api/core.md#Resources
[Validator]: ../api/core.md#Validator
[Transformer]: ../api/core.md#Transformer
[Kustomize]: ../api/core.md#Kustomize
[Generator]: ../api/core.md#Generator
[Hello Holos]: ./hello-holos.mdx
[cue.mod/gen/external-secrets.io/externalsecret/v1beta1/types_gen.cue]: https://github.com/holos-run/holos/blob/main/internal/generate/platforms/cue.mod/gen/external-secrets.io/externalsecret/v1beta1/types_gen.cue#L13
[ComponentConfig]: ../api/author.md#ComponentConfig
[timoni]: https://timoni.sh/install/
[resources.cue]: https://github.com/holos-run/holos/blob/main/internal/generate/platforms/v1alpha5/resources.cue#L33

View File

@@ -0,0 +1,35 @@
---
slug: validators-feature
title: Validators added in Holos v0.101.0
authors: [jeff]
tags: [holos, feature]
image: /img/cards/validators.png
description: Validators are useful to enforce policy and catch Helm errors.
---
import RenderingOverview from '@site/src/diagrams/rendering-overview.mdx';
import RenderPlatformDiagram from '@site/src/diagrams/render-platform-sequence.mdx';
We've added support for [Validators] in [v0.101.0]. Validators are useful to
enforce policies and ensure consistency early in the process. This feature
addresses two primary use cases:
1. Prevent insecure configuration early in the process. For example, prevent
Helm from rendering a `Secret` which would otherwise be committed to version control.
2. Prevent unsafe configuration by validating manifests against Kubernetes core
and custom resource type definitions.
Check out the [Validators] tutorial for examples of both use cases.
[Validators]: https://holos.run/docs/v1alpha5/tutorial/validators/
[v0.101.0]: https://github.com/holos-run/holos/releases/tag/v0.101.0
{/* truncate */}
<RenderingOverview />
Validators complete the core functionality of the Holos manifest rendering
pipeline. We are seeking design partners to help enhance Generators and
Transformers. Validators are implemented using a generic `Command` kind, and
we're considering a similar kind for Generators and Transformers. Please
connect with us if you'd like to help design these enhancements.

View File

@@ -3,21 +3,21 @@
title: Rendering Overview
---
graph LR
Platform[<a href="/docs/v1alpha5/api/author/#Platform">Platform</a>]
Component[<a href="/docs/v1alpha5/api/author/#ComponentConfig">Components</a>]
Platform[<a href="https://holos.run/docs/v1alpha5/api/author/#Platform">Platform</a>]
Component[<a href="https://holos.run/docs/v1alpha5/api/author/#ComponentConfig">Components</a>]
Helm[<a href="/docs/v1alpha5/api/author/#Helm">Helm</a>]
Kustomize[<a href="/docs/v1alpha5/api/author/#Kustomize">Kustomize</a>]
Kubernetes[<a href="/docs/v1alpha5/api/author/#Kubernetes">Kubernetes</a>]
Helm[<a href="https://holos.run/docs/v1alpha5/api/author/#Helm">Helm</a>]
Kustomize[<a href="https://holos.run/docs/v1alpha5/api/author/#Kustomize">Kustomize</a>]
Kubernetes[<a href="https://holos.run/docs/v1alpha5/api/author/#Kubernetes">Kubernetes</a>]
BuildPlan[<a href="/docs/v1alpha5/api/core/#BuildPlan">BuildPlan</a>]
BuildPlan[<a href="https://holos.run/docs/v1alpha5/api/core/#BuildPlan">BuildPlan</a>]
ResourcesArtifact[<a href="/docs/v1alpha5/api/core/#Artifact">Resources<br/>Artifact</a>]
GitOpsArtifact[<a href="/docs/v1alpha5/api/core/#Artifact">GitOps<br/>Artifact</a>]
ResourcesArtifact[<a href="https://holos.run/docs/v1alpha5/api/core/#Artifact">Resources<br/>Artifact</a>]
GitOpsArtifact[<a href="https://holos.run/docs/v1alpha5/api/core/#Artifact">GitOps<br/>Artifact</a>]
Generators[<a href="/docs/v1alpha5/api/core/#Generator">Generators</a>]
Transformers[<a href="/docs/v1alpha5/api/core/#Transformer">Transformers</a>]
Validators[Validators]
Generators[<a href="https://holos.run/docs/v1alpha5/api/core/#Generator">Generators</a>]
Transformers[<a href="https://holos.run/docs/v1alpha5/api/core/#Transformer">Transformers</a>]
Validators[<a href="https://holos.run/docs/v1alpha5/api/core/#Validator">Validators</a>]
Files[Manifest<br/>Files]
Platform --> Component

View File

@@ -0,0 +1,16 @@
/docs /docs/v1alpha5/ 301
/docs/ /docs/v1alpha5/ 301
/docs/tutorial /docs/v1alpha5/tutorial/ 301
/docs/tutorial/ /docs/v1alpha5/tutorial/ 301
/docs/quickstart /docs/v1alpha5/tutorial/overview/ 301
/docs/quickstart/ /docs/v1alpha5/tutorial/overview/ 301
/docs/overview /docs/v1alpha5/tutorial/overview/ 301
/docs/overview/ /docs/v1alpha5/tutorial/overview/ 301
/docs/topics /docs/v1alpha5/topics/structures/ 301
/docs/topics/ /docs/v1alpha5/topics/structures/ 301
/docs/setup /docs/v1alpha5/tutorial/setup/ 301
/docs/setup/ /docs/v1alpha5/tutorial/setup/ 301
/docs/local-cluster /docs/v1alpha5/topics/local-cluster/ 301
/docs/local-cluster/ /docs/v1alpha5/topics/local-cluster/ 301
/docs/guides/helm /docs/v1alpha5/tutorial/helm-values/ 301
/docs/guides/helm/ /docs/v1alpha5/tutorial/helm-values/ 301

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

24
go.mod
View File

@@ -10,7 +10,7 @@ require (
connectrpc.com/grpcreflect v1.2.0
connectrpc.com/otelconnect v0.7.0
connectrpc.com/validate v0.1.0
cuelang.org/go v0.10.1
cuelang.org/go v0.11.0
entgo.io/ent v0.13.1
github.com/bufbuild/buf v1.35.1
github.com/choria-io/machine-room v0.0.0-20240417064836-c604da2f005e
@@ -33,9 +33,9 @@ require (
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.9.0
golang.org/x/net v0.28.0
golang.org/x/net v0.30.0
golang.org/x/sync v0.8.0
golang.org/x/tools v0.24.0
golang.org/x/tools v0.26.0
google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
@@ -53,7 +53,7 @@ require (
buf.build/gen/go/bufbuild/registry/connectrpc/go v1.16.2-20240610164129-660609bc46d3.1 // indirect
buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.34.2-20240610164129-660609bc46d3.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cuelabs.dev/go/oci/ociregistry v0.0.0-20240807094312-a32ad29eed79 // indirect
cuelabs.dev/go/oci/ociregistry v0.0.0-20240906074133-82eb438dd565 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
@@ -256,7 +256,7 @@ require (
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -268,7 +268,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.52.3 // indirect
github.com/prometheus/procfs v0.13.0 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20240823084532-8e6b51fa9bef // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
@@ -326,14 +326,14 @@ require (
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/sys v0.23.0 // indirect
golang.org/x/term v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect

48
go.sum
View File

@@ -48,10 +48,10 @@ connectrpc.com/otelconnect v0.7.0 h1:ZH55ZZtcJOTKWWLy3qmL4Pam4RzRWBJFOqTPyAqCXkY
connectrpc.com/otelconnect v0.7.0/go.mod h1:Bt2ivBymHZHqxvo4HkJ0EwHuUzQN6k2l0oH+mp/8nwc=
connectrpc.com/validate v0.1.0 h1:r55jirxMK7HO/xZwVHj3w2XkVFarsUM77ZDy367NtH4=
connectrpc.com/validate v0.1.0/go.mod h1:GU47c9/x/gd+u9wRSPkrQOP46gx2rMN+Wo37EHgI3Ow=
cuelabs.dev/go/oci/ociregistry v0.0.0-20240807094312-a32ad29eed79 h1:EceZITBGET3qHneD5xowSTY/YHbNybvMWGh62K2fG/M=
cuelabs.dev/go/oci/ociregistry v0.0.0-20240807094312-a32ad29eed79/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg=
cuelang.org/go v0.10.1 h1:vDRRsd/5CICzisZ/13kBmXt3M+9eDl/pI06rrHyhlgA=
cuelang.org/go v0.10.1/go.mod h1:HzlaqqqInHNiqE6slTP6+UtxT9hN6DAzgJgdbNxXvX8=
cuelabs.dev/go/oci/ociregistry v0.0.0-20240906074133-82eb438dd565 h1:R5wwEcbEZSBmeyg91MJZTxfd7WpBo2jPof3AYjRbxwY=
cuelabs.dev/go/oci/ociregistry v0.0.0-20240906074133-82eb438dd565/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg=
cuelang.org/go v0.11.0 h1:2af2nhipqlUHtXk2dtOP5xnMm1ObGvKqIsJUJL1sRE4=
cuelang.org/go v0.11.0/go.mod h1:PBY6XvPUswPPJ2inpvUozP9mebDVTXaeehQikhZPBz0=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -734,8 +734,8 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
@@ -769,8 +769,8 @@ github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZA
github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk=
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c=
github.com/protocolbuffers/txtpbfmt v0.0.0-20240823084532-8e6b51fa9bef h1:ej+64jiny5VETZTqcc1GFVAPEtaSk6U1D0kKC2MS5Yc=
github.com/protocolbuffers/txtpbfmt v0.0.0-20240823084532-8e6b51fa9bef/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
@@ -968,8 +968,8 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1007,8 +1007,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1048,16 +1048,16 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1138,8 +1138,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@@ -1147,8 +1147,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1163,8 +1163,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1217,8 +1217,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -20,6 +20,7 @@ var _ Store = NewStore()
type Store interface {
Get(path string) (data []byte, ok bool)
Set(path string, data []byte) error
// Save previously set path to dir preserving directories.
Save(dir, path string) error
}

View File

@@ -15,7 +15,7 @@ import (
// PlatformOpts represents build options when processing the components in a
// platform.
type PlatformOpts struct {
Fn BuildFunc
Fn func(context.Context, int, holos.Component) error
Selector holos.Selector
Concurrency int
InfoEnabled bool
@@ -89,9 +89,6 @@ func (p *Platform) Build(ctx context.Context, opts PlatformOpts) error {
return nil
}
// BuildFunc is executed concurrently when processing platform components.
type BuildFunc func(context.Context, int, holos.Component) error
func LoadPlatform(i *Instance) (platform Platform, err error) {
err = i.Discriminate(func(tm holos.TypeMeta) error {
if tm.Kind != "Platform" {

View File

@@ -5,16 +5,18 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"os"
"path"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"cuelang.org/go/cue"
core "github.com/holos-run/holos/api/core/v1alpha5"
"github.com/holos-run/holos/internal/artifact"
"github.com/holos-run/holos/internal/errors"
"github.com/holos-run/holos/internal/holos"
"github.com/holos-run/holos/internal/logger"
@@ -107,156 +109,84 @@ func (c *Component) Path() string {
}
var _ holos.BuildPlan = &BuildPlan{}
var _ task = generatorTask{}
var _ task = transformersTask{}
var _ task = validatorTask{}
// BuildPlan represents a component builder.
type BuildPlan struct {
core.BuildPlan
Opts holos.BuildOpts
// task is one unit of build work executed by the worker pool.
// generatorTask, transformersTask, and validatorTask implement task.
type task interface {
	// id returns a human readable identifier used in logs and errors.
	id() string
	// run executes the task, honoring ctx cancellation.
	run(ctx context.Context) error
}
// Build builds a BuildPlan into Artifact files.
func (b *BuildPlan) Build(ctx context.Context) error {
name := b.BuildPlan.Metadata.Name
path := b.Opts.Path
log := logger.FromContext(ctx).With("name", name, "path", path)
msg := fmt.Sprintf("could not build %s", name)
if b.BuildPlan.Spec.Disabled {
log.WarnContext(ctx, fmt.Sprintf("%s: disabled", msg))
return nil
}
// taskParams carries the identifying fields and build options shared by
// every task kind.  Embed it to satisfy the id portion of the task interface.
type taskParams struct {
	// taskName identifies the task within the build plan, for example
	// "artifact/0/generator/1".
	taskName string
	// buildPlanName is the metadata name of the owning BuildPlan.
	buildPlanName string
	// opts carries shared build options (artifact store, paths, writers).
	opts holos.BuildOpts
}
g, ctx := errgroup.WithContext(ctx)
// One more for the producer
g.SetLimit(b.Opts.Concurrency + 1)
// id returns a human readable identifier for the task in the form
// path:buildPlanName/taskName, used in log messages and error context.
func (t taskParams) id() string {
	return fmt.Sprintf("%s:%s/%s", t.opts.Path, t.buildPlanName, t.taskName)
}
// Producer.
g.Go(func() error {
for _, a := range b.BuildPlan.Spec.Artifacts {
msg := fmt.Sprintf("%s artifact %s", msg, a.Artifact)
log := log.With("artifact", a.Artifact)
if a.Skip {
log.WarnContext(ctx, fmt.Sprintf("%s: skipped field is true", msg))
continue
}
select {
case <-ctx.Done():
return ctx.Err()
default:
// https://golang.org/doc/faq#closures_and_goroutines
a := a
// Worker. Blocks if limit has been reached.
g.Go(func() error {
for _, gen := range a.Generators {
switch gen.Kind {
case "Resources":
if err := b.resources(log, gen, b.Opts.Store); err != nil {
return errors.Format("could not generate resources: %w", err)
}
case "Helm":
if err := b.helm(ctx, log, gen, b.Opts.Store); err != nil {
return errors.Format("could not generate helm: %w", err)
}
case "File":
if err := b.file(log, gen, b.Opts.Store); err != nil {
return errors.Format("could not generate file: %w", err)
}
default:
return errors.Format("%s: unsupported kind %s", msg, gen.Kind)
}
}
// generatorTask runs a single artifact generator and stores its output in the
// artifact store.
type generatorTask struct {
	taskParams
	generator core.Generator
	// wg is the owning artifact's stage wait group; run calls wg.Done when
	// the task finishes so the producer can advance to the next stage.
	wg *sync.WaitGroup
}
for _, t := range a.Transformers {
switch t.Kind {
case "Kustomize":
if err := b.kustomize(ctx, log, t, b.Opts.Store); err != nil {
return errors.Wrap(err)
}
case "Join":
s := make([][]byte, 0, len(t.Inputs))
for _, input := range t.Inputs {
if data, ok := b.Opts.Store.Get(string(input)); ok {
s = append(s, data)
} else {
return errors.Format("%s: missing %s", msg, input)
}
}
data := bytes.Join(s, []byte(t.Join.Separator))
if err := b.Opts.Store.Set(string(t.Output), data); err != nil {
return errors.Format("%s: %w", msg, err)
}
log.Debug("set artifact: " + string(t.Output))
default:
return errors.Format("%s: unsupported kind %s", msg, t.Kind)
}
}
// Write the final artifact
if err := b.Opts.Store.Save(b.Opts.WriteTo, string(a.Artifact)); err != nil {
return errors.Format("%s: %w", msg, err)
}
log.DebugContext(ctx, "wrote "+filepath.Join(b.Opts.WriteTo, string(a.Artifact)))
return nil
})
}
func (t generatorTask) run(ctx context.Context) error {
defer t.wg.Done()
msg := fmt.Sprintf("could not build %s", t.id())
switch t.generator.Kind {
case "Resources":
if err := t.resources(); err != nil {
return errors.Format("%s: could not generate resources: %w", msg, err)
}
return nil
})
// Wait for completion and return the first error (if any)
return g.Wait()
}
func (b *BuildPlan) Export(idx int, encoder holos.OrderedEncoder) error {
if err := encoder.Encode(idx, &b.BuildPlan); err != nil {
return errors.Wrap(err)
case "Helm":
if err := t.helm(ctx); err != nil {
return errors.Format("%s: could not generate helm: %w", msg, err)
}
case "File":
if err := t.file(); err != nil {
return errors.Format("%s: could not generate file: %w", msg, err)
}
default:
return errors.Format("%s: unsupported kind %s", msg, t.generator.Kind)
}
return nil
}
func (b *BuildPlan) Load(v cue.Value) error {
return errors.Wrap(v.Decode(&b.BuildPlan))
}
func (b *BuildPlan) file(
log *slog.Logger,
g core.Generator,
store artifact.Store,
) error {
data, err := os.ReadFile(filepath.Join(string(b.Opts.Path), string(g.File.Source)))
func (t generatorTask) file() error {
data, err := os.ReadFile(filepath.Join(string(t.opts.Path), string(t.generator.File.Source)))
if err != nil {
return errors.Wrap(err)
}
if err := store.Set(string(g.Output), data); err != nil {
if err := t.opts.Store.Set(string(t.generator.Output), data); err != nil {
return errors.Wrap(err)
}
log.Debug("set artifact: " + string(g.Output))
return nil
}
func (b *BuildPlan) helm(
ctx context.Context,
log *slog.Logger,
g core.Generator,
store artifact.Store,
) error {
chartName := g.Helm.Chart.Name
log = log.With("chart", chartName)
func (t generatorTask) helm(ctx context.Context) error {
chartName := t.generator.Helm.Chart.Name
// Unnecessary? cargo cult copied from internal/cli/render/render.go
if chartName == "" {
return errors.New("missing chart name")
}
// Cache the chart by version to pull new versions. (#273)
cacheDir := filepath.Join(string(b.Opts.Path), "vendor", g.Helm.Chart.Version)
cacheDir := filepath.Join(string(t.opts.Path), "vendor", t.generator.Helm.Chart.Version)
cachePath := filepath.Join(cacheDir, filepath.Base(chartName))
log := logger.FromContext(ctx)
if _, err := os.Stat(cachePath); os.IsNotExist(err) {
timeout, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()
err := onceWithLock(log, timeout, cachePath, func() error {
return b.cacheChart(ctx, log, cacheDir, g.Helm.Chart)
})
err := func() error {
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()
return onceWithLock(log, ctx, cachePath, func() error {
return cacheChart(ctx, cacheDir, t.generator.Helm.Chart, t.opts.Stderr)
})
}()
if err != nil {
return errors.Format("could not cache chart: %w", err)
}
@@ -269,7 +199,7 @@ func (b *BuildPlan) helm(
}
defer util.Remove(ctx, tempDir)
data, err := yaml.Marshal(g.Helm.Values)
data, err := yaml.Marshal(t.generator.Helm.Values)
if err != nil {
return errors.Format("could not marshal values: %w", err)
}
@@ -282,22 +212,22 @@ func (b *BuildPlan) helm(
// Run charts
args := []string{"template"}
if !g.Helm.EnableHooks {
if !t.generator.Helm.EnableHooks {
args = append(args, "--no-hooks")
}
for _, apiVersion := range g.Helm.APIVersions {
for _, apiVersion := range t.generator.Helm.APIVersions {
args = append(args, "--api-versions", apiVersion)
}
if kubeVersion := g.Helm.KubeVersion; kubeVersion != "" {
if kubeVersion := t.generator.Helm.KubeVersion; kubeVersion != "" {
args = append(args, "--kube-version", kubeVersion)
}
args = append(args,
"--include-crds",
"--values", valuesPath,
"--namespace", g.Helm.Namespace,
"--namespace", t.generator.Helm.Namespace,
"--kubeconfig", "/dev/null",
"--version", g.Helm.Chart.Version,
g.Helm.Chart.Release,
"--version", t.generator.Helm.Chart.Version,
t.generator.Helm.Chart.Release,
cachePath,
)
helmOut, err := util.RunCmd(ctx, "helm", args...)
@@ -314,36 +244,31 @@ func (b *BuildPlan) helm(
}
// Set the artifact
if err := store.Set(string(g.Output), helmOut.Stdout.Bytes()); err != nil {
if err := t.opts.Store.Set(string(t.generator.Output), helmOut.Stdout.Bytes()); err != nil {
return errors.Format("could not store helm output: %w", err)
}
log.Debug("set artifact: " + string(g.Output))
log.Debug("set artifact: " + string(t.generator.Output))
return nil
}
func (b *BuildPlan) resources(
log *slog.Logger,
g core.Generator,
store artifact.Store,
) error {
func (t generatorTask) resources() error {
var size int
for _, m := range g.Resources {
for _, m := range t.generator.Resources {
size += len(m)
}
list := make([]core.Resource, 0, size)
for _, m := range g.Resources {
for _, m := range t.generator.Resources {
for _, r := range m {
list = append(list, r)
}
}
msg := fmt.Sprintf(
"could not generate %s for %s path %s",
g.Output,
b.BuildPlan.Metadata.Name,
b.Opts.Path,
"could not generate %s for %s",
t.generator.Output,
t.id(),
)
buf, err := marshal(list)
@@ -351,70 +276,220 @@ func (b *BuildPlan) resources(
return errors.Format("%s: %w", msg, err)
}
if err := store.Set(string(g.Output), buf.Bytes()); err != nil {
if err := t.opts.Store.Set(string(t.generator.Output), buf.Bytes()); err != nil {
return errors.Format("%s: %w", msg, err)
}
log.Debug("set artifact " + string(g.Output))
return nil
}
func (b *BuildPlan) kustomize(
ctx context.Context,
log *slog.Logger,
t core.Transformer,
store artifact.Store,
) error {
tempDir, err := os.MkdirTemp("", "holos.kustomize")
if err != nil {
return errors.Wrap(err)
}
defer util.Remove(ctx, tempDir)
msg := fmt.Sprintf(
"could not transform %s for %s path %s",
t.Output,
b.BuildPlan.Metadata.Name,
b.Opts.Path,
)
// transformersTask runs the full list of an artifact's transformers as one
// sequential task, reading inputs from and writing outputs to the artifact
// store.
type transformersTask struct {
	taskParams
	transformers []core.Transformer
	// wg is the owning artifact's stage wait group; run calls wg.Done when
	// the task finishes so the producer can advance to the next stage.
	wg *sync.WaitGroup
}
// Write the kustomization
data, err := yaml.Marshal(t.Kustomize.Kustomization)
if err != nil {
return errors.Format("%s: %w", msg, err)
}
path := filepath.Join(tempDir, "kustomization.yaml")
if err := os.WriteFile(path, data, 0666); err != nil {
return errors.Format("%s: %w", msg, err)
}
log.DebugContext(ctx, "wrote "+path)
// Write the inputs
for _, input := range t.Inputs {
path := string(input)
if err := store.Save(tempDir, path); err != nil {
return errors.Format("%s: %w", msg, err)
func (t transformersTask) run(ctx context.Context) error {
defer t.wg.Done()
for idx, transformer := range t.transformers {
msg := fmt.Sprintf("could not build %s/%d", t.id(), idx)
switch transformer.Kind {
case "Kustomize":
if err := kustomize(ctx, transformer, t.taskParams); err != nil {
return errors.Wrap(err)
}
case "Join":
s := make([][]byte, 0, len(transformer.Inputs))
for _, input := range transformer.Inputs {
if data, ok := t.opts.Store.Get(string(input)); ok {
s = append(s, data)
} else {
return errors.Format("%s: missing %s", msg, input)
}
}
data := bytes.Join(s, []byte(transformer.Join.Separator))
if err := t.opts.Store.Set(string(transformer.Output), data); err != nil {
return errors.Format("%s: %w", msg, err)
}
default:
return errors.Format("%s: unsupported kind %s", msg, transformer.Kind)
}
log.DebugContext(ctx, "wrote "+filepath.Join(tempDir, path))
}
return nil
}
// Execute kustomize
r, err := util.RunCmd(ctx, "kubectl", "kustomize", tempDir)
if err != nil {
kErr := r.Stderr.String()
err = errors.Format("%s: could not run kustomize: %w", msg, err)
log.ErrorContext(ctx, fmt.Sprintf("%s: stderr:\n%s", err.Error(), kErr), "err", err, "stderr", kErr)
return err
// validatorTask runs a single artifact validator against the build output.
type validatorTask struct {
	taskParams
	validator core.Validator
	// wg is the owning artifact's stage wait group; run calls wg.Done when
	// the task finishes so the producer can advance to the next stage.
	wg *sync.WaitGroup
}
// run executes the validator.  Only the "Command" kind is supported; any
// other kind is an error.  The artifact's stage wait group is released when
// the task finishes, success or failure.
func (t validatorTask) run(ctx context.Context) error {
	defer t.wg.Done()
	kind := t.validator.Kind
	if kind != "Command" {
		msg := fmt.Sprintf("could not validate %s", t.id())
		return errors.Format("%s: unsupported kind %s", msg, kind)
	}
	err := validate(ctx, t.validator, t.taskParams)
	if err != nil {
		return errors.Wrap(err)
	}
	return nil
}
// Store the artifact
if err := store.Set(string(t.Output), r.Stdout.Bytes()); err != nil {
// worker drains the tasks channel until the channel is closed or ctx is
// canceled, executing each received task in order.  The first task error
// stops the worker and is returned to the caller's errgroup.
func worker(ctx context.Context, idx int, tasks chan task) error {
	log := logger.FromContext(ctx).With("worker", idx)
	for {
		var t task
		var ok bool
		select {
		case <-ctx.Done():
			return ctx.Err()
		case t, ok = <-tasks:
		}
		if !ok {
			// Channel closed by the producer: no more work.
			log.DebugContext(ctx, fmt.Sprintf("worker %d returning: tasks chan closed", idx))
			return nil
		}
		log.DebugContext(ctx, fmt.Sprintf("worker %d task %s starting", idx, t.id()))
		if err := t.run(ctx); err != nil {
			return errors.Wrap(err)
		}
		log.DebugContext(ctx, fmt.Sprintf("worker %d task %s finished ok", idx, t.id()))
	}
}
// buildArtifact runs the pipeline for a single artifact: all generators
// concurrently, then the transformers as one sequential task, then all
// validators concurrently, and finally saves the artifact from the store to
// opts.WriteTo.  Tasks are not executed here; they are sent to the shared
// tasks channel and executed by the worker pool, so the per-stage WaitGroup
// is how this producer learns a stage has completed (each task's run method
// calls wg.Done via defer).
//
// NOTE(review): if ctx is canceled after a task is sent to the channel but
// before a worker receives it, that task's wg.Done never fires and wg.Wait
// could block — confirm the cancellation path cannot strand queued tasks.
func buildArtifact(ctx context.Context, idx int, artifact core.Artifact, tasks chan task, buildPlanName string, opts holos.BuildOpts) error {
	var wg sync.WaitGroup
	msg := fmt.Sprintf("could not build %s artifact %s", buildPlanName, artifact.Artifact)

	// Stage 1: generators run concurrently, each writing to the store.
	for gid, gen := range artifact.Generators {
		task := generatorTask{
			taskParams: taskParams{
				taskName:      fmt.Sprintf("artifact/%d/generator/%d", idx, gid),
				buildPlanName: buildPlanName,
				opts:          opts,
			},
			generator: gen,
			wg:        &wg,
		}
		wg.Add(1)
		// Block until a worker can accept the task or the build is canceled.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case tasks <- task:
		}
	}
	wg.Wait()

	// Stage 2: transformers run sequentially within a single task.
	task := transformersTask{
		taskParams: taskParams{
			taskName:      fmt.Sprintf("artifact/%d/transformers", idx),
			buildPlanName: buildPlanName,
			opts:          opts,
		},
		transformers: artifact.Transformers,
		wg:           &wg,
	}
	wg.Add(1)
	select {
	case <-ctx.Done():
		return ctx.Err()
	case tasks <- task:
	}
	wg.Wait()

	// Stage 3: validators run concurrently against the transformed output.
	for vid, val := range artifact.Validators {
		task := validatorTask{
			taskParams: taskParams{
				taskName:      fmt.Sprintf("artifact/%d/validator/%d", idx, vid),
				buildPlanName: buildPlanName,
				opts:          opts,
			},
			validator: val,
			wg:        &wg,
		}
		wg.Add(1)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case tasks <- task:
		}
	}
	wg.Wait()

	// Write the final artifact from the in-memory store to the output tree.
	out := string(artifact.Artifact)
	if err := opts.Store.Save(opts.WriteTo, out); err != nil {
		return errors.Format("%s: %w", msg, err)
	}
	log := logger.FromContext(ctx)
	log.DebugContext(ctx, fmt.Sprintf("wrote %s", filepath.Join(opts.WriteTo, out)))
	return nil
}
// BuildPlan represents a component builder.  It wraps the core BuildPlan
// with the runtime options (artifact store, output locations, concurrency)
// needed to render its artifacts.
type BuildPlan struct {
	core.BuildPlan
	Opts holos.BuildOpts
}
// Build renders the BuildPlan's artifacts concurrently.  It starts a pool of
// max(1, Opts.Concurrency) workers that execute tasks, plus one producer per
// artifact that feeds the pool via buildArtifact.  The tasks channel is
// closed once all producers return so the workers drain and exit.  Build
// returns nil immediately when the plan is disabled, otherwise the first
// error from any worker or producer.
func (b *BuildPlan) Build(ctx context.Context) error {
	name := b.BuildPlan.Metadata.Name
	path := b.Opts.Path
	log := logger.FromContext(ctx).With("name", name, "path", path)
	msg := fmt.Sprintf("could not build %s", name)
	if b.BuildPlan.Spec.Disabled {
		log.WarnContext(ctx, fmt.Sprintf("%s: disabled", msg))
		return nil
	}
	g, ctx := errgroup.WithContext(ctx)
	tasks := make(chan task)
	// Start the worker pool.
	// NOTE(review): the closures capture the loop variables idx and a below;
	// this relies on Go 1.22 per-iteration loop variable semantics — confirm
	// the go directive in go.mod is >= 1.22.
	for idx := 0; idx < max(1, b.Opts.Concurrency); idx++ {
		g.Go(func() error {
			return worker(ctx, idx, tasks)
		})
	}
	// Start one producer that fans out to one pipeline per artifact.
	g.Go(func() error {
		// Close the tasks chan when the producer returns so workers exit.
		defer func() {
			log.DebugContext(ctx, "producer returning: closing tasks chan")
			close(tasks)
		}()
		// Separate error group for producers so the tasks chan is closed
		// only after every artifact pipeline has finished.
		p, ctx := errgroup.WithContext(ctx)
		for idx, a := range b.BuildPlan.Spec.Artifacts {
			p.Go(func() error {
				return buildArtifact(ctx, idx, a, tasks, b.Metadata.Name, b.Opts)
			})
		}
		// Wait on producers to finish.
		return errors.Wrap(p.Wait())
	})
	// Wait on workers to finish.
	return g.Wait()
}
// Export encodes the embedded core BuildPlan to encoder at ordered position
// idx.
func (b *BuildPlan) Export(idx int, encoder holos.OrderedEncoder) error {
	err := encoder.Encode(idx, &b.BuildPlan)
	if err != nil {
		return errors.Wrap(err)
	}
	return nil
}
// Load decodes the CUE value v into the embedded core BuildPlan.
func (b *BuildPlan) Load(v cue.Value) error {
	return errors.Wrap(v.Decode(&b.BuildPlan))
}
func marshal(list []core.Resource) (buf bytes.Buffer, err error) {
encoder := yaml.NewEncoder(&buf)
defer encoder.Close()
@@ -427,95 +502,6 @@ func marshal(list []core.Resource) (buf bytes.Buffer, err error) {
return
}
// cacheChart stores a cached copy of Chart in the chart subdirectory of path.
//
// We assume the only method responsible for writing to chartDir is cacheChart
// itself. cacheChart runs concurrently when rendering a platform.
//
// We rely on the atomicity of moving temporary directories into place on the
// same filesystem via os.Rename. If a syscall.EEXIST error occurs during
// renaming, it indicates that the cached chart already exists, which is
// expected when this function is called concurrently.
//
// TODO(jeff): Break the dependency on v1alpha5, make it work across versions as
// a utility function.
func (b *BuildPlan) cacheChart(
	ctx context.Context,
	log *slog.Logger,
	cacheDir string,
	chart core.Chart,
) error {
	// Add repositories
	repo := chart.Repository
	stderr := b.Opts.Stderr
	if repo.URL == "" {
		// repo update not needed for oci charts so this is debug instead of warn.
		log.DebugContext(ctx, "skipped helm repo add and update: repo url is empty")
	} else {
		if _, err := util.RunCmdW(ctx, stderr, "helm", "repo", "add", repo.Name, repo.URL); err != nil {
			return errors.Format("could not run helm repo add: %w", err)
		}
		if _, err := util.RunCmdW(ctx, stderr, "helm", "repo", "update", repo.Name); err != nil {
			return errors.Format("could not run helm repo update: %w", err)
		}
	}
	// Support chart.Name = "oci://ghcr.io/akuity/kargo-charts/kargo" by using
	// only the base name for filesystem cache operations.  os.MkdirTemp
	// rejects patterns containing path separators, and the cache entry must
	// be a single directory entry under cacheDir.
	chartBaseName := filepath.Base(chart.Name)
	cacheTemp, err := os.MkdirTemp(cacheDir, chartBaseName)
	if err != nil {
		return errors.Wrap(err)
	}
	defer util.Remove(ctx, cacheTemp)
	// The helm pull reference is repo-qualified when a named repository was
	// added above, otherwise the chart name is used directly (oci refs).
	cn := chart.Name
	if chart.Repository.Name != "" {
		cn = fmt.Sprintf("%s/%s", chart.Repository.Name, chart.Name)
	}
	helmOut, err := util.RunCmdW(ctx, stderr, "helm", "pull", "--destination", cacheTemp, "--untar=true", "--version", chart.Version, cn)
	if err != nil {
		// Surface the helm "Error:" line in the returned error for context.
		errText := helmOut.Stderr.String()
		lines := strings.Split(errText, "\n")
		for _, line := range lines {
			log.DebugContext(ctx, line)
			if strings.HasPrefix(line, "Error:") {
				err = fmt.Errorf("%s: %w", line, err)
			}
		}
		return errors.Format("could not run helm pull: %w", err)
	}
	log.Debug("helm pull", "stdout", helmOut.Stdout, "stderr", helmOut.Stderr)
	items, err := os.ReadDir(cacheTemp)
	if err != nil {
		return errors.Wrap(fmt.Errorf("could not read directory: %w", err))
	}
	// helm pull --untar extracts exactly one chart directory.
	if len(items) != 1 {
		return errors.Format("want: exactly one item, have: %+v", items)
	}
	item := items[0]
	src := filepath.Join(cacheTemp, item.Name())
	dst := filepath.Join(cacheDir, chartBaseName)
	if err := os.Rename(src, dst); err != nil {
		var linkErr *os.LinkError
		// EEXIST means a concurrent caller already populated the cache.
		if errors.As(err, &linkErr) && errors.Is(linkErr.Err, syscall.EEXIST) {
			log.DebugContext(ctx, "cache already exists", "chart", chart.Name, "chart_version", chart.Version, "path", dst)
		} else {
			return errors.Wrap(fmt.Errorf("could not rename: %w", err))
		}
	} else {
		log.DebugContext(ctx, fmt.Sprintf("renamed %s to %s", src, dst), "src", src, "dst", dst)
	}
	log.InfoContext(ctx,
		fmt.Sprintf("cached %s %s", chart.Name, chart.Version),
		"chart", chart.Name,
		"chart_version", chart.Version,
		"path", dst,
	)
	return nil
}
// onceWithLock obtains a filesystem lock with mkdir, then executes fn. If the
// lock is already locked, onceWithLock waits for it to be released then returns
// without calling fn.
@@ -564,3 +550,158 @@ func onceWithLock(log *slog.Logger, ctx context.Context, path string, fn func()
// Unexpected error
return errors.Wrap(err)
}
// cacheChart pulls a helm chart into a temporary directory then renames it
// into cacheDir.  When the rename reports EEXIST the cache entry was created
// concurrently by another caller and is reused as-is.
func cacheChart(ctx context.Context, cacheDir string, chart core.Chart, stderr io.Writer) error {
	log := logger.FromContext(ctx)
	// Register and refresh the chart repository unless the chart is
	// addressed directly, for example by an oci:// reference.
	repo := chart.Repository
	if repo.URL == "" {
		// repo update not needed for oci charts so this is debug instead of warn.
		log.DebugContext(ctx, "skipped helm repo add and update: repo url is empty")
	} else {
		if _, err := util.RunCmdW(ctx, stderr, "helm", "repo", "add", repo.Name, repo.URL); err != nil {
			return errors.Format("could not run helm repo add: %w", err)
		}
		if _, err := util.RunCmdW(ctx, stderr, "helm", "repo", "update", repo.Name); err != nil {
			return errors.Format("could not run helm repo update: %w", err)
		}
	}
	// Support chart.Name = "oci://ghcr.io/akuity/kargo-charts/kargo" by
	// using only the base name for filesystem cache operations.
	baseName := path.Base(chart.Name)
	tmpDir, err := os.MkdirTemp(cacheDir, baseName)
	if err != nil {
		return errors.Wrap(err)
	}
	defer util.Remove(ctx, tmpDir)
	pullRef := chart.Name
	if repo.Name != "" {
		pullRef = fmt.Sprintf("%s/%s", repo.Name, chart.Name)
	}
	helmOut, err := util.RunCmdW(ctx, stderr, "helm", "pull", "--destination", tmpDir, "--untar=true", "--version", chart.Version, pullRef)
	if err != nil {
		// Fold the helm "Error:" line into the returned error for context.
		for _, line := range strings.Split(helmOut.Stderr.String(), "\n") {
			log.DebugContext(ctx, line)
			if strings.HasPrefix(line, "Error:") {
				err = fmt.Errorf("%s: %w", line, err)
			}
		}
		return errors.Format("could not run helm pull: %w", err)
	}
	log.Debug("helm pull", "stdout", helmOut.Stdout, "stderr", helmOut.Stderr)
	entries, err := os.ReadDir(tmpDir)
	if err != nil {
		return errors.Wrap(fmt.Errorf("could not read directory: %w", err))
	}
	// helm pull --untar is expected to produce exactly one directory entry.
	if len(entries) != 1 {
		return errors.Format("want: exactly one item, have: %+v", entries)
	}
	src := filepath.Join(tmpDir, entries[0].Name())
	dst := filepath.Join(cacheDir, baseName)
	if renameErr := os.Rename(src, dst); renameErr == nil {
		log.DebugContext(ctx, fmt.Sprintf("renamed %s to %s", src, dst), "src", src, "dst", dst)
	} else {
		var linkErr *os.LinkError
		if errors.As(renameErr, &linkErr) && errors.Is(linkErr.Err, syscall.EEXIST) {
			// Another concurrent caller won the race; the cache is valid.
			log.DebugContext(ctx, "cache already exists", "chart", chart.Name, "chart_version", chart.Version, "path", dst)
		} else {
			return errors.Wrap(fmt.Errorf("could not rename: %w", renameErr))
		}
	}
	log.InfoContext(ctx,
		fmt.Sprintf("cached %s %s", chart.Name, chart.Version),
		"chart", chart.Name,
		"chart_version", chart.Version,
		"path", dst,
	)
	return nil
}
// kustomize renders the transformer inputs with kubectl kustomize and stores
// the output artifact.  Inputs and the kustomization file are written to a
// temporary directory which is removed when the function returns.
func kustomize(ctx context.Context, t core.Transformer, p taskParams) error {
	dir, err := os.MkdirTemp("", "holos.kustomize")
	if err != nil {
		return errors.Wrap(err)
	}
	defer util.Remove(ctx, dir)

	msg := fmt.Sprintf(
		"could not transform %s for %s path %s",
		t.Output,
		p.buildPlanName,
		p.opts.Path,
	)

	// Marshal and write the kustomization file.
	data, err := yaml.Marshal(t.Kustomize.Kustomization)
	if err != nil {
		return errors.Format("%s: %w", msg, err)
	}
	ksFile := filepath.Join(dir, "kustomization.yaml")
	if err := os.WriteFile(ksFile, data, 0666); err != nil {
		return errors.Format("%s: %w", msg, err)
	}

	// Write each input artifact into the temp directory.
	for _, input := range t.Inputs {
		if err := p.opts.Store.Save(dir, string(input)); err != nil {
			return errors.Format("%s: %w", msg, err)
		}
	}

	// Run kustomize over the temp directory.
	result, err := util.RunCmdW(ctx, p.opts.Stderr, "kubectl", "kustomize", dir)
	if err != nil {
		return errors.Format("%s: could not run kustomize: %w", msg, err)
	}

	// Record the rendered output in the artifact store.
	if err := p.opts.Store.Set(string(t.Output), result.Stdout.Bytes()); err != nil {
		return errors.Format("%s: %w", msg, err)
	}
	return nil
}
// validate writes the validator inputs into a temporary directory, then
// executes the validator command with the fully qualified input paths
// appended to the command args.  A command error fails the validation.
func validate(ctx context.Context, validator core.Validator, p taskParams) error {
	store := p.opts.Store
	tempDir, err := os.MkdirTemp("", "holos.validate")
	if err != nil {
		return errors.Wrap(err)
	}
	// Remove the temporary directory so repeated validations do not leak
	// disk space.  Previously this defer was commented out.
	defer util.Remove(ctx, tempDir)
	msg := fmt.Sprintf("could not validate %s", p.id())
	// Write the inputs
	for _, input := range validator.Inputs {
		path := string(input)
		if err := store.Save(tempDir, path); err != nil {
			return errors.Format("%s: %w", msg, err)
		}
	}
	if len(validator.Command.Args) < 1 {
		return errors.Format("%s: command args length must be at least 1", msg)
	}
	// Command args followed by one absolute path per input file.
	size := len(validator.Command.Args) + len(validator.Inputs)
	args := make([]string, 0, size)
	args = append(args, validator.Command.Args...)
	for _, input := range validator.Inputs {
		args = append(args, filepath.Join(tempDir, string(input)))
	}
	// Execute the validator
	if _, err = util.RunCmdA(ctx, p.opts.Stderr, args[0], args[1:]...); err != nil {
		return errors.Format("%s: %w", msg, err)
	}
	return nil
}

View File

@@ -4,6 +4,9 @@ import (
"context"
"fmt"
"log/slog"
"os"
"runtime/pprof"
"runtime/trace"
"connectrpc.com/connect"
cue "cuelang.org/go/cue/errors"
@@ -12,12 +15,48 @@ import (
"google.golang.org/genproto/googleapis/rpc/errdetails"
)
// memProfile writes a heap profile when the HOLOS_MEM_PROFILE environment
// variable is set.  The value is a format string receiving the parent and
// current process ids, for example "holos-%d-%d.memprofile".  Errors are
// reported through HandleError rather than returned because memProfile runs
// as a deferred cleanup step.
func memProfile(ctx context.Context, cfg *holos.Config) {
	format := os.Getenv("HOLOS_MEM_PROFILE")
	if format == "" {
		return
	}
	// Previously the create error was ignored; report it instead of writing
	// the profile to an invalid file handle.
	f, err := os.Create(fmt.Sprintf(format, os.Getppid(), os.Getpid()))
	if err != nil {
		_ = HandleError(ctx, err, cfg)
		return
	}
	defer f.Close()
	if err := pprof.WriteHeapProfile(f); err != nil {
		_ = HandleError(ctx, err, cfg)
	}
}
// MakeMain makes a main function for the cli or tests.
func MakeMain(options ...holos.Option) func() int {
return func() (exitCode int) {
cfg := holos.New(options...)
slog.SetDefault(cfg.Logger())
ctx := context.Background()
if format := os.Getenv("HOLOS_CPU_PROFILE"); format != "" {
f, _ := os.Create(fmt.Sprintf(format, os.Getppid(), os.Getpid()))
err := pprof.StartCPUProfile(f)
defer func() {
pprof.StopCPUProfile()
f.Close()
}()
if err != nil {
return HandleError(ctx, err, cfg)
}
}
defer memProfile(ctx, cfg)
if format := os.Getenv("HOLOS_TRACE"); format != "" {
f, _ := os.Create(fmt.Sprintf(format, os.Getppid(), os.Getpid()))
err := trace.Start(f)
defer func() {
trace.Stop()
f.Close()
}()
if err != nil {
return HandleError(ctx, err, cfg)
}
}
feature := &holos.EnvFlagger{}
if err := New(cfg, feature).ExecuteContext(ctx); err != nil {
return HandleError(ctx, err, cfg)

View File

@@ -16,6 +16,8 @@ import (
"github.com/spf13/cobra"
)
const tagHelp = "set the value of a cue @tag field in the form key [ = value ]"
func New(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
cmd := command.New("render")
cmd.Args = cobra.NoArgs
@@ -26,7 +28,7 @@ func New(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
}
func newPlatform(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
cmd := command.New("platform DIRECTORY")
cmd := command.New("platform")
cmd.Args = cobra.MaximumNArgs(1)
cmd.Example = "holos render platform"
cmd.Short = "render an entire platform"
@@ -38,13 +40,13 @@ func newPlatform(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
}
var concurrency int
cmd.Flags().IntVar(&concurrency, "concurrency", min(runtime.NumCPU(), 8), "number of components to render concurrently")
cmd.Flags().IntVar(&concurrency, "concurrency", runtime.NumCPU(), "number of components to render concurrently")
var platform string
cmd.Flags().StringVar(&platform, "platform", "./platform", "platform directory path")
var selector holos.Selector
cmd.Flags().VarP(&selector, "selector", "l", "label selector (e.g. label==string,label!=string)")
tagMap := make(holos.TagMap)
cmd.Flags().VarP(&tagMap, "inject", "t", "set the value of a cue @tag field from a key=value pair")
cmd.Flags().VarP(&tagMap, "inject", "t", tagHelp)
cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Root().Context()
@@ -70,7 +72,7 @@ func newPlatform(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
"--log-format", cfg.LogConfig().Format(),
}
opts := builder.PlatformOpts{
Fn: makePlatformRenderFunc(cmd.ErrOrStderr(), prefixArgs),
Fn: makeComponentRenderFunc(cmd.ErrOrStderr(), prefixArgs, tagMap.Tags()),
Selector: selector,
Concurrency: concurrency,
InfoEnabled: true,
@@ -102,9 +104,9 @@ func newComponent(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
}
tagMap := make(holos.TagMap)
cmd.Flags().VarP(&tagMap, "inject", "t", "set the value of a cue @tag field from a key=value pair")
cmd.Flags().VarP(&tagMap, "inject", "t", tagHelp)
var concurrency int
cmd.Flags().IntVar(&concurrency, "concurrency", min(runtime.NumCPU(), 8), "number of concurrent build steps")
cmd.Flags().IntVar(&concurrency, "concurrency", runtime.NumCPU(), "number of concurrent build steps")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
ctx := cmd.Root().Context()
@@ -134,7 +136,7 @@ func newComponent(cfg *holos.Config, feature holos.Flagger) *cobra.Command {
return cmd
}
func makePlatformRenderFunc(w io.Writer, prefixArgs []string) builder.BuildFunc {
func makeComponentRenderFunc(w io.Writer, prefixArgs, cliTags []string) func(context.Context, int, holos.Component) error {
return func(ctx context.Context, idx int, component holos.Component) error {
select {
case <-ctx.Done():
@@ -147,11 +149,14 @@ func makePlatformRenderFunc(w io.Writer, prefixArgs []string) builder.BuildFunc
args := make([]string, 0, 10+len(prefixArgs)+(len(tags)*2))
args = append(args, prefixArgs...)
args = append(args, "render", "component")
for _, tag := range cliTags {
args = append(args, "--inject", tag)
}
for _, tag := range tags {
args = append(args, "--inject", tag)
}
args = append(args, component.Path())
if _, err := util.RunCmdW(ctx, w, "holos", args...); err != nil {
if _, err := util.RunCmdA(ctx, w, "holos", args...); err != nil {
return errors.Format("could not render component: %w", err)
}
}

View File

@@ -4,7 +4,6 @@ import (
_ "embed"
"fmt"
"log/slog"
"os"
"github.com/spf13/cobra"
@@ -119,7 +118,20 @@ func newOrgCmd(feature holos.Flagger) (cmd *cobra.Command) {
}
func newCueCmd() (cmd *cobra.Command) {
cueCmd, _ := cue.New(os.Args[1:])
cmd = cueCmd.Command
return
// Get a handle on the cue root command fields.
root, _ := cue.New([]string{})
// Copy the fields to our embedded command.
cmd = command.New("cue")
cmd.Short = root.Short
cmd.Long = root.Long
// Pass all arguments through to RunE.
cmd.DisableFlagParsing = true
cmd.Args = cobra.ArbitraryArgs
// We do it this way so we handle errors correctly.
cmd.RunE = func(cmd *cobra.Command, args []string) error {
cueRootCommand, _ := cue.New(args)
return cueRootCommand.Run(cmd.Root().Context())
}
return cmd
}

View File

@@ -94,6 +94,7 @@ func newShowBuildPlanCmd() (cmd *cobra.Command) {
buildPlanOpts := holos.NewBuildOpts(path)
buildPlanOpts.Stderr = cmd.ErrOrStderr()
buildPlanOpts.Concurrency = concurrency
buildPlanOpts.Tags = tagMap.Tags()
platformOpts := builder.PlatformOpts{
Fn: makeBuildFunc(encoder, buildPlanOpts),
@@ -110,7 +111,7 @@ func newShowBuildPlanCmd() (cmd *cobra.Command) {
return cmd
}
func makeBuildFunc(encoder holos.OrderedEncoder, opts holos.BuildOpts) builder.BuildFunc {
func makeBuildFunc(encoder holos.OrderedEncoder, opts holos.BuildOpts) func(context.Context, int, holos.Component) error {
return func(ctx context.Context, idx int, component holos.Component) error {
select {
case <-ctx.Done():
@@ -120,6 +121,7 @@ func makeBuildFunc(encoder holos.OrderedEncoder, opts holos.BuildOpts) builder.B
if err != nil {
return errors.Wrap(err)
}
tags = append(tags, opts.Tags...)
inst, err := builder.LoadInstance(component.Path(), tags)
if err != nil {
return errors.Wrap(err)

View File

@@ -72,9 +72,12 @@ import "github.com/holos-run/holos/api/core/v1alpha5:core"
// Resources represents kubernetes resources mixed into the rendered manifest.
Resources: core.#Resources
// KustomizeConfig represents the configuration kustomize.
// KustomizeConfig represents the kustomize configuration.
KustomizeConfig: #KustomizeConfig
// Validators represent checks that must pass for output to be written.
Validators: {[string]: core.#Validator} @go(,map[NameLabel]core.Validator)
// Artifacts represents additional artifacts to mix in. Useful for adding
// GitOps resources. Each Artifact is unified without modification into the
// BuildPlan.

View File

@@ -46,14 +46,6 @@ package core
disabled?: bool @go(Disabled)
}
// BuildPlanSource reflects the origin of a [BuildPlan]. Useful to save a build
// plan to a file, then re-generate it without needing to process a [Platform]
// component collection.
#BuildPlanSource: {
// Component reflects the component that produced the build plan.
component?: #Component @go(Component)
}
// Artifact represents one fully rendered manifest produced by a [Transformer]
// sequence, which transforms a [Generator] collection. A [BuildPlan] produces
// an [Artifact] collection.
@@ -78,6 +70,7 @@ package core
artifact?: #FilePath @go(Artifact)
generators?: [...#Generator] @go(Generators,[]Generator)
transformers?: [...#Transformer] @go(Transformers,[]Transformer)
validators?: [...#Validator] @go(Validators,[]Validator)
skip?: bool @go(Skip)
}
@@ -229,15 +222,39 @@ package core
// is expected to happen in CUE against the kubectl version the user prefers.
#Kustomization: {...}
// FileContent represents file contents.
#FileContent: string
// FileContentMap represents a mapping of file paths to file contents.
#FileContentMap: {[string]: #FileContent}
// FilePath represents a file path.
#FilePath: string
// FileContent represents file contents.
#FileContent: string
// Validator validates files. Useful to validate an [Artifact] prior to writing
// it out to the final destination. Holos may execute validators concurrently.
// See the [validators] tutorial for an end to end example.
//
// [validators]: https://holos.run/docs/v1alpha5/tutorial/validators/
#Validator: {
// Kind represents the kind of transformer. Must be Kustomize, or Join.
kind: string & "Command" @go(Kind)
// Inputs represents the files to validate. Usually the final Artifact.
inputs: [...#FilePath] @go(Inputs,[]FilePath)
// Command represents a validation command. Ignored unless kind is Command.
command?: #Command @go(Command)
}
// Command represents a command vetting one or more artifacts. Holos appends
// fully qualified input file paths to the end of the args list, then executes
// the command. Inputs are written into a temporary directory prior to
// executing the command and removed afterwards.
#Command: {
args?: [...string] @go(Args,[]string)
}
// InternalLabel is an arbitrary unique identifier internal to holos itself.
// The holos cli is expected to never write a InternalLabel value to rendered
// output files, therefore use a InternalLabel when the identifier must be

View File

@@ -19,11 +19,14 @@ import (
Kustomization: ks.#Kustomization & {
apiVersion: "kustomize.config.k8s.io/v1beta1"
kind: "Kustomization"
_labels: commonLabels: {
includeSelectors: false
pairs: CommonLabels
_labels: {}
if len(CommonLabels) > 0 {
_labels: commonLabels: {
includeSelectors: false
pairs: CommonLabels
}
labels: [for x in _labels {x}]
}
labels: [for x in _labels {x}]
}
}
@@ -40,20 +43,11 @@ import (
Path: _
Parameters: _
Resources: _
OutputBaseDir: _
KustomizeConfig: _
Artifacts: {
HolosComponent: {
_path: string
if OutputBaseDir == "" {
_path: "components/\(Name)"
}
if OutputBaseDir != "" {
_path: "\(OutputBaseDir)/components/\(Name)"
}
artifact: "\(_path)/\(Name).gen.yaml"
artifact: _
let ResourcesOutput = "resources.gen.yaml"
generators: [
{
@@ -83,17 +77,6 @@ import (
]
}
}
BuildPlan: {
metadata: name: Name
if len(Labels) != 0 {
metadata: labels: Labels
}
if len(Annotations) != 0 {
metadata: annotations: Annotations
}
spec: artifacts: [for x in Artifacts {x}]
}
}
// https://holos.run/docs/next/api/author/#Helm
@@ -119,15 +102,7 @@ import (
Artifacts: {
HolosComponent: {
_path: string
if OutputBaseDir == "" {
_path: "components/\(Name)"
}
if OutputBaseDir != "" {
_path: "\(OutputBaseDir)/components/\(Name)"
}
artifact: "\(_path)/\(Name).gen.yaml"
artifact: _
let HelmOutput = "helm.gen.yaml"
let ResourcesOutput = "resources.gen.yaml"
generators: [
@@ -177,6 +152,26 @@ import (
]
}
}
}
#ComponentConfig: {
Name: _
Labels: _
Annotations: _
Validators: _
OutputBaseDir: _
Artifacts: HolosComponent: {
_path: string
if OutputBaseDir == "" {
_path: "components/\(Name)"
}
if OutputBaseDir != "" {
_path: "\(OutputBaseDir)/components/\(Name)"
}
artifact: "\(_path)/\(Name).gen.yaml"
validators: [for x in Validators {x & {inputs: [artifact]}}]
}
BuildPlan: {
metadata: name: Name

View File

@@ -0,0 +1,7 @@
package types
#Target: {
// Ensure name has a concrete value so json.Marshal works. See
// https://github.com/holos-run/holos/issues/348
name: string | *""
}

View File

@@ -14,12 +14,10 @@ import (
"gopkg.in/yaml.v3"
)
// yamlEncoder and jsonEncoder implement Encoder to write individual resources.
// Interface implementation assertions.
var _ Encoder = &yamlEncoder{}
var _ Encoder = &jsonEncoder{}
// seqEncoder implements SequentialEncoder to write resources in order.
var _ OrderedEncoder = &seqEncoder{}
var _ OrderedEncoder = &orderedEncoder{}
// StringSlice represents zero or more flag values.
type StringSlice []string
@@ -42,13 +40,22 @@ func (i *StringSlice) Set(value string) error {
return nil
}
// TagMap represents a map of key values for CUE TagMap for flag parsing.
type TagMap map[string]string
// TagMap represents a map of key values for CUE TagMap for flag parsing. The
// values are pointers to disambiguate between the case where a tag is a boolean
// ("--inject foo") and the case where a tag has a string zero value ("--inject
// foo="). Refer to the Tags field of [cue/load.Config]
//
// [cue/load.Config]: https://pkg.go.dev/cuelang.org/go@v0.10.1/cue/load#Config
type TagMap map[string]*string
func (t TagMap) Tags() []string {
parts := make([]string, 0, len(t))
for k, v := range t {
parts = append(parts, fmt.Sprintf("%s=%s", k, v))
for tag, val := range t {
if val == nil {
parts = append(parts, tag)
} else {
parts = append(parts, fmt.Sprintf("%s=%s", tag, *val))
}
}
return parts
}
@@ -62,10 +69,14 @@ func (t TagMap) String() string {
// is not supported.
func (t TagMap) Set(value string) error {
parts := strings.SplitN(value, "=", 2)
if len(parts) != 2 {
switch len(parts) {
case 1:
t[parts[0]] = nil
case 2:
t[parts[0]] = &parts[1]
default:
return errors.Format("invalid format, must be tag=value")
}
t[parts[0]] = parts[1]
return nil
}
@@ -183,7 +194,7 @@ func NewSequentialEncoder(format string, w io.Writer) (OrderedEncoder, error) {
if err != nil {
return nil, err
}
seqEnc := &seqEncoder{
seqEnc := &orderedEncoder{
Encoder: enc,
buf: make(map[int]any),
}
@@ -251,17 +262,25 @@ func IsSelected(labels Labels, selectors ...Selector) bool {
return true
}
type seqEncoder struct {
type orderedEncoder struct {
Encoder
mu sync.Mutex
buf map[int]any
next int
}
func (s *seqEncoder) Encode(idx int, v any) error {
// Encode encodes v in sequential or starting with idx 0.
//
// It is an error to provide idx values less than the next to encode. Is is an
// error to provide the same idx value more than once.
func (s *orderedEncoder) Encode(idx int, v any) error {
s.mu.Lock()
defer s.mu.Unlock()
if idx < s.next {
return fmt.Errorf("could not encode idx %d: must be greater than or equal to next idx %d", idx, s.next)
}
// If this is the next expected index, encode it and any buffered values
if idx == s.next {
if err := s.Encoder.Encode(v); err != nil {
@@ -284,6 +303,10 @@ func (s *seqEncoder) Encode(idx int, v any) error {
return nil
}
if _, ok := s.buf[idx]; ok {
return fmt.Errorf("could not encode idx %d: already exists", idx)
}
// Buffer out-of-order value
s.buf[idx] = v
return nil
@@ -297,6 +320,7 @@ type BuildOpts struct {
Stderr io.Writer
WriteTo string
Path string
Tags []string
}
func NewBuildOpts(path string) BuildOpts {
@@ -306,5 +330,6 @@ func NewBuildOpts(path string) BuildOpts {
Stderr: os.Stderr,
WriteTo: "deploy",
Path: path,
Tags: make([]string, 0, 10),
}
}

View File

@@ -43,10 +43,11 @@ func RunCmd(ctx context.Context, name string, args ...string) (result RunResult,
cmd.Stdout = result.Stdout
cmd.Stderr = result.Stderr
log := logger.FromContext(ctx)
log.DebugContext(ctx, "running: "+name, "name", name, "args", args)
command := fmt.Sprintf("%s '%s'", name, strings.Join(args, "' '"))
log.DebugContext(ctx, "running command: "+command, "name", name, "args", args)
err = cmd.Run()
if err != nil {
err = fmt.Errorf("could not run command: %s %s: %w", name, strings.Join(args, " "), err)
err = fmt.Errorf("could not run command:\n\t%s\n\t%w", command, err)
}
return result, err
}
@@ -55,15 +56,25 @@ func RunCmd(ctx context.Context, name string, args ...string) (result RunResult,
func RunCmdW(ctx context.Context, w io.Writer, name string, args ...string) (result RunResult, err error) {
result, err = RunCmd(ctx, name, args...)
if err != nil {
err = fmt.Errorf("could not run command: %s %s: %w", name, strings.Join(args, " "), err)
mu.Lock()
defer mu.Unlock()
_, err2 := io.Copy(w, result.Stderr)
mu.Unlock()
if err2 != nil {
err = fmt.Errorf("could not copy stderr: %s: %w", err2.Error(), err)
}
}
return
return result, err
}
// RunCmdA calls RunCmd and always copies the result stderr to w, whether or
// not the command succeeded.  Compare with RunCmdW, which copies stderr only
// when the command fails.  A copy failure is folded into the returned error.
func RunCmdA(ctx context.Context, w io.Writer, name string, args ...string) (result RunResult, err error) {
	result, err = RunCmd(ctx, name, args...)
	// mu serializes copies to w across concurrent calls so output from
	// different commands does not interleave.
	mu.Lock()
	defer mu.Unlock()
	if _, err2 := io.Copy(w, result.Stderr); err2 != nil {
		err = fmt.Errorf("could not copy stderr: %s: %w", err2.Error(), err)
	}
	return result, err
}
// RunInteractiveCmd runs a command within a context but allows the command to

View File

@@ -1 +1 @@
100
101

View File

@@ -1 +1 @@
0
3