Compare commits

..

7 Commits

Author SHA1 Message Date
Jeff McCune
6f39cc6fdc docs: add istio section to expose-a-service
This patch adds Istio to the Expose a Service documentation and
introduces new concepts: the Kubernetes build plan schema, the
namespaces component, and an example of how to safely re-use Helm values
from the root in multiple leaf components.

fix: istio cni not ready on k3d
---

The istio-k3d component embedded into holos fixes the cni pod not
becoming ready with our k3d local cluster guide.  The pod log error this
fixes is:

    configuration requires updates, (re)writing CNI config file at "": no networks found in /host/etc/cni/net.d
    Istio CNI is configured as chained plugin, but cannot find existing CNI network config: no networks found in /host/etc/cni/net.d
    Waiting for CNI network config file to be written in /host/etc/cni/net.d...

[Platform k3d]: https://istio.io/latest/docs/ambient/install/platform-prerequisites/#k3d

docs: clarify how to reset the local cluster
---

This is something we do all the time while developing and documenting,
so make it easy and fast to reset the cluster to a known good state.
2024-09-12 10:36:56 -07:00
Jeff McCune
e410563f82 docs: add namespaces to expose a service guide
This patch adds the schema api for the Kubernetes build plan, which
produces plain API resources directly from CUE.  It's needed for the
namespaces component which is foundational to many of our guides.

The first guide that needs this is the expose a service guide, we need
to register the namespaces from the istio component.
2024-09-11 17:22:01 -07:00
Jeff McCune
0a53bef72a docs: apply the gateway-api in the expose a service doc
This patch completes the first draft of the Gateway API section.
2024-09-11 14:31:02 -07:00
Jeff McCune
02a450e597 api: clarify Name field of Helm and Kustomize schema 2024-09-11 14:09:13 -07:00
Jeff McCune
e1222cf367 docs: add the gateway-api to the expose-a-service doc
The Expose a Service doc is meant to be the second step after the
Quickstart doc.  This commit adds the section describing how to install
the Gateway API.

The Kustomize build plan is introduced at this point in a similar way
the Helm build plan was introduced in the quickstart.
2024-09-11 14:03:40 -07:00
Jeff McCune
740a3d21a1 generate: add schematic for a workload-cluster
We need an easy way to help people add a workload cluster to their
workload fleet when working through the guides.  Generated platforms
should not define any clusters so they can be reused with multiple
guides.

This patch adds a simple component schematic that drops a root cue file
to define a workload cluster named workload.

The result is the following sequence renders the Gateway API when run
from an empty directory.

    holos generate platform guide
    holos generate component workload-cluster
    holos generate component gateway-api
    holos render platform ./platform

Without this patch nothing is rendered because there are no workload
clusters in the base guide platform.
2024-09-11 13:23:36 -07:00
Jeff McCune
1114b65a47 schema: remove management cluster from standard fleet
Having the management cluster hard coded into the definition of the
standard fleets is problematic for guides that don't need a management
cluster.

Define the fleets, but leave the set of clusters empty until they're
needed.
2024-09-11 13:12:44 -07:00
37 changed files with 15661 additions and 61 deletions

View File

@@ -5,9 +5,12 @@
"mdx"
],
"words": [
"admissionregistration",
"apiextensions",
"applicationset",
"argoproj",
"authcode",
"authorizationpolicies",
"authpolicy",
"authproxy",
"authroutes",
@@ -16,21 +19,31 @@
"CAROOT",
"clsx",
"clusterissuer",
"clusterrole",
"clusterrolebinding",
"configmap",
"cookiesecret",
"coredns",
"corev",
"CRD's",
"crds",
"creds",
"crossplane",
"cuecontext",
"cuelang",
"customresourcedefinition",
"daemonset",
"destinationrules",
"devicecode",
"dnsmasq",
"dscacheutil",
"entgo",
"envoyfilters",
"errgroup",
"fctr",
"fieldmaskpb",
"flushcache",
"gatewayclasses",
"gendoc",
"ghaction",
"gitops",
@@ -38,10 +51,13 @@
"golangci",
"goreleaser",
"grpcreflect",
"grpcroutes",
"grpcurl",
"holos",
"holoslogger",
"horizontalpodautoscaler",
"httpbin",
"httproutes",
"Infima",
"isatty",
"istiod",
@@ -60,7 +76,9 @@
"mattn",
"mindmap",
"mktemp",
"msqbn",
"Multicluster",
"mutatingwebhookconfiguration",
"mxcl",
"myhostname",
"nameserver",
@@ -68,26 +86,38 @@
"orgid",
"otelconnect",
"Parentspanid",
"peerauthentications",
"pflag",
"pipefail",
"PKCE",
"platformconnect",
"poddisruptionbudget",
"podinfo",
"portmapping",
"promhttp",
"protobuf",
"protojson",
"proxyconfigs",
"Pulumi",
"putenv",
"quickstart",
"referencegrants",
"requestauthentications",
"retryable",
"rolebinding",
"ropc",
"SECRETKEY",
"secretstores",
"serverlb",
"serverside",
"serviceaccount",
"serviceentries",
"spanid",
"spiffe",
"startupapicheck",
"stefanprodan",
"structpb",
"svclb",
"systemconnect",
"tablewriter",
"Tiltfile",
@@ -105,7 +135,13 @@
"usecases",
"userconnect",
"userdata",
"validatingwebhookconfiguration",
"virtualservices",
"wasmplugins",
"workloadentries",
"workloadgroups",
"zerolog",
"zitadel"
"zitadel",
"ztunnel"
]
}

View File

@@ -15,14 +15,14 @@ import (
// from package core. Useful as a convenience wrapper to render a HelmChart
// with optional mix-in resources and Kustomization post-processing.
type Helm struct {
// Name represents the chart name.
// Name represents the Component name.
Name string
// Version represents the chart version.
Version string
// Namespace represents the helm namespace option when rendering the chart.
Namespace string
// Resources are kubernetes api objects to mix into the output.
Resources map[string]any `cue:"{...}"`
Resources map[string]any
// Repo represents the chart repository
Repo struct {
@@ -129,7 +129,7 @@ type StandardFleets struct {
// Workload represents a Fleet of zero or more workload Clusters.
Workload Fleet `json:"workload" cue:"{name: \"workload\"}"`
// Management represents a Fleet with one Cluster named management.
Management Fleet `json:"management" cue:"{name: \"management\", clusters: management: _}"`
Management Fleet `json:"management" cue:"{name: \"management\"}"`
}
// Platform is a convenience structure to produce a core Platform specification
@@ -148,3 +148,28 @@ type Platform struct {
// and render each listed Component, injecting the Model.
Output core.Platform
}
// Kustomize provides a BuildPlan via the Output field which contains one
// KustomizeBuild from package core.
type Kustomize struct {
// Name represents the Component name.
Name string
// Kustomization represents the kustomize build plan for holos to render.
Kustomization core.KustomizeBuild
// Output represents the derived BuildPlan for the Holos cli to render.
Output core.BuildPlan
}
// Kubernetes provides a BuildPlan via the Output field which contains inline
// API Objects provided directly from CUE.
type Kubernetes struct {
// Name represents the Component name.
Name string
// Resources represents the kubernetes api objects for the Component.
Resources map[string]any
// Output represents the derived BuildPlan for the Holos cli to render.
Output core.BuildPlan
}

View File

@@ -14,6 +14,8 @@ Package v1alpha3 contains CUE definitions intended as convenience wrappers aroun
- [type Cluster](<#Cluster>)
- [type Fleet](<#Fleet>)
- [type Helm](<#Helm>)
- [type Kubernetes](<#Kubernetes>)
- [type Kustomize](<#Kustomize>)
- [type Platform](<#Platform>)
- [type StandardFleets](<#StandardFleets>)
@@ -80,14 +82,14 @@ Helm provides a BuildPlan via the Output field which contains one HelmChart from
```go
type Helm struct {
// Name represents the chart name.
// Name represents the Component name.
Name string
// Version represents the chart version.
Version string
// Namespace represents the helm namespace option when rendering the chart.
Namespace string
// Resources are kubernetes api objects to mix into the output.
Resources map[string]any `cue:"{...}"`
Resources map[string]any
// Repo represents the chart repository
Repo struct {
@@ -131,6 +133,41 @@ type Helm struct {
}
```
<a name="Kubernetes"></a>
## type Kubernetes {#Kubernetes}
Kubernetes provides a BuildPlan via the Output field which contains inline API Objects provided directly from CUE.
```go
type Kubernetes struct {
// Name represents the Component name.
Name string
// Resources represents the kubernetes api objects for the Component.
Resources map[string]any
// Output represents the derived BuildPlan for the Holos cli to render.
Output core.BuildPlan
}
```
<a name="Kustomize"></a>
## type Kustomize {#Kustomize}
Kustomize provides a BuildPlan via the Output field which contains one KustomizeBuild from package core.
```go
type Kustomize struct {
// Name represents the Component name.
Name string
// Kustomization represents the kustomize build plan for holos to render.
Kustomization core.KustomizeBuild
// Output represents the derived BuildPlan for the Holos cli to render.
Output core.BuildPlan
}
```
<a name="Platform"></a>
## type Platform {#Platform}
@@ -161,7 +198,7 @@ type StandardFleets struct {
// Workload represents a Fleet of zero or more workload Clusters.
Workload Fleet `json:"workload" cue:"{name: \"workload\"}"`
// Management represents a Fleet with one Cluster named management.
Management Fleet `json:"management" cue:"{name: \"management\", clusters: management: _}"`
Management Fleet `json:"management" cue:"{name: \"management\"}"`
}
```

View File

@@ -1,7 +1,7 @@
---
description: Use Holos to expose a Service with the Gateway API.
slug: /guides/expose-a-service
sidebar_position: 300
sidebar_position: 200
---
import Tabs from '@theme/Tabs';
@@ -13,24 +13,16 @@ import Admonition from '@theme/Admonition';
In this guide, you'll learn how to expose a service with Holos using the Gateway
API.
:::warning TODO
Complete this section once the steps are complete.
:::
The [Concepts](/docs/concepts) page defines capitalized terms such as Platform
and Component.
## What you'll need {#requirements}
:::warning TODO
Complete this section once the steps are complete.
:::
You'll need the following tools installed to complete this guide.
1. [holos](/docs/install) - to build the Platform.
2. [helm](https://helm.sh/docs/intro/install/) - to render Holos Components that
wrap upstream Helm charts.
2. [helm](https://helm.sh/docs/intro/install/) - to render Helm Components.
3. [kubectl](https://kubernetes.io/docs/tasks/tools/) - to render Kustomize Components.
Optionally, if you'd like to apply the rendered manifests to a real Cluster,
first complete the [localhost Guide](../local-cluster).
@@ -42,16 +34,16 @@ stored in a Git repository.
<Tabs groupId="init">
<TabItem value="command" label="Command">
```bash
mkdir expose-a-service
cd expose-a-service
git init
```
```bash
mkdir expose-a-service
cd expose-a-service
git init
```
</TabItem>
<TabItem value="output" label="Output">
```txt
Initialized empty Git repository in /expose-a-service/.git/
```
```txt showLineNumbers
Initialized empty Git repository in /expose-a-service/.git/
```
</TabItem>
</Tabs>
@@ -60,36 +52,748 @@ repository unless stated otherwise.
## Generate the Platform {#Generate-Platform}
Start by generating a platform used as the basis for our guides.
Start by generating a platform with one workload Cluster. The `guide` Platform
is intended as a starting point for all of our guides.
<Tabs groupId="generate-platform">
<TabItem value="command" label="Command">
```bash
holos generate platform guide
holos generate component workload-cluster
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
generated component
```
</TabItem>
</Tabs>
Commit the generated platform config to the repository.
<Tabs groupId="commit-platform">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "holos generate platform guide - $(holos --version)"
```
```bash
git add .
git commit -m "holos generate platform guide - $(holos --version)"
```
</TabItem>
<TabItem value="output" label="Output">
```txt
[main (root-commit) 0b17b7f] holos generate platform guide - 0.93.3
213 files changed, 72349 insertions(+)
...
```
```txt showLineNumbers
[main (root-commit) 0b17b7f] holos generate platform guide - 0.93.3
213 files changed, 72349 insertions(+)
...
```
</TabItem>
</Tabs>
## Manage httpbin {#manage-httpbin}
## Gateway API
The platform you generated is currently empty. Run the following command to
generate a Holos Component for the
[httpbin](https://github.com/mccutchen/go-httpbin) service.
The Gateway API is an official Kubernetes project focused on L4 and L7 routing.
You'll use the custom resources defined by the Gateway API to expose the httpbin
service outside of the cluster. The Kubernetes Gateway API does not come
installed by default on most Kubernetes clusters, so we need to manage the
custom resource definitions (CRDs).
httpbin is a simple backend service useful for end-to-end testing. In this
guide, we use httpbin as an example of a service your organization develops and
deploys onto your Platform.
Run the following command to generate a Component to manage the Gateway API.
<Tabs groupId="gen-gateway-api">
<TabItem value="command" label="Command">
```bash
holos generate component gateway-api
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
generated component
```
</TabItem>
</Tabs>
The command generates two main configuration files, one at the leaf, and another
at the root of the tree. At the leaf, the config produces a Kustomize build
plan for Holos to render. At the root, the config adds the Component to all
Clusters in the Platform.
Notice the `kustomization.yaml` file at the leaf. This is an unmodified
upstream copy of the standard way to install the Gateway API.
<Tabs groupId="gateway-api-files">
<TabItem value="components/gateway-api/gateway-api.cue" label="Leaf">
`components/gateway-api/gateway-api.cue`
```cue showLineNumbers
package holos
// Produce a kubectl kustomize build plan.
(#Kustomize & {Name: "gateway-api"}).Output
```
</TabItem>
<TabItem value="kustomization.yaml" label="kustomization.yaml">
`components/gateway-api/kustomization.yaml`
```yaml showLineNumbers
resources:
- standard/gateway.networking.k8s.io_gatewayclasses.yaml
- standard/gateway.networking.k8s.io_gateways.yaml
- standard/gateway.networking.k8s.io_grpcroutes.yaml
- standard/gateway.networking.k8s.io_httproutes.yaml
- standard/gateway.networking.k8s.io_referencegrants.yaml
```
</TabItem>
<TabItem value="gateway-api.gen.cue" label="Root">
`gateway-api.gen.cue`
```cue showLineNumbers
package holos
// Manage on every Cluster in the Platform
for Fleet in #Fleets {
for Cluster in Fleet.clusters {
#Platform: Components: "\(Cluster.name)/gateway-api": {
path: "components/gateway-api"
cluster: Cluster.name
}
}
}
```
</TabItem>
</Tabs>
Render the Platform to produce the rendered Component manifests for the workload clusters.
<Tabs groupId="render-platform-gateway">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
rendered components/gateway-api for cluster workload in 279.312292ms
```
</TabItem>
</Tabs>
:::tip
This example is equivalent to running `kubectl kustomize
./components/gateway-api` and saving the output to a file. Holos simplifies
this task and makes it consistent with Helm and other tools.
:::
Add and commit the Component and rendered Platform.
<Tabs groupId="commit-gateway-api">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "add gateway-api component"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
[main 88575a5] add gateway-api component
9 files changed, 26907 insertions(+)
create mode 100644 components/gateway-api/gateway-api.cue
create mode 100644 components/gateway-api/kustomization.yaml
create mode 100644 components/gateway-api/standard/gateway.networking.k8s.io_gatewayclasses.yaml
create mode 100644 components/gateway-api/standard/gateway.networking.k8s.io_gateways.yaml
create mode 100644 components/gateway-api/standard/gateway.networking.k8s.io_grpcroutes.yaml
create mode 100644 components/gateway-api/standard/gateway.networking.k8s.io_httproutes.yaml
create mode 100644 components/gateway-api/standard/gateway.networking.k8s.io_referencegrants.yaml
create mode 100644 deploy/clusters/workload/components/gateway-api/gateway-api.gen.yaml
create mode 100644 gateway-api.gen.cue
```
</TabItem>
</Tabs>
Optionally apply the rendered component to your cluster.
<Tabs groupId="apply-gateway-api">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/gateway-api
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
customresourcedefinition.apiextensions.k8s.io/gatewayclasses.gateway.networking.k8s.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/gateways.gateway.networking.k8s.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/grpcroutes.gateway.networking.k8s.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/httproutes.gateway.networking.k8s.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/referencegrants.gateway.networking.k8s.io serverside-applied
```
</TabItem>
</Tabs>
## Namespaces
We often need to manage namespaces prior to workloads being deployed. This is
necessary because a namespace is a security boundary. Holos makes it easier,
safer, and more consistent to manage service accounts, role bindings, and
secrets prior to deploying workloads into a namespace.
We'll see how this works with the namespaces component, which offers a mechanism
for other components to register their namespaces. The namespaces component
initializes each registered namespace, optionally mixing in resources
consistently.
Run the following command to generate the namespaces component.
<Tabs groupId="gen-namespaces">
<TabItem value="command" label="Command">
```bash
holos generate component namespaces
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
generated component
```
</TabItem>
</Tabs>
The command generates two main configuration files like we've seen with other
components. One file at the leaf, and another at the root. The leaf uses a
Kubernetes build plan to produce resources directly from CUE.
<Tabs groupId="namespaces-files">
<TabItem value="components/namespaces/namespaces.cue" label="Leaf">
`components/namespaces/namespaces.cue`
```cue showLineNumbers
package holos
let Objects = {
Name: "namespaces"
// highlight-next-line
Resources: Namespace: #Namespaces
}
// Produce a kubernetes objects build plan.
(#Kubernetes & Objects).Output
```
</TabItem>
<TabItem value="namespaces.gen.cue" label="Root">
`namespaces.gen.cue`
```cue showLineNumbers
package holos
import corev1 "k8s.io/api/core/v1"
// #Namespaces defines all managed namespaces in the Platform.
// Holos adopts the sig-multicluster position of namespace sameness.
#Namespaces: {
// Validate against v1 of the kubernetes core api
// highlight-next-line
[Name=string]: corev1.#Namespace & {
metadata: name: Name
}
}
// Manage the Component on every Cluster in the Platform
for Fleet in #Fleets {
for Cluster in Fleet.clusters {
#Platform: Components: "\(Cluster.name)/namespaces": {
path: "components/namespaces"
cluster: Cluster.name
}
}
}
```
</TabItem>
</Tabs>
Notice the highlighted line in the leaf file. Resources are managed directly in
CUE at the leaf using the Kubernetes component. This is the same mechanism used
to mix-in resources to Helm and Kustomize components. The leaf refers to
`#Namespaces` defined at the root. At the root `#Namespaces` enforces a
constraint: each Namespace must conform to the `k8s.io/api/core/v1`
specification.
:::important
We've covered three kinds of components so far. The [Quickstart] guide
introduced Helm. We've used Kustomize and Kubernetes in this guide.
Holos offers a consistent way to manage these different kinds of packaging
safely and easily.
:::
- At the **leaf** Holos tailors the component to your platform, mixing
in resources and customizing the rendered output.
- At the **root** Holos integrates a component with the rest of your platform.
You'll see this pattern again and again as you build your platform.
Render the platform to render the component for the workload clusters.
<Tabs groupId="render-platform-gateway">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
rendered components/namespaces for cluster workload in 72.675292ms
rendered components/gateway-api for cluster workload in 259.174583ms
```
</TabItem>
</Tabs>
Add and commit the configuration and rendered manifests.
<Tabs groupId="commit-gateway-api">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "add namespaces component"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
[main 1bf0d61] add namespaces component
3 files changed, 30 insertions(+)
create mode 100644 components/namespaces/namespaces.cue
create mode 100644 deploy/clusters/workload/components/namespaces/namespaces.gen.yaml
create mode 100644 namespaces.gen.cue
```
</TabItem>
</Tabs>
`#Namespaces` is currently empty, so the rendered output of
`namespaces.gen.yaml` is also empty.
:::tip
Namespaces will be automatically managed as we add more components to the
platform over time.
:::
## Istio
We'll manage Istio to implement the Gateway API so we can expose the httpbin
service outside of the cluster.
Run the following command to generate the istio components.
<Tabs groupId="gen-istio">
<TabItem value="command" label="Command">
```bash
holos generate component istio
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
generated component
```
</TabItem>
</Tabs>
:::important
Mix in the `istio-k3d` component if you're applying the rendered manifests to
k3d as described in our [Local Cluster] guide.
:::
Skip this step if you aren't using k3d. Istio needs to be configured to refer
to the nonstandard cni configuration paths k3d uses.
<Tabs groupId="gen-istio-k3d">
<TabItem value="command" label="Command">
```bash
holos generate component istio-k3d
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
generated component
```
</TabItem>
<TabItem value="file" label="istio-k3d.gen.cue">
Holos makes it easier and safer to mix-in this additional configuration at the root.
```cue showLineNumbers
package holos
// If you are using k3d with the default Flannel CNI, you must append some
// values to your installation command, as k3d uses nonstandard locations for
// CNI configuration and binaries.
//
// See https://istio.io/latest/docs/ambient/install/platform-prerequisites/#k3d
#Istio: Values: cni: {
cniConfDir: "/var/lib/rancher/k3s/agent/etc/cni/net.d"
cniBinDir: "/bin"
}
```
</TabItem>
</Tabs>
Consistent with the other components we've seen, the istio components define
configuration at the root and leaves of the tree. Unlike previous components
we've generated, this command generated multiple components to manage Istio.
<Tabs groupId="tree-istio">
<TabItem value="command" label="Command">
```bash
tree components/istio
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
components/istio
├── base
│   ├── istio-base.cue
│   └── values.gen.cue
├── cni
│   ├── cni.cue
│   └── values.gen.cue
├── gateway
│   ├── gateway.cue
│   └── values.gen.cue
├── istiod
│   ├── istiod.cue
│   └── values.gen.cue
└── ztunnel
├── values.gen.cue
└── ztunnel.cue
6 directories, 10 files
```
</TabItem>
</Tabs>
These components share the configuration defined at the root in `istio.gen.cue`.
Let's review how Holos makes it safer and easier to share Helm values defined at
the root with the istiod and cni components defined at the leaf.
1. istiod and cni use version `"1.23.1"` and namespace `"istio-system"` defined at
the root.
2. The Helm value to configure ambient (sidecar-less) mode is defined once at
the root.
3. The root adds a constraint to fail validation if the istio system namespace
is not `"istio-system"`. This constraint makes future upgrades safer: if the
upstream vendor changes the default namespace, the component will fail
validation.
4. The root registers two Namespaces, `"istio-system"` and `"istio-ingress"`.
5. The root manages the components on all workload clusters in the platform.
<Tabs groupId="istio-files">
<TabItem value="istiod.cue" label="istiod">
Leaf `components/istio/istiod/istiod.cue`
```cue showLineNumbers
package holos
// Produce a helm chart build plan.
(#Helm & Chart).Output
let Chart = {
Name: "istiod"
// highlight-next-line
Version: #Istio.Version
// highlight-next-line
Namespace: #Istio.System.Namespace
Chart: chart: name: "istiod"
Repo: name: "istio"
Repo: url: "https://istio-release.storage.googleapis.com/charts"
// highlight-next-line
Values: #Istio.Values
}
```
</TabItem>
<TabItem value="cni.cue" label="cni">
Leaf `components/istio/cni/cni.cue`
```cue showLineNumbers
package holos
// Produce a helm chart build plan.
(#Helm & Chart).Output
let Chart = {
Name: "istio-cni"
// highlight-next-line
Version: #Istio.Version
// highlight-next-line
Namespace: #Istio.System.Namespace
Chart: chart: name: "cni"
Repo: name: "istio"
Repo: url: "https://istio-release.storage.googleapis.com/charts"
// highlight-next-line
Values: #Istio.Values
}
```
</TabItem>
<TabItem value="istio.gen.cue" label="Root">
Root `istio.gen.cue`
```cue showLineNumbers
package holos
// #Istio represents platform wide configuration
#Istio: {
// highlight-next-line
Version: "1.23.1"
// highlight-next-line
System: Namespace: "istio-system"
Gateway: Namespace: "istio-ingress"
// Constrain Helm values for safer, easier upgrades and consistency across
// platform components.
// highlight-next-line
Values: global: istioNamespace: System.Namespace
// Configure ambient mode
// highlight-next-line
Values: profile: "ambient"
}
// Register the Namespaces
#Namespaces: (#Istio.System.Namespace): _
#Namespaces: (#Istio.Gateway.Namespace): _
// Manage istio on workload clusters
for Cluster in #Fleets.workload.clusters {
#Platform: Components: {
"\(Cluster.name)/istio-base": {
path: "components/istio/base"
cluster: Cluster.name
}
"\(Cluster.name)/istiod": {
path: "components/istio/istiod"
cluster: Cluster.name
}
"\(Cluster.name)/istio-cni": {
path: "components/istio/cni"
cluster: Cluster.name
}
"\(Cluster.name)/istio-ztunnel": {
path: "components/istio/ztunnel"
cluster: Cluster.name
}
"\(Cluster.name)/istio-gateway": {
path: "components/istio/gateway"
cluster: Cluster.name
}
}
}
```
</TabItem>
</Tabs>
:::tip
Many software projects managed by Holos are organized into a collection of
components working together, for example to safely manage custom resource
definitions, secrets, and policy separately from the workloads that rely on
them.
:::
Render the platform to render the istio components for the workload clusters.
<Tabs groupId="render-platform-istio">
<TabItem value="command" label="Command">
```bash
holos render platform ./platform
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
rendered components/namespaces for cluster workload in 85.490833ms
rendered components/istio/ztunnel for cluster workload in 111.784667ms
rendered components/istio/cni for cluster workload in 112.362417ms
rendered components/istio/base for cluster workload in 113.058ms
rendered components/istio/gateway for cluster workload in 119.018208ms
rendered components/istio/istiod for cluster workload in 127.736334ms
rendered components/gateway-api for cluster workload in 181.922333ms
```
</TabItem>
</Tabs>
Add and commit the configuration and rendered manifests.
<Tabs groupId="commit-istio">
<TabItem value="command" label="Command">
```bash
git add .
git commit -m "add istio"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
[main aca8ff6] add istio
18 files changed, 18955 insertions(+)
create mode 100644 components/istio/base/istio-base.cue
create mode 100644 components/istio/base/values.gen.cue
create mode 100644 components/istio/cni/cni.cue
create mode 100644 components/istio/cni/values.gen.cue
create mode 100644 components/istio/gateway/gateway.cue
create mode 100644 components/istio/gateway/values.gen.cue
create mode 100644 components/istio/istiod/istiod.cue
create mode 100644 components/istio/istiod/values.gen.cue
create mode 100644 components/istio/ztunnel/values.gen.cue
create mode 100644 components/istio/ztunnel/ztunnel.cue
create mode 100644 deploy/clusters/workload/components/istio-base/istio-base.gen.yaml
create mode 100644 deploy/clusters/workload/components/istio-cni/istio-cni.gen.yaml
create mode 100644 deploy/clusters/workload/components/istio-gateway/istio-gateway.gen.yaml
create mode 100644 deploy/clusters/workload/components/istio-ztunnel/istio-ztunnel.gen.yaml
create mode 100644 deploy/clusters/workload/components/istiod/istiod.gen.yaml
create mode 100644 istio-k3d.gen.cue
create mode 100644 istio.gen.cue
```
</TabItem>
</Tabs>
Optionally apply the rendered component to your cluster.
<Tabs groupId="apply-istio-namespaces">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/namespaces
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
namespace/istio-ingress serverside-applied
namespace/istio-system serverside-applied
```
</TabItem>
</Tabs>
<Tabs groupId="apply-istio-base">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/istio-base
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
customresourcedefinition.apiextensions.k8s.io/wasmplugins.extensions.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/destinationrules.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/envoyfilters.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/gateways.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/proxyconfigs.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/serviceentries.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/sidecars.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/virtualservices.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/workloadentries.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/workloadgroups.networking.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/authorizationpolicies.security.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/peerauthentications.security.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/requestauthentications.security.istio.io serverside-applied
customresourcedefinition.apiextensions.k8s.io/telemetries.telemetry.istio.io serverside-applied
serviceaccount/istio-reader-service-account serverside-applied
validatingwebhookconfiguration.admissionregistration.k8s.io/istiod-default-validator serverside-applied
```
</TabItem>
</Tabs>
<Tabs groupId="apply-istiod">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/istiod
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
poddisruptionbudget.policy/istiod serverside-applied
serviceaccount/istiod serverside-applied
configmap/istio serverside-applied
configmap/istio-sidecar-injector serverside-applied
clusterrole.rbac.authorization.k8s.io/istiod-clusterrole-istio-system serverside-applied
clusterrole.rbac.authorization.k8s.io/istiod-gateway-controller-istio-system serverside-applied
clusterrole.rbac.authorization.k8s.io/istio-reader-clusterrole-istio-system serverside-applied
clusterrolebinding.rbac.authorization.k8s.io/istiod-clusterrole-istio-system serverside-applied
clusterrolebinding.rbac.authorization.k8s.io/istiod-gateway-controller-istio-system serverside-applied
clusterrolebinding.rbac.authorization.k8s.io/istio-reader-clusterrole-istio-system serverside-applied
role.rbac.authorization.k8s.io/istiod serverside-applied
rolebinding.rbac.authorization.k8s.io/istiod serverside-applied
service/istiod serverside-applied
deployment.apps/istiod serverside-applied
horizontalpodautoscaler.autoscaling/istiod serverside-applied
mutatingwebhookconfiguration.admissionregistration.k8s.io/istio-sidecar-injector serverside-applied
validatingwebhookconfiguration.admissionregistration.k8s.io/istio-validator-istio-system serverside-applied
```
</TabItem>
</Tabs>
<Tabs groupId="apply-istio-cni">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/istio-cni
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
serviceaccount/istio-cni serverside-applied
configmap/istio-cni-config serverside-applied
clusterrole.rbac.authorization.k8s.io/istio-cni serverside-applied
clusterrole.rbac.authorization.k8s.io/istio-cni-repair-role serverside-applied
clusterrole.rbac.authorization.k8s.io/istio-cni-ambient serverside-applied
clusterrolebinding.rbac.authorization.k8s.io/istio-cni serverside-applied
clusterrolebinding.rbac.authorization.k8s.io/istio-cni-repair-rolebinding serverside-applied
clusterrolebinding.rbac.authorization.k8s.io/istio-cni-ambient serverside-applied
daemonset.apps/istio-cni-node serverside-applied
```
</TabItem>
</Tabs>
<Tabs groupId="apply-istio-ztunnel">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/istio-ztunnel
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
serviceaccount/ztunnel serverside-applied
daemonset.apps/ztunnel serverside-applied
```
</TabItem>
</Tabs>
<Tabs groupId="apply-istio-gateway">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f deploy/clusters/workload/components/istio-gateway
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
serviceaccount/gateway serverside-applied
role.rbac.authorization.k8s.io/gateway serverside-applied
rolebinding.rbac.authorization.k8s.io/gateway serverside-applied
service/gateway serverside-applied
deployment.apps/gateway serverside-applied
horizontalpodautoscaler.autoscaling/gateway serverside-applied
```
</TabItem>
</Tabs>
Make sure all pod containers become ready.
<Tabs groupId="apply-istio-ready">
<TabItem value="command" label="Command">
```bash
kubectl get pods -A
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
NAMESPACE NAME READY STATUS RESTARTS AGE
istio-ingress gateway-6748c5f547-s46pj 1/1 Running 0 26s
istio-system istio-cni-node-852nr 1/1 Running 0 2m9s
istio-system istiod-5b4d8d4c77-t694z 1/1 Running 0 3m15s
istio-system ztunnel-msqbn 1/1 Running 0 63s
kube-system coredns-576bfc4dc7-2g4k9 1/1 Running 0 113m
kube-system local-path-provisioner-6795b5f9d8-wsz8p 1/1 Running 0 113m
kube-system metrics-server-557ff575fb-fctr7 1/1 Running 0 113m
kube-system svclb-gateway-5d311af0-fp5mk 3/3 Running 0 26s
```
</TabItem>
</Tabs>
Once all pods are ready, we're ready to manage httpbin so we can route http
traffic to it.
## httpbin
[Quickstart]: /docs/quickstart
[Local Cluster]: /docs/guides/local-cluster

View File

@@ -1,7 +1,7 @@
---
description: Build a local Cluster to use with these guides.
slug: /guides/local-cluster
sidebar_position: 200
sidebar_position: 300
---
import Tabs from '@theme/Tabs';
@@ -10,15 +10,97 @@ import Admonition from '@theme/Admonition';
# Local Cluster
In this guide you'll set up a Cluster on your local host to apply and explore
the configuration described in our other guides. After completing this guide
you'll have a standard Kubernetes API server with proper DNS and TLS
certificates. You'll be able to easily reset the cluster to a known good state
to iterate on your own Platform.
In this guide we'll set up a local k3d cluster to apply and explore the
configuration described in our other guides. After completing this guide you'll
have a standard Kubernetes API server with proper DNS and TLS certificates.
You'll be able to easily reset the cluster to a known good state to iterate on
your own Platform.
The [Concepts](/docs/concepts) page defines capitalized terms such as Platform
and Component.
## Reset the Cluster
If you've already followed this guide, reset the cluster by running the
following commands. Skip this section if you're creating a cluster for the
first time.
First, delete the cluster.
<Tabs groupId="k3d-cluster-delete">
<TabItem value="command" label="Command">
```bash
k3d cluster delete workload
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
INFO[0000] Deleting cluster 'workload'
INFO[0000] Deleting cluster network 'k3d-workload'
INFO[0000] Deleting 1 attached volumes...
INFO[0000] Removing cluster details from default kubeconfig...
INFO[0000] Removing standalone kubeconfig file (if there is one)...
INFO[0000] Successfully deleted cluster workload!
```
</TabItem>
</Tabs>
Then create the cluster again.
<Tabs groupId="k3d-cluster-create">
<TabItem value="command" label="Command">
```bash
k3d cluster create workload \
--registry-use k3d-registry.holos.localhost:5100 \
--port "443:443@loadbalancer" \
--k3s-arg "--disable=traefik@server:0"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
INFO[0000] portmapping '443:443' targets the loadbalancer: defaulting to [servers:*:proxy agents:*:proxy]
INFO[0000] Prep: Network
INFO[0000] Created network 'k3d-workload'
INFO[0000] Created image volume k3d-workload-images
INFO[0000] Starting new tools node...
INFO[0000] Starting node 'k3d-workload-tools'
INFO[0001] Creating node 'k3d-workload-server-0'
INFO[0001] Creating LoadBalancer 'k3d-workload-serverlb'
INFO[0001] Using the k3d-tools node to gather environment information
INFO[0001] HostIP: using network gateway 172.17.0.1 address
INFO[0001] Starting cluster 'workload'
INFO[0001] Starting servers...
INFO[0001] Starting node 'k3d-workload-server-0'
INFO[0003] All agents already running.
INFO[0003] Starting helpers...
INFO[0003] Starting node 'k3d-workload-serverlb'
INFO[0009] Injecting records for hostAliases (incl. host.k3d.internal) and for 3 network members into CoreDNS configmap...
INFO[0012] Cluster 'workload' created successfully!
INFO[0012] You can now use it like this:
kubectl cluster-info
```
</TabItem>
</Tabs>
Finally, add your trusted certificate authority.
<Tabs groupId="apply-local-ca">
<TabItem value="command" label="Command">
```bash
kubectl apply --server-side=true -f "$(mkcert -CAROOT)/namespace.yaml"
kubectl apply --server-side=true -f "$(mkcert -CAROOT)/local-ca.yaml"
```
</TabItem>
<TabItem value="output" label="Output">
```txt showLineNumbers
namespace/cert-manager serverside-applied
secret/local-ca serverside-applied
```
</TabItem>
</Tabs>
You're back to the same state as the first time you completed this guide.
## What you'll need {#requirements}
You'll need the following tools installed to complete this guide.
@@ -150,10 +232,11 @@ cp -p "${CAROOT}/rootCA.pem" ca.crt
cp -p "${CAROOT}/rootCA.pem" tls.crt
cp -p "${CAROOT}/rootCA-key.pem" tls.key
kubectl create secret generic --from-file=. --dry-run=client -o yaml local-ca > ../local-ca.yaml
echo 'type: kubernetes.io/tls' >> ../local-ca.yaml
cd ..
echo 'type: kubernetes.io/tls' >> local-ca.yaml
kubectl apply --server-side=true -f- <<EOF
cat <<EOF > namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
@@ -164,8 +247,12 @@ spec:
finalizers:
- kubernetes
EOF
kubectl apply --server-side=true -f namespace.yaml
kubectl apply -n cert-manager --server-side=true -f local-ca.yaml
# Save the Secret to easily reset the cluster later.
install -m 0644 namespace.yaml "${CAROOT}/namespace.yaml"
install -m 0600 local-ca.yaml "${CAROOT}/local-ca.yaml"
```
:::warning
@@ -184,12 +271,6 @@ with:
k3d cluster delete workload
```
## Reset {#reset}
If you'd like to reset to a known good state, execute the [Clean Up](#clean-up)
section, then [Create the Cluster](#create-the-cluster) and the [Setup Root
CA](#setup-root-ca) tasks.
## Next Steps
Now that you have a real cluster, apply and explore the manifests Holos renders

View File

@@ -0,0 +1,4 @@
package holos
// Produce a kubectl kustomize build plan.
(#Kustomize & {Name: "gateway-api"}).Output

View File

@@ -0,0 +1,6 @@
resources:
- standard/gateway.networking.k8s.io_gatewayclasses.yaml
- standard/gateway.networking.k8s.io_gateways.yaml
- standard/gateway.networking.k8s.io_grpcroutes.yaml
- standard/gateway.networking.k8s.io_httproutes.yaml
- standard/gateway.networking.k8s.io_referencegrants.yaml

View File

@@ -0,0 +1,524 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2997
gateway.networking.k8s.io/bundle-version: v1.1.0
gateway.networking.k8s.io/channel: standard
creationTimestamp: null
name: gatewayclasses.gateway.networking.k8s.io
spec:
group: gateway.networking.k8s.io
names:
categories:
- gateway-api
kind: GatewayClass
listKind: GatewayClassList
plural: gatewayclasses
shortNames:
- gc
singular: gatewayclass
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.controllerName
name: Controller
type: string
- jsonPath: .status.conditions[?(@.type=="Accepted")].status
name: Accepted
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .spec.description
name: Description
priority: 1
type: string
name: v1
schema:
openAPIV3Schema:
description: |-
GatewayClass describes a class of Gateways available to the user for creating
Gateway resources.
It is recommended that this resource be used as a template for Gateways. This
means that a Gateway is based on the state of the GatewayClass at the time it
was created and changes to the GatewayClass or associated parameters are not
propagated down to existing Gateways. This recommendation is intended to
limit the blast radius of changes to GatewayClass or associated parameters.
If implementations choose to propagate GatewayClass changes to existing
Gateways, that MUST be clearly documented by the implementation.
Whenever one or more Gateways are using a GatewayClass, implementations SHOULD
add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the
associated GatewayClass. This ensures that a GatewayClass associated with a
Gateway is not deleted while in use.
GatewayClass is a Cluster level resource.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of GatewayClass.
properties:
controllerName:
description: |-
ControllerName is the name of the controller that is managing Gateways of
this class. The value of this field MUST be a domain prefixed path.
Example: "example.net/gateway-controller".
This field is not mutable and cannot be empty.
Support: Core
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
x-kubernetes-validations:
- message: Value is immutable
rule: self == oldSelf
description:
description: Description helps describe a GatewayClass with more details.
maxLength: 64
type: string
parametersRef:
description: |-
ParametersRef is a reference to a resource that contains the configuration
parameters corresponding to the GatewayClass. This is optional if the
controller does not require any additional configuration.
ParametersRef can reference a standard Kubernetes resource, i.e. ConfigMap,
or an implementation-specific custom resource. The resource can be
cluster-scoped or namespace-scoped.
If the referent cannot be found, the GatewayClass's "InvalidParameters"
status condition will be true.
A Gateway for this GatewayClass may provide its own `parametersRef`. When both are specified,
the merging behavior is implementation specific.
It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway.
Support: Implementation-specific
properties:
group:
description: Group is the group of the referent.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the referent.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: |-
Namespace is the namespace of the referent.
This field is required when referring to a Namespace-scoped resource and
MUST be unset when referring to a Cluster-scoped resource.
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- name
type: object
required:
- controllerName
type: object
status:
default:
conditions:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Waiting
status: Unknown
type: Accepted
description: |-
Status defines the current state of GatewayClass.
Implementations MUST populate status on all GatewayClass resources which
specify their controller name.
properties:
conditions:
default:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Pending
status: Unknown
type: Accepted
description: |-
Conditions is the current status from the controller for
this GatewayClass.
Controllers should prefer to publish conditions using values
of GatewayClassConditionType for the type of each Condition.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
direct use as an array at the field path .status.conditions. For
example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
observations of a foo's current state.\n\t // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
\ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
\ // other fields\n\t}"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
---
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
useful (see .node.status.conditions), the ability to deconflict is important.
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
maxItems: 8
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.controllerName
name: Controller
type: string
- jsonPath: .status.conditions[?(@.type=="Accepted")].status
name: Accepted
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .spec.description
name: Description
priority: 1
type: string
name: v1beta1
schema:
openAPIV3Schema:
description: |-
GatewayClass describes a class of Gateways available to the user for creating
Gateway resources.
It is recommended that this resource be used as a template for Gateways. This
means that a Gateway is based on the state of the GatewayClass at the time it
was created and changes to the GatewayClass or associated parameters are not
propagated down to existing Gateways. This recommendation is intended to
limit the blast radius of changes to GatewayClass or associated parameters.
If implementations choose to propagate GatewayClass changes to existing
Gateways, that MUST be clearly documented by the implementation.
Whenever one or more Gateways are using a GatewayClass, implementations SHOULD
add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the
associated GatewayClass. This ensures that a GatewayClass associated with a
Gateway is not deleted while in use.
GatewayClass is a Cluster level resource.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of GatewayClass.
properties:
controllerName:
description: |-
ControllerName is the name of the controller that is managing Gateways of
this class. The value of this field MUST be a domain prefixed path.
Example: "example.net/gateway-controller".
This field is not mutable and cannot be empty.
Support: Core
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
x-kubernetes-validations:
- message: Value is immutable
rule: self == oldSelf
description:
description: Description helps describe a GatewayClass with more details.
maxLength: 64
type: string
parametersRef:
description: |-
ParametersRef is a reference to a resource that contains the configuration
parameters corresponding to the GatewayClass. This is optional if the
controller does not require any additional configuration.
ParametersRef can reference a standard Kubernetes resource, i.e. ConfigMap,
or an implementation-specific custom resource. The resource can be
cluster-scoped or namespace-scoped.
If the referent cannot be found, the GatewayClass's "InvalidParameters"
status condition will be true.
A Gateway for this GatewayClass may provide its own `parametersRef`. When both are specified,
the merging behavior is implementation specific.
It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway.
Support: Implementation-specific
properties:
group:
description: Group is the group of the referent.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the referent.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: |-
Namespace is the namespace of the referent.
This field is required when referring to a Namespace-scoped resource and
MUST be unset when referring to a Cluster-scoped resource.
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- name
type: object
required:
- controllerName
type: object
status:
default:
conditions:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Waiting
status: Unknown
type: Accepted
description: |-
Status defines the current state of GatewayClass.
Implementations MUST populate status on all GatewayClass resources which
specify their controller name.
properties:
conditions:
default:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Pending
status: Unknown
type: Accepted
description: |-
Conditions is the current status from the controller for
this GatewayClass.
Controllers should prefer to publish conditions using values
of GatewayClassConditionType for the type of each Condition.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource.\n---\nThis struct is intended for
direct use as an array at the field path .status.conditions. For
example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
observations of a foo's current state.\n\t // Known .status.conditions.type
are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
\ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
\ // other fields\n\t}"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: |-
type of condition in CamelCase or in foo.example.com/CamelCase.
---
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
useful (see .node.status.conditions), the ability to deconflict is important.
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
maxItems: 8
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
type: object
required:
- spec
type: object
served: true
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: null
storedVersions: null

View File

@@ -0,0 +1,383 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2997
gateway.networking.k8s.io/bundle-version: v1.1.0
gateway.networking.k8s.io/channel: standard
creationTimestamp: null
name: referencegrants.gateway.networking.k8s.io
spec:
group: gateway.networking.k8s.io
names:
categories:
- gateway-api
kind: ReferenceGrant
listKind: ReferenceGrantList
plural: referencegrants
shortNames:
- refgrant
singular: referencegrant
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: The v1alpha2 version of ReferenceGrant has been deprecated
and will be removed in a future release of the API. Please upgrade to v1beta1.
name: v1alpha2
schema:
openAPIV3Schema:
description: |-
ReferenceGrant identifies kinds of resources in other namespaces that are
trusted to reference the specified kinds of resources in the same namespace
as the policy.
Each ReferenceGrant can be used to represent a unique trust relationship.
Additional Reference Grants can be used to add to the set of trusted
sources of inbound references for the namespace they are defined within.
A ReferenceGrant is required for all cross-namespace references in Gateway API
(with the exception of cross-namespace Route-Gateway attachment, which is
governed by the AllowedRoutes configuration on the Gateway, and cross-namespace
Service ParentRefs on a "consumer" mesh Route, which defines routing rules
applicable only to workloads in the Route namespace). ReferenceGrants allowing
a reference from a Route to a Service are only applicable to BackendRefs.
ReferenceGrant is a form of runtime verification allowing users to assert
which cross-namespace object references are permitted. Implementations that
support ReferenceGrant MUST NOT permit cross-namespace references which have
no grant, and MUST respond to the removal of a grant by revoking the access
that the grant allowed.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of ReferenceGrant.
properties:
from:
description: |-
From describes the trusted namespaces and kinds that can reference the
resources described in "To". Each entry in this list MUST be considered
to be an additional place that references can be valid from, or to put
this another way, entries MUST be combined using OR.
Support: Core
items:
description: ReferenceGrantFrom describes trusted namespaces and
kinds.
properties:
group:
description: |-
Group is the group of the referent.
When empty, the Kubernetes core API group is inferred.
Support: Core
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: |-
Kind is the kind of the referent. Although implementations may support
additional resources, the following types are part of the "Core"
support level for this field.
When used to permit a SecretObjectReference:
* Gateway
When used to permit a BackendObjectReference:
* GRPCRoute
* HTTPRoute
* TCPRoute
* TLSRoute
* UDPRoute
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
namespace:
description: |-
Namespace is the namespace of the referent.
Support: Core
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- namespace
type: object
maxItems: 16
minItems: 1
type: array
to:
description: |-
To describes the resources that may be referenced by the resources
described in "From". Each entry in this list MUST be considered to be an
additional place that references can be valid to, or to put this another
way, entries MUST be combined using OR.
Support: Core
items:
description: |-
ReferenceGrantTo describes what Kinds are allowed as targets of the
references.
properties:
group:
description: |-
Group is the group of the referent.
When empty, the Kubernetes core API group is inferred.
Support: Core
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: |-
Kind is the kind of the referent. Although implementations may support
additional resources, the following types are part of the "Core"
support level for this field:
* Secret when used to permit a SecretObjectReference
* Service when used to permit a BackendObjectReference
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: |-
Name is the name of the referent. When unspecified, this policy
refers to all resources of the specified Group and Kind in the local
namespace.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
type: object
maxItems: 16
minItems: 1
type: array
required:
- from
- to
type: object
type: object
served: false
storage: false
subresources: {}
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: |-
ReferenceGrant identifies kinds of resources in other namespaces that are
trusted to reference the specified kinds of resources in the same namespace
as the policy.
Each ReferenceGrant can be used to represent a unique trust relationship.
Additional Reference Grants can be used to add to the set of trusted
sources of inbound references for the namespace they are defined within.
All cross-namespace references in Gateway API (with the exception of cross-namespace
Gateway-route attachment) require a ReferenceGrant.
ReferenceGrant is a form of runtime verification allowing users to assert
which cross-namespace object references are permitted. Implementations that
support ReferenceGrant MUST NOT permit cross-namespace references which have
no grant, and MUST respond to the removal of a grant by revoking the access
that the grant allowed.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of ReferenceGrant.
properties:
from:
description: |-
From describes the trusted namespaces and kinds that can reference the
resources described in "To". Each entry in this list MUST be considered
to be an additional place that references can be valid from, or to put
this another way, entries MUST be combined using OR.
Support: Core
items:
description: ReferenceGrantFrom describes trusted namespaces and
kinds.
properties:
group:
description: |-
Group is the group of the referent.
When empty, the Kubernetes core API group is inferred.
Support: Core
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: |-
Kind is the kind of the referent. Although implementations may support
additional resources, the following types are part of the "Core"
support level for this field.
When used to permit a SecretObjectReference:
* Gateway
When used to permit a BackendObjectReference:
* GRPCRoute
* HTTPRoute
* TCPRoute
* TLSRoute
* UDPRoute
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
namespace:
description: |-
Namespace is the namespace of the referent.
Support: Core
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- namespace
type: object
maxItems: 16
minItems: 1
type: array
to:
description: |-
To describes the resources that may be referenced by the resources
described in "From". Each entry in this list MUST be considered to be an
additional place that references can be valid to, or to put this another
way, entries MUST be combined using OR.
Support: Core
items:
description: |-
ReferenceGrantTo describes what Kinds are allowed as targets of the
references.
properties:
group:
description: |-
Group is the group of the referent.
When empty, the Kubernetes core API group is inferred.
Support: Core
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: |-
Kind is the kind of the referent. Although implementations may support
additional resources, the following types are part of the "Core"
support level for this field:
* Secret when used to permit a SecretObjectReference
* Service when used to permit a BackendObjectReference
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: |-
Name is the name of the referent. When unspecified, this policy
refers to all resources of the specified Group and Kind in the local
namespace.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
type: object
maxItems: 16
minItems: 1
type: array
required:
- from
- to
type: object
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: null
storedVersions: null

View File

@@ -0,0 +1,11 @@
package holos
// Manage on every Cluster in the Platform
for Fleet in #Fleets {
for Cluster in Fleet.clusters {
#Platform: Components: "\(Cluster.name)/gateway-api": {
path: "components/gateway-api"
cluster: Cluster.name
}
}
}

View File

@@ -0,0 +1,5 @@
{
"name": "gateway-api",
"short": "Gateway API custom resource definitions",
"long": "Gateway API represents the next generation of Kubernetes Ingress, Load Balancing, and Service Mesh APIs."
}

View File

@@ -0,0 +1,11 @@
package holos

// k3d ships the default Flannel CNI, which keeps its CNI configuration and
// binaries in nonstandard locations, so the Istio CNI install must be
// pointed at them explicitly.
//
// See https://istio.io/latest/docs/ambient/install/platform-prerequisites/#k3d
#Istio: Values: cni: {
	cniConfDir: "/var/lib/rancher/k3s/agent/etc/cni/net.d"
	cniBinDir:  "/bin"
}

View File

@@ -0,0 +1,5 @@
{
"name": "istio-k3d",
"short": "configure istio for the k3d flannel cni",
"long": "If you are using k3d with the default Flannel CNI, you must append some values to your installation command, as k3d uses nonstandard locations for CNI configuration and binaries. Refer to https://istio.io/latest/docs/ambient/install/platform-prerequisites/#k3d"
}

View File

@@ -0,0 +1,17 @@
package holos

// Build plan for the istio-base Helm chart (cluster-wide Istio resources).
(#Helm & Chart).Output

let Chart = {
	Name:      "istio-base"
	Version:   #Istio.Version
	Namespace: #Istio.System.Namespace

	Chart: chart: name: "base"
	// Two single-field Repo constraints in the original unify to this struct.
	Repo: {
		name: "istio"
		url:  "https://istio-release.storage.googleapis.com/charts"
	}
	Values: #Istio.Values
}

View File

@@ -0,0 +1,48 @@
package holos
// imported from the 1.23.1 base chart
// cue import components/istio/base/vendor/base/values.yaml
// NOTE(review): vendor-imported defaults — on chart upgrades, regenerate with
// the cue import command above rather than editing values by hand, so this
// file stays diffable against upstream.
#Istio: Values: {
// "defaults" is a workaround for Helm limitations. Users should NOT set ".defaults" explicitly, but rather directly set the fields internally.
// For instance, instead of `--set defaults.foo=bar`, just set `--set foo=bar`.
defaults: {
global: {
// ImagePullSecrets for control plane ServiceAccount, list of secrets in the same namespace
// to use for pulling any images in pods that reference this ServiceAccount.
// Must be set for any cluster configured with private docker registry.
imagePullSecrets: []
// Used to locate istiod.
istioNamespace: "istio-system"
// NOTE(review): presumably toggles an external (out-of-cluster) istiod
// control plane — confirm against the upstream base chart docs.
externalIstiod: false
remotePilotAddress: ""
// Platform where Istio is deployed. Possible values are: "openshift", "gcp".
// An empty value means it is a vanilla Kubernetes distribution, therefore no special
// treatment will be considered.
platform: ""
// Setup how istiod Service is configured. See https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
// This is intended only for use with external istiod.
ipFamilyPolicy: ""
ipFamilies: []
}
base: {
// Used for helm2 to add the CRDs to templates.
enableCRDTemplates: false
// Validation webhook configuration url
// For example: https://$remotePilotAddress:15017/validate
validationURL: ""
// Validation webhook caBundle value. Useful when running pilot with a well known cert
validationCABundle: ""
// For istioctl usage to disable istio config crds in base
enableIstioConfigCRDs: true
}
// Revision applied when none is specified (per field name) — TODO confirm
// against the upstream base chart documentation.
defaultRevision: "default"
experimental: stableValidationPolicy: false
}
}

View File

@@ -0,0 +1,17 @@
package holos

// Build plan for the istio-cni Helm chart (node-level CNI plugin DaemonSet).
(#Helm & Chart).Output

let Chart = {
	Name:      "istio-cni"
	Version:   #Istio.Version
	Namespace: #Istio.System.Namespace

	Chart: chart: name: "cni"
	// Two single-field Repo constraints in the original unify to this struct.
	Repo: {
		name: "istio"
		url:  "https://istio-release.storage.googleapis.com/charts"
	}
	Values: #Istio.Values
}

View File

@@ -0,0 +1,148 @@
package holos
// imported from the 1.23.1 cni chart
// cue import components/istio/cni/vendor/cni/values.yaml
// NOTE(review): vendor-imported defaults — on chart upgrades, regenerate with
// the cue import command above rather than editing values by hand, so this
// file stays diffable against upstream.
#Istio: Values: {
// "defaults" is a workaround for Helm limitations. Users should NOT set ".defaults" explicitly, but rather directly set the fields internally.
// For instance, instead of `--set defaults.foo=bar`, just set `--set foo=bar`.
defaults: {
cni: {
// Image coordinates for the install-cni container; empty strings presumably
// fall back to the global.* values below — verify against the chart templates.
hub: ""
tag: ""
variant: ""
image: "install-cni"
pullPolicy: ""
// Same as `global.logging.level`, but will override it if set
logging: {
level: ""
}
// Configuration file to insert istio-cni plugin configuration
// by default this will be the first file found in the cni-conf-dir
// Example
// cniConfFileName: 10-calico.conflist
// CNI bin and conf dir override settings
// defaults:
cniBinDir: "" // Auto-detected based on version; defaults to /opt/cni/bin.
cniConfDir: "/etc/cni/net.d"
cniConfFileName: ""
// This directory must exist on the node, if it does not, consult your container runtime
// documentation for the appropriate path.
cniNetnsDir: null // Defaults to '/var/run/netns', in minikube/docker/others can be '/var/run/docker/netns'.
excludeNamespaces: ["kube-system"]
// Allows user to set custom affinity for the DaemonSet
affinity: {}
// Custom annotations on pod level, if you need them
podAnnotations: {}
// Deploy the config files as plugin chain (value "true") or as standalone files in the conf dir (value "false")?
// Some k8s flavors (e.g. OpenShift) do not support the chain approach, set to false if this is the case
chained: true
// Custom configuration happens based on the CNI provider.
// Possible values: "default", "multus"
provider: "default"
// Configure ambient settings
ambient: {
// If enabled, ambient redirection will be enabled
enabled: false
// Set ambient config dir path: defaults to /etc/ambient-config
configDir: ""
// If enabled, and ambient is enabled, DNS redirection will be enabled
dnsCapture: false
// If enabled, and ambient is enabled, enables ipv6 support
ipv6: true
}
repair: {
enabled: true
hub: ""
tag: ""
// Repair controller has 3 modes. Pick which one meets your use cases. Note only one may be used.
// This defines the action the controller will take when a pod is detected as broken.
// labelPods will label all pods with <brokenPodLabelKey>=<brokenPodLabelValue>.
// This is only capable of identifying broken pods; the user is responsible for fixing them (generally, by deleting them).
// Note this gives the DaemonSet a relatively high privilege, as modifying pod metadata/status can have wider impacts.
labelPods: false
// deletePods will delete any broken pod. These will then be rescheduled, hopefully onto a node that is fully ready.
// Note this gives the DaemonSet a relatively high privilege, as it can delete any Pod.
deletePods: false
// repairPods will dynamically repair any broken pod by setting up the pod networking configuration even after it has started.
// Note the pod will be crashlooping, so this may take a few minutes to become fully functional based on when the retry occurs.
// This requires no RBAC privilege, but does require `securityContext.privileged/CAP_SYS_ADMIN`.
repairPods: true
initContainerName: "istio-validation"
brokenPodLabelKey: "cni.istio.io/uninitialized"
brokenPodLabelValue: "true"
}
// Set to `type: RuntimeDefault` to use the default profile if available.
seccompProfile: {}
resources: requests: {
cpu: "100m"
memory: "100Mi"
}
resourceQuotas: {
enabled: false
pods: 5000
}
// The number of pods that can be unavailable during rolling update (see
// `updateStrategy.rollingUpdate.maxUnavailable` here:
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec).
// May be specified as a number of pods or as a percent of the total number
// of pods at the start of the update.
rollingMaxUnavailable: 1
}
// Revision is set as 'version' label and part of the resource names when installing multiple control planes.
revision: ""
// For Helm compatibility.
ownerName: ""
global: {
// Default hub for Istio images.
// Releases are published to docker hub under 'istio' project.
// Dev builds from prow are on gcr.io
hub: "docker.io/istio"
// Default tag for Istio images.
tag: "1.23.1"
// Variant of the image to use.
// Currently supported are: [debug, distroless]
variant: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent.
imagePullPolicy: ""
// change cni scope level to control logging out of istio-cni-node DaemonSet
logging: {
level: "info"
}
logAsJson: false
// ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace
// to use for pulling any images in pods that reference this ServiceAccount.
// For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)
// ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.
// Must be set for any cluster configured with private docker registry.
// - private-registry-key
imagePullSecrets: []
// Default resources allocated
defaultResources: {
requests: {
cpu: "100m"
memory: "100Mi"
}
}
}
}
}

View File

@@ -0,0 +1,17 @@
package holos

// Build plan for the istio-gateway Helm chart (ingress gateway deployment).
(#Helm & Chart).Output

let Chart = {
	Name:      "istio-gateway"
	Version:   #Istio.Version
	Namespace: #Istio.Gateway.Namespace

	Chart: chart: name: "gateway"
	// Two single-field Repo constraints in the original unify to this struct.
	Repo: {
		name: "istio"
		url:  "https://istio-release.storage.googleapis.com/charts"
	}
	Values: #Istio.Values
}

View File

@@ -0,0 +1,160 @@
package holos
// imported from the 1.23.1 gateway chart
// cue import components/istio/gateway/vendor/gateway/values.yaml
#Istio: Values: {
// "defaults" is a workaround for Helm limitations. Users should NOT set ".defaults" explicitly, but rather directly set the fields internally.
// For instance, instead of `--set defaults.foo=bar`, just set `--set foo=bar`.
defaults: {
// Name allows overriding the release name. Generally this should not be set
name: ""
// revision declares which revision this gateway is a part of
revision: ""
// Controls the spec.replicas setting for the Gateway deployment if set.
// Otherwise defaults to Kubernetes Deployment default (1).
replicaCount: null
kind: "Deployment"
rbac: {
// If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
// when using http://gateway-api.org/.
enabled: true
}
serviceAccount: {
// If set, a service account will be created. Otherwise, the default is used
create: true
// Annotations to add to the service account
annotations: {}
// The name of the service account to use.
// If not set, the release name is used
name: ""
}
podAnnotations: {
"prometheus.io/port": "15020"
"prometheus.io/scrape": "true"
"prometheus.io/path": "/stats/prometheus"
"inject.istio.io/templates": "gateway"
"sidecar.istio.io/inject": "true"
}
// Define the security context for the pod.
// If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
// On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: {}
containerSecurityContext: {}
service: {
// Type of service. Set to "None" to disable the service entirely
type: "LoadBalancer"
ports: [{
name: "status-port"
port: 15021
protocol: "TCP"
targetPort: 15021
}, {
name: "http2"
port: 80
protocol: "TCP"
targetPort: 80
}, {
name: "https"
port: 443
protocol: "TCP"
targetPort: 443
}]
annotations: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
ipFamilyPolicy: ""
//# Whether to automatically allocate NodePorts (only for LoadBalancers).
// allocateLoadBalancerNodePorts: false
ipFamilies: []
}
resources: {
requests: {
cpu: "100m"
memory: "128Mi"
}
limits: {
cpu: "2000m"
memory: "1024Mi"
}
}
autoscaling: {
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: {}
autoscaleBehavior: {}
}
// Pod environment variables
env: {}
// Labels to apply to all resources
labels: {}
// Annotations to apply to all resources
annotations: {}
nodeSelector: {}
tolerations: []
topologySpreadConstraints: []
affinity: {}
// If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent
imagePullPolicy: ""
imagePullSecrets: []
// This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
//
// By default, the `podDisruptionBudget` is disabled (set to `{}`),
// which means that no PodDisruptionBudget resource will be created.
//
// To enable the PodDisruptionBudget, configure it by specifying the
// `minAvailable` or `maxUnavailable`. For example, to set the
// minimum number of available replicas to 1, you can update this value as follows:
//
// podDisruptionBudget:
// minAvailable: 1
//
// Or, to allow a maximum of 1 unavailable replica, you can set:
//
// podDisruptionBudget:
// maxUnavailable: 1
//
// You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
// For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
//
// podDisruptionBudget:
// minAvailable: 1
// unhealthyPodEvictionPolicy: AlwaysAllow
//
// To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
//
// podDisruptionBudget: {}
//
podDisruptionBudget: {}
terminationGracePeriodSeconds: 30
// A list of `Volumes` added into the Gateway Pods. See
// https://kubernetes.io/docs/concepts/storage/volumes/.
volumes: []
// A list of `VolumeMounts` added into the Gateway Pods. See
// https://kubernetes.io/docs/concepts/storage/volumes/.
volumeMounts: []
// Configure this to a higher priority class in order to make sure your Istio gateway pods
// will not be killed because of low priority class.
// Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
// for more detail.
priorityClassName: ""
}
}

View File

@@ -0,0 +1,17 @@
package holos
// Produce a helm chart build plan.
(#Helm & Chart).Output
let Chart = {
Name: "istiod"
Version: #Istio.Version
Namespace: #Istio.System.Namespace
Chart: chart: name: "istiod"
Repo: name: "istio"
Repo: url: "https://istio-release.storage.googleapis.com/charts"
Values: #Istio.Values
}

View File

@@ -0,0 +1,539 @@
package holos
// imported from the 1.23.1 istiod chart
// cue import components/istio/istiod/vendor/istiod/values.yaml
#Istio: Values: {
// "defaults" is a workaround for Helm limitations. Users should NOT set ".defaults" explicitly, but rather directly set the fields internally.
// For instance, instead of `--set defaults.foo=bar`, just set `--set foo=bar`.
defaults: {
//.Values.pilot for discovery and mesh wide config
//# Discovery Settings
pilot: {
autoscaleEnabled: true
autoscaleMin: 1
autoscaleMax: 5
autoscaleBehavior: {}
replicaCount: 1
rollingMaxSurge: "100%"
rollingMaxUnavailable: "25%"
hub: ""
tag: ""
variant: ""
// Can be a full hub/image:tag
image: "pilot"
traceSampling: 1.0
// Resources for a small pilot install
resources: {
requests: {
cpu: "500m"
memory: "2048Mi"
}
}
// Set to `type: RuntimeDefault` to use the default profile if available.
seccompProfile: {}
// Whether to use an existing CNI installation
cni: {
enabled: false
provider: "default"
}
// Additional container arguments
extraContainerArgs: []
env: {}
// Settings related to the untaint controller
// This controller will remove `cni.istio.io/not-ready` from nodes when the istio-cni pod becomes ready
// It should be noted that cluster operator/owner is responsible for having the taint set by their infrastructure provider when new nodes are added to the cluster; the untaint controller does not taint nodes
taint: {
// Controls whether or not the untaint controller is active
enabled: false
// What namespace the untaint controller should watch for istio-cni pods. This is only required when istio-cni is running in a different namespace than istiod
namespace: ""
}
affinity: {}
tolerations: []
cpu: targetAverageUtilization: 80
// targetAverageUtilization: 80
memory: {}
// Additional volumeMounts to the istiod container
volumeMounts: []
// Additional volumes to the istiod pod
volumes: []
nodeSelector: {}
podAnnotations: {}
serviceAnnotations: {}
serviceAccountAnnotations: {}
topologySpreadConstraints: []
// You can use jwksResolverExtraRootCA to provide a root certificate
// in PEM format. This will then be trusted by pilot when resolving
// JWKS URIs.
jwksResolverExtraRootCA: ""
// The following is used to limit how long a sidecar can be connected
// to a pilot. It balances out load across pilot instances at the cost of
// increasing system churn.
keepaliveMaxServerConnectionAge: "30m"
// Additional labels to apply to the deployment.
deploymentLabels: {}
//# Mesh config settings
// Install the mesh config map, generated from values.yaml.
// If false, pilot wil use default values (by default) or user-supplied values.
configMap: true
// Additional labels to apply on the pod level for monitoring and logging configuration.
podLabels: {}
// Setup how istiod Service is configured. See https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
ipFamilyPolicy: ""
ipFamilies: []
// Ambient mode only.
// Set this if you install ztunnel to a different namespace from `istiod`.
// If set, `istiod` will allow connections from trusted node proxy ztunnels
// in the provided namespace.
// If unset, `istiod` will assume the trusted node proxy ztunnel resides
// in the same namespace as itself.
trustedZtunnelNamespace: ""
}
sidecarInjectorWebhook: {
// You can use the field called alwaysInjectSelector and neverInjectSelector which will always inject the sidecar or
// always skip the injection on pods that match that label selector, regardless of the global policy.
// See https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#more-control-adding-exceptions
neverInjectSelector: []
alwaysInjectSelector: []
// injectedAnnotations are additional annotations that will be added to the pod spec after injection
// This is primarily to support PSP annotations. For example, if you defined a PSP with the annotations:
//
// annotations:
// apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
// apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
//
// The PSP controller would add corresponding annotations to the pod spec for each container. However, this happens before
// the inject adds additional containers, so we must specify them explicitly here. With the above example, we could specify:
// injectedAnnotations:
// container.apparmor.security.beta.kubernetes.io/istio-init: runtime/default
// container.apparmor.security.beta.kubernetes.io/istio-proxy: runtime/default
injectedAnnotations: {}
// This enables injection of sidecar in all namespaces,
// with the exception of namespaces with "istio-injection:disabled" annotation
// Only one environment should have this enabled.
enableNamespacesByDefault: false
// Mutations that occur after the sidecar injector are not handled by default, as the Istio sidecar injector is only run
// once. For example, an OPA sidecar injected after the Istio sidecar will not have it's liveness/readiness probes rewritten.
// Setting this to `IfNeeded` will result in the sidecar injector being run again if additional mutations occur.
reinvocationPolicy: "Never"
rewriteAppHTTPProbe: true
// Templates defines a set of custom injection templates that can be used. For example, defining:
//
// templates:
// hello: |
// metadata:
// labels:
// hello: world
//
// Then starting a pod with the `inject.istio.io/templates: hello` annotation, will result in the pod
// being injected with the hello=world labels.
// This is intended for advanced configuration only; most users should use the built in template
templates: {}
// Default templates specifies a set of default templates that are used in sidecar injection.
// By default, a template `sidecar` is always provided, which contains the template of default sidecar.
// To inject other additional templates, define it using the `templates` option, and add it to
// the default templates list.
// For example:
//
// templates:
// hello: |
// metadata:
// labels:
// hello: world
//
// defaultTemplates: ["sidecar", "hello"]
defaultTemplates: []
}
istiodRemote: {
// Sidecar injector mutating webhook configuration clientConfig.url value.
// For example: https://$remotePilotAddress:15017/inject
// The host should not refer to a service running in the cluster; use a service reference by specifying
// the clientConfig.service field instead.
injectionURL: ""
// Sidecar injector mutating webhook configuration path value for the clientConfig.service field.
// Override to pass env variables, for example: /inject/cluster/remote/net/network2
injectionPath: "/inject"
injectionCABundle: ""
}
telemetry: {
enabled: true
v2: {
// For Null VM case now.
// This also enables metadata exchange.
enabled: true
// Indicate if prometheus stats filter is enabled or not
prometheus: {
enabled: true
}
// stackdriver filter settings.
stackdriver: {
enabled: false
}
}
}
// Revision is set as 'version' label and part of the resource names when installing multiple control planes.
revision: ""
// Revision tags are aliases to Istio control plane revisions
revisionTags: []
// For Helm compatibility.
ownerName: ""
// meshConfig defines runtime configuration of components, including Istiod and istio-agent behavior
// See https://istio.io/docs/reference/config/istio.mesh.v1alpha1/ for all available options
meshConfig: {
enablePrometheusMerge: true
}
experimental: stableValidationPolicy: false
global: {
// Used to locate istiod.
istioNamespace: "istio-system"
// List of cert-signers to allow "approve" action in the istio cluster role
//
// certSigners:
// - clusterissuers.cert-manager.io/istio-ca
certSigners: []
// enable pod disruption budget for the control plane, which is used to
// ensure Istio control plane components are gradually upgraded or recovered.
defaultPodDisruptionBudget: {
// The values aren't mutable due to a current PodDisruptionBudget limitation
// minAvailable: 1
enabled: true
}
// A minimal set of requested resources to applied to all deployments so that
// Horizontal Pod Autoscaler will be able to function (if set).
// Each component can overwrite these default values by adding its own resources
// block in the relevant section below and setting the desired resources values.
defaultResources: {
// memory: 128Mi
// limits:
// cpu: 100m
// memory: 128Mi
requests: {
cpu: "10m"
}
}
// Default hub for Istio images.
// Releases are published to docker hub under 'istio' project.
// Dev builds from prow are on gcr.io
hub: "docker.io/istio"
// Default tag for Istio images.
tag: "1.23.1"
// Variant of the image to use.
// Currently supported are: [debug, distroless]
variant: ""
// Specify image pull policy if default behavior isn't desired.
// Default behavior: latest images will be Always else IfNotPresent.
imagePullPolicy: ""
// ImagePullSecrets for all ServiceAccount, list of secrets in the same namespace
// to use for pulling any images in pods that reference this ServiceAccount.
// For components that don't use ServiceAccounts (i.e. grafana, servicegraph, tracing)
// ImagePullSecrets will be added to the corresponding Deployment(StatefulSet) objects.
// Must be set for any cluster configured with private docker registry.
// - private-registry-key
imagePullSecrets: []
// Enabled by default in master for maximising testing.
istiod: {
enableAnalysis: false
}
// To output all istio components logs in json format by adding --log_as_json argument to each container argument
logAsJson: false
// Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>
// The control plane has different scopes depending on component, but can configure default log level across all components
// If empty, default scope and level will be used as configured in code
logging: {
level: "default:info"
}
omitSidecarInjectorConfigMap: false
// Configure whether Operator manages webhook configurations. The current behavior
// of Istiod is to manage its own webhook configurations.
// When this option is set as true, Istio Operator, instead of webhooks, manages the
// webhook configurations. When this option is set as false, webhooks manage their
// own webhook configurations.
operatorManageWebhooks: false
// Custom DNS config for the pod to resolve names of services in other
// clusters. Use this to add additional search domains, and other settings.
// see
// https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config
// This does not apply to gateway pods as they typically need a different
// set of DNS settings than the normal application pods (e.g., in
// multicluster scenarios).
// NOTE: If using templates, follow the pattern in the commented example below.
//podDNSSearchNamespaces:
//- global
// Kubernetes >=v1.11.0 will create two PriorityClass, including system-cluster-critical and
// system-node-critical, it is better to configure this in order to make sure your Istio pods
// will not be killed because of low priority class.
// Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
// for more detail.
priorityClassName: ""
proxy: {
image: "proxyv2"
// This controls the 'policy' in the sidecar injector.
autoInject: "enabled"
// CAUTION: It is important to ensure that all Istio helm charts specify the same clusterDomain value
// cluster domain. Default value is "cluster.local".
clusterDomain: "cluster.local"
// Per Component log level for proxy, applies to gateways and sidecars. If a component level is
// not set, then the global "logLevel" will be used.
componentLogLevel: "misc:error"
// If set, newly injected sidecars will have core dumps enabled.
enableCoreDump: false
// istio ingress capture allowlist
// examples:
// Redirect only selected ports: --includeInboundPorts="80,8080"
excludeInboundPorts: ""
includeInboundPorts: "*"
// istio egress capture allowlist
// https://istio.io/docs/tasks/traffic-management/egress.html#calling-external-services-directly
// example: includeIPRanges: "172.30.0.0/16,172.20.0.0/16"
// would only capture egress traffic on those two IP Ranges, all other outbound traffic would
// be allowed by the sidecar
includeIPRanges: "*"
excludeIPRanges: ""
includeOutboundPorts: ""
excludeOutboundPorts: ""
// Log level for proxy, applies to gateways and sidecars.
// Expected values are: trace|debug|info|warning|error|critical|off
logLevel: "warning"
// Specify the path to the outlier event log.
// Example: /dev/stdout
outlierLogPath: ""
//If set to true, istio-proxy container will have privileged securityContext
privileged: false
// The number of successive failed probes before indicating readiness failure.
readinessFailureThreshold: 4
// The initial delay for readiness probes in seconds.
readinessInitialDelaySeconds: 0
// The period between readiness probes.
readinessPeriodSeconds: 15
// Enables or disables a startup probe.
// For optimal startup times, changing this should be tied to the readiness probe values.
//
// If the probe is enabled, it is recommended to have delay=0s,period=15s,failureThreshold=4.
// This ensures the pod is marked ready immediately after the startup probe passes (which has a 1s poll interval),
// and doesn't spam the readiness endpoint too much
//
// If the probe is disabled, it is recommended to have delay=1s,period=2s,failureThreshold=30.
// This ensures the startup is reasonable fast (polling every 2s). 1s delay is used since the startup is not often ready instantly.
startupProbe: {
enabled: true
failureThreshold: 600 // 10 minutes
}
// Resources for the sidecar.
resources: {
requests: {
cpu: "100m"
memory: "128Mi"
}
limits: {
cpu: "2000m"
memory: "1024Mi"
}
}
// Default port for Pilot agent health checks. A value of 0 will disable health checking.
statusPort: 15020
// Specify which tracer to use. One of: zipkin, lightstep, datadog, stackdriver, none.
// If using stackdriver tracer outside GCP, set env GOOGLE_APPLICATION_CREDENTIALS to the GCP credential file.
tracer: "none"
}
proxy_init: {
// Base name for the proxy_init container, used to configure iptables.
image: "proxyv2"
}
// configure remote pilot and istiod service and endpoint
remotePilotAddress: ""
//#############################################################################################
// The following values are found in other charts. To effectively modify these values, make #
// make sure they are consistent across your Istio helm charts #
//#############################################################################################
// The customized CA address to retrieve certificates for the pods in the cluster.
// CSR clients such as the Istio Agent and ingress gateways can use this to specify the CA endpoint.
// If not set explicitly, default to the Istio discovery address.
caAddress: ""
// Configure a remote cluster data plane controlled by an external istiod.
// When set to true, istiod is not deployed locally and only a subset of the other
// discovery charts are enabled.
externalIstiod: false
// Configure a remote cluster as the config cluster for an external istiod.
configCluster: false
// configValidation enables the validation webhook for Istio configuration.
configValidation: true
// Mesh ID means Mesh Identifier. It should be unique within the scope where
// meshes will interact with each other, but it is not required to be
// globally/universally unique. For example, if any of the following are true,
// then two meshes must have different Mesh IDs:
// - Meshes will have their telemetry aggregated in one place
// - Meshes will be federated together
// - Policy will be written referencing one mesh from the other
//
// If an administrator expects that any of these conditions may become true in
// the future, they should ensure their meshes have different Mesh IDs
// assigned.
//
// Within a multicluster mesh, each cluster must be (manually or auto)
// configured to have the same Mesh ID value. If an existing cluster 'joins' a
// multicluster mesh, it will need to be migrated to the new mesh ID. Details
// of migration TBD, and it may be a disruptive operation to change the Mesh
// ID post-install.
//
// If the mesh admin does not specify a value, Istio will use the value of the
// mesh's Trust Domain. The best practice is to select a proper Trust Domain
// value.
meshID: ""
// Configure the mesh networks to be used by the Split Horizon EDS.
//
// The following example defines two networks with different endpoints association methods.
// For `network1` all endpoints that their IP belongs to the provided CIDR range will be
// mapped to network1. The gateway for this network example is specified by its public IP
// address and port.
// The second network, `network2`, in this example is defined differently with all endpoints
// retrieved through the specified Multi-Cluster registry being mapped to network2. The
// gateway is also defined differently with the name of the gateway service on the remote
// cluster. The public IP for the gateway will be determined from that remote service (only
// LoadBalancer gateway service type is currently supported, for a NodePort type gateway service,
// it still need to be configured manually).
//
// meshNetworks:
// network1:
// endpoints:
// - fromCidr: "192.168.0.1/24"
// gateways:
// - address: 1.1.1.1
// port: 80
// network2:
// endpoints:
// - fromRegistry: reg1
// gateways:
// - registryServiceName: istio-ingressgateway.istio-system.svc.cluster.local
// port: 443
//
meshNetworks: {}
// Use the user-specified, secret volume mounted key and certs for Pilot and workloads.
mountMtlsCerts: false
multiCluster: {
// Set to true to connect two kubernetes clusters via their respective
// ingressgateway services when pods in each cluster cannot directly
// talk to one another. All clusters should be using Istio mTLS and must
// have a shared root CA for this model to work.
enabled: false
// Should be set to the name of the cluster this installation will run in. This is required for sidecar injection
// to properly label proxies
clusterName: ""
}
// Network defines the network this cluster belong to. This name
// corresponds to the networks in the map of mesh networks.
network: ""
// Configure the certificate provider for control plane communication.
// Currently, two providers are supported: "kubernetes" and "istiod".
// As some platforms may not have kubernetes signing APIs,
// Istiod is the default
pilotCertProvider: "istiod"
sds: {
// The JWT token for SDS and the aud field of such JWT. See RFC 7519, section 4.1.3.
// When a CSR is sent from Istio Agent to the CA (e.g. Istiod), this aud is to make sure the
// JWT is intended for the CA.
token: {
aud: "istio-ca"
}
}
sts: {
// The service port used by Security Token Service (STS) server to handle token exchange requests.
// Setting this port to a non-zero value enables STS server.
servicePort: 0
}
// The name of the CA for workload certificates.
// For example, when caName=GkeWorkloadCertificate, GKE workload certificates
// will be used as the certificates for workloads.
// The default value is "" and when caName="", the CA will be configured by other
// mechanisms (e.g., environmental variable CA_PROVIDER).
caName: ""
// whether to use autoscaling/v2 template for HPA settings
// for internal usage only, not to be configured by users.
autoscalingv2API: true
}
base: {
// For istioctl usage to disable istio config crds in base
enableIstioConfigCRDs: true
}
// `istio_cni` has been deprecated and will be removed in a future release. use `pilot.cni` instead
istio_cni: {
// `chained` has been deprecated and will be removed in a future release. use `provider` instead
chained: true
provider: "default"
}
// Gateway Settings
gateways: {
// Define the security context for the pod.
// If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
// On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: {}
// Set to `type: RuntimeDefault` to use the default profile for templated gateways, if your container runtime supports it
seccompProfile: {}
}
}
}

View File

@@ -0,0 +1,107 @@
package holos
// imported from the 1.23.1 ztunnel chart
// cue import components/istio/ztunnel/vendor/ztunnel/values.yaml
//
// NOTE(review): these values mirror the upstream ztunnel chart values.yaml
// structure; keep them in sync with the vendored chart when upgrading Istio.
#Istio: Values: {
// "defaults" is a workaround for Helm limitations. Users should NOT set ".defaults" explicitly, but rather directly set the fields internally.
// For instance, instead of `--set defaults.foo=bar`, just set `--set foo=bar`.
defaults: {
// Hub to pull from. Image will be `Hub/Image:Tag-Variant`
hub: "docker.io/istio"
// Tag to pull from. Image will be `Hub/Image:Tag-Variant`
tag: "1.23.1"
// Variant to pull. Options are "debug" or "distroless". Unset will use the default for the given version.
variant: ""
// Image name to pull from. Image will be `Hub/Image:Tag-Variant`
// If Image contains a "/", it will replace the entire `image` in the pod.
image: "ztunnel"
// Labels to apply to all top level resources
labels: {}
// Annotations to apply to all top level resources
annotations: {}
// Additional volumeMounts to the ztunnel container
volumeMounts: []
// Additional volumes to the ztunnel pod
volumes: []
// Annotations added to each pod. The default annotations are required for scraping prometheus (in most environments).
podAnnotations: {
"prometheus.io/port": "15020"
"prometheus.io/scrape": "true"
}
// Additional labels to apply on the pod level
podLabels: {}
// Pod resource configuration
resources: {
requests: {
cpu: "200m"
// Ztunnel memory scales with the size of the cluster and traffic load
// While there are many factors, this is enough for ~200k pod cluster or 100k concurrently open connections.
memory: "512Mi"
}
}
// List of secret names to add to the service account as image pull secrets
imagePullSecrets: []
// A `key: value` mapping of environment variables to add to the pod
env: {}
// Override for the pod imagePullPolicy
imagePullPolicy: ""
// Settings for multicluster
multiCluster: {
// The name of the cluster we are installing in. Note this is a user-defined name, which must be consistent
// with Istiod configuration.
clusterName: ""
}
// meshConfig defines runtime configuration of components.
// For ztunnel, only defaultConfig is used, but this is nested under `meshConfig` for consistency with other
// components.
// TODO: https://github.com/istio/istio/issues/43248
meshConfig: {
defaultConfig: proxyMetadata: {}
}
// This value defines:
// 1. how many seconds kube waits for ztunnel pod to gracefully exit before forcibly terminating it (this value)
// 2. how many seconds ztunnel waits to drain its own connections (this value - 1 sec)
// Default K8S value is 30 seconds
terminationGracePeriodSeconds: 30
// Revision is set as 'version' label and part of the resource names when installing multiple control planes.
// Used to locate the XDS and CA, if caAddress or xdsAddress are not set explicitly.
revision: ""
// The customized CA address to retrieve certificates for the pods in the cluster.
// CSR clients such as the Istio Agent and ingress gateways can use this to specify the CA endpoint.
caAddress: ""
// The customized XDS address to retrieve configuration.
// This should include the port - 15012 for Istiod. TLS will be used with the certificates in "istiod-ca-cert" secret.
// By default, it is istiod.istio-system.svc:15012 if revision is not set, or istiod-<revision>.<istioNamespace>.svc:15012
xdsAddress: ""
// Used to locate the XDS and CA, if caAddress or xdsAddress are not set.
istioNamespace: "istio-system"
// Configuration log level of ztunnel binary, default is info.
// Valid values are: trace, debug, info, warn, error
logLevel: "info"
// Set to `type: RuntimeDefault` to use the default profile if available.
// TODO Ambient inpod - for OpenShift, set to the following to get writable sockets in hostmounts to work, eventually consider CSI driver instead
//seLinuxOptions:
// type: spc_t
seLinuxOptions: {}
}
}

View File

@@ -0,0 +1,17 @@
package holos

// Chart describes the ztunnel Helm chart for this Component. Declaration
// order is irrelevant in CUE, so the binding is defined before the build
// plan expression that consumes it.
let ZtunnelChart = {
	Name:      "istio-ztunnel"
	Version:   #Istio.Version
	Namespace: #Istio.System.Namespace
	Chart: chart: name: "ztunnel"
	Repo: {
		name: "istio"
		url:  "https://istio-release.storage.googleapis.com/charts"
	}
	Values: #Istio.Values
}

// Produce a helm chart build plan.
(#Helm & ZtunnelChart).Output

View File

@@ -0,0 +1,45 @@
package holos

// #Istio represents platform wide configuration.
#Istio: {
	Version: "1.23.1"
	System: Namespace:  "istio-system"
	Gateway: Namespace: "istio-ingress"
	// Constrain Helm values for safer, easier upgrades and consistency across
	// platform components. The ambient profile enables ambient mesh mode.
	Values: {
		global: istioNamespace: System.Namespace
		profile: "ambient"
	}
}

// Register the istio namespaces with the platform-wide #Namespaces struct.
#Namespaces: (#Istio.System.Namespace): _
#Namespaces: (#Istio.Gateway.Namespace): _

// Manage istio on workload clusters. Each entry pairs the Component name
// with the directory under components/istio/ that holds its configuration.
for Cluster in #Fleets.workload.clusters {
	for Component in [
		{name: "istio-base", dir: "base"},
		{name: "istiod", dir: "istiod"},
		{name: "istio-cni", dir: "cni"},
		{name: "istio-ztunnel", dir: "ztunnel"},
		{name: "istio-gateway", dir: "gateway"},
	] {
		#Platform: Components: "\(Cluster.name)/\(Component.name)": {
			path:    "components/istio/\(Component.dir)"
			cluster: Cluster.name
		}
	}
}

View File

@@ -0,0 +1,5 @@
{
"name": "istio",
"short": "istio service mesh",
"long": "Easily build cloud native workloads securely and reliably with Istio."
}

View File

@@ -0,0 +1,9 @@
package holos

// Produce a kubernetes objects build plan rendering every namespace
// registered in the platform-wide #Namespaces struct.
(#Kubernetes & {
	Name: "namespaces"
	Resources: Namespace: #Namespaces
}).Output

View File

@@ -0,0 +1,21 @@
package holos

import corev1 "k8s.io/api/core/v1"

// #Namespaces defines all managed namespaces in the Platform.
// Holos adopts the sig-multicluster position of namespace sameness: a
// namespace name means the same thing on every cluster.
#Namespaces: [NS=string]: corev1.#Namespace & {
	metadata: name: NS
}

// Manage the namespaces Component on every Cluster in every Fleet.
for Fleet in #Fleets for Cluster in Fleet.clusters {
	#Platform: Components: "\(Cluster.name)/namespaces": {
		path:    "components/namespaces"
		cluster: Cluster.name
	}
}

View File

@@ -0,0 +1,5 @@
{
"name": "namespaces",
"short": "manage namespaces consistently",
"long": "Provides the #Namespaces root struct for components to register with."
}

View File

@@ -0,0 +1,5 @@
{
"name": "workload-cluster",
"short": "define a workload cluster for the guides",
"long": "Define a workload cluster named workload for use with the documentation."
}

View File

@@ -0,0 +1,4 @@
package holos

// Manage a workload cluster named workload for use with the guides. The
// cluster value is left open (_) so other files may constrain it further.
#Fleets: workload: {
	clusters: workload: _
}

View File

@@ -17,7 +17,7 @@ import (
// from package core. Useful as a convenience wrapper to render a HelmChart
// with optional mix-in resources and Kustomization post-processing.
#Helm: {
// Name represents the chart name.
// Name represents the Component name.
Name: string
// Version represents the chart version.
@@ -27,7 +27,7 @@ import (
Namespace: string
// Resources are kubernetes api objects to mix into the output.
Resources: {...} & {...} @go(,map[string]any)
Resources: {...} @go(,map[string]any)
// Repo represents the chart repository
Repo: {
@@ -127,7 +127,7 @@ import (
workload: #Fleet & {name: "workload"} @go(Workload)
// Management represents a Fleet with one Cluster named management.
management: #Fleet & {name: "management", clusters: management: _} @go(Management)
management: #Fleet & {name: "management"} @go(Management)
}
// Platform is a convenience structure to produce a core Platform specification
@@ -149,3 +149,29 @@ import (
// and render each listed Component, injecting the Model.
Output: core.#Platform
}
// Kustomize provides a BuildPlan via the Output field which contains one
// KustomizeBuild from package core.
//
// NOTE(review): Output is presumably derived from Name and Kustomization by
// a version-specific definition unified with this schema — confirm against
// the v1alpha3 implementation.
#Kustomize: {
// Name represents the Component name.
Name: string
// Kustomization represents the kustomize build plan for holos to render.
Kustomization: core.#KustomizeBuild
// Output represents the derived BuildPlan for the Holos cli to render.
Output: core.#BuildPlan
}
// Kubernetes provides a BuildPlan via the Output field which contains inline
// API Objects provided directly from CUE.
//
// NOTE(review): Output is presumably derived from Name and Resources by a
// version-specific definition unified with this schema — confirm against the
// v1alpha3 implementation.
#Kubernetes: {
// Name represents the Component name.
Name: string
// Resources represents the kubernetes api objects for the Component.
Resources: {...} @go(,map[string]any)
// Output represents the derived BuildPlan for the Holos cli to render.
Output: core.#BuildPlan
}

View File

@@ -208,3 +208,26 @@ import (
Output: spec: model: Model
Output: spec: components: [for c in Components {c}]
}
// #Kustomize derives a BuildPlan holding a single KustomizeBuild entry.
#Kustomize: {
// Name is constrained elsewhere by the schema this struct unifies with.
Name: _
// Default the kustomization metadata name to the Component name; callers
// may still set it explicitly.
Kustomization: metadata: name: string | *Name
Output: #BuildPlan & {
// Hidden field carries the Component name into the BuildPlan — presumably
// #BuildPlan derives metadata from _Name; confirm in its definition.
_Name: Name
spec: components: kustomizeBuildList: [Kustomization]
}
}
// #Kubernetes derives a BuildPlan from inline API objects provided in CUE.
#Kubernetes: {
// Name is constrained elsewhere by the schema this struct unifies with.
Name: _
Resources: #Resources
Output: #BuildPlan & {
// Hidden field carries the Component name into the BuildPlan — presumably
// #BuildPlan derives metadata from _Name; confirm in its definition.
_Name: Name
// resources is a map unlike other build plans which use a list.
spec: components: resources: (Name): {
metadata: name: Name
// Render the inline Resources into the api object map consumed by holos.
apiObjectMap: (#APIObjects & {apiObjects: Resources}).apiObjectMap
}
}
}

View File

@@ -6,6 +6,10 @@ import schema "github.com/holos-run/holos/api/schema/v1alpha3"
ArgoConfig: #ArgoConfig
}
#Kustomize: schema.#Kustomize
#Kubernetes: schema.#Kubernetes
#ArgoConfig: schema.#ArgoConfig & {
ClusterName: _ClusterName
}

View File

@@ -1 +1 @@
3
4