Compare commits

...

8 Commits

Author SHA1 Message Date
Jeff McCune
4de9f77fbf (#22) Add holos create secret --data-stdin flag
This patch enables quickly copying secrets from vault to the provisioner
cluster.  For example:

    vault kv get -format=json -field data kv/k2/kube-namespace/ceph-csi-rbd/csi-rbd-secret \
      | holos create secret --namespace ceph-system csi-rbd-secret --data-stdin --append-hash=false
2024-02-28 15:29:32 -08:00
Jeff McCune
4c5429b64a (#22) Ceph CSI for Metal clusters
This patch adds the ceph-csi-rbd helm chart component to the metal
cluster type.  The purpose is to enable PersistentVolumeClaims on ois
metal clusters.

Cloud clusters like GKE and EKS are expected to skip rendering the metal
type.

Helm values are handled with CUE.  The ceph secret is managed as an
ExternalSecret resource, appended to the rendered output by cue and the
holos cli.

Use:

    ❯ holos render --cluster-name=k2 ~/workspace/holos-run/holos/docs/examples/platforms/reference/clusters/metal/...
    2:45PM INF render.go:40 rendered prod-metal-ceph version=0.47.0 status=ok action=rendered name=prod-metal-ceph
2024-02-28 14:46:03 -08:00
Jeff McCune
ac5bff4b32 (#20) Error if secret is not found
Without this patch scripts incorrectly proceeded without detecting a
secret was not fetched.

    holos get secret notfound

    8:34AM ERR could not execute version=0.46.3 err="not found: notfound" loc=get.go:66
2024-02-28 08:33:55 -08:00
Jeff McCune
6090ab224e (#14) Validate secrets fetched from provisioner cluster
This patch validates secrets are synced from the provisioner cluster to
a workload cluster.  This verifies the eso-creds-refresher job, external
secrets operator, etc...

Refer to
0ae58858f5
for the corresponding commit on the k2 cluster.
2024-02-27 15:55:17 -08:00
Jeff McCune
10e140258d (#15) Report multiple cue errors
This patch prints out the cue file and line numbers when a cue error
contains multiple go errors to unwrap.

For example:

```
❯ holos render --cluster-name=k2 ~/workspace/holos-run/holos/docs/examples/platforms/reference/clusters/workload/...
3:31PM ERR could not execute version=0.46.0 err="could not decode: content: error in call to encoding/yaml.MarshalStream: incomplete value string (and 1 more errors)" loc=builder.go:212
content: error in call to encoding/yaml.MarshalStream: incomplete value string:
    /home/jeff/workspace/holos-run/holos/docs/examples/schema.cue:199:11
    /home/jeff/workspace/holos-run/holos/docs/examples/cue.mod/gen/external-secrets.io/externalsecret/v1beta1/types_gen.cue:83:14
```
2024-02-27 15:32:11 -08:00
Jeff McCune
40ac705f0d (#16) Add create secret --append-hash=false
So we can easily create secrets for use with ExternalSecret resources.
2024-02-27 12:04:00 -08:00
Jeff McCune
b4ad6425e5 (#14) Validate SecretStore works
This patch validates a SecretStore in the holos-system namespace works
after provisioner credentials are refreshed.
2024-02-27 11:25:00 -08:00
Jeff McCune
3343d226e5 (#14) Fix namespaces "external-secrets" not found
Needed for the `prod-secrets-eso` component to reconcile with flux.

NAME                                    REVISION                SUSPENDED       READY   MESSAGE
flux-system                             main@sha1:28b9ab6b      False           True    Applied revision: main@sha1:28b9ab6b
prod-secrets-eso                        main@sha1:28b9ab6b      False           True    Applied revision: main@sha1:28b9ab6b
prod-secrets-eso-creds-refresher        main@sha1:28b9ab6b      False           True    Applied revision: main@sha1:28b9ab6b
prod-secrets-namespaces                 main@sha1:28b9ab6b      False           True    Applied revision: main@sha1:28b9ab6b
2024-02-26 20:53:43 -08:00
30 changed files with 423 additions and 98 deletions

View File

@@ -0,0 +1,15 @@
# Want cue errors to show files and lines
! exec holos build .
stderr 'could not decode: content: cannot convert non-concrete value string'
stderr '/component.cue:6:1$'
-- cue.mod --
package holos
-- component.cue --
package holos
apiVersion: "holos.run/v1alpha1"
kind: "KubernetesObjects"
cluster: string @tag(cluster, string)
content: foo
foo: string

View File

@@ -922,7 +922,7 @@ import (
kubernetes?: {
// Auth configures how secret-manager authenticates with a
// Kubernetes instance.
auth: struct.MaxFields(1) & {
auth: {
// has both clientCert and clientKey as secretKeySelector
cert?: {
// A reference to a specific 'key' within a Secret resource,

View File

@@ -0,0 +1,29 @@
package holos
// Manage Ceph CSI to provide PersistentVolumeClaims to a cluster.
// Renders the upstream ceph-csi-rbd Helm chart into the ceph-system
// namespace and overlays an ExternalSecret holding the Ceph credentials.
#TargetNamespace: "ceph-system"
// Lookup keys identifying this holos component instance.
#InputKeys: {
project: "metal"
service: "ceph"
component: "ceph"
}
// Flux Kustomization: wait for namespaces before reconciling this chart.
// NOTE(review): dependsOn name appears hard-coded to the prod instance
// prefix rather than #InstancePrefix — confirm this is intentional.
#Kustomization: spec: {
dependsOn: [{name: "prod-secrets-namespaces"}]
targetNamespace: #TargetNamespace
}
#HelmChart & {
namespace: #TargetNamespace
chart: {
name: "ceph-csi-rbd"
version: "3.10.2"
repository: {
name: "ceph-csi"
url: "https://ceph.github.io/csi-charts"
}
}
// Extra api objects appended to the rendered helm output by the holos cli
// (see the builder.go change in this same commit set).
objects: [#ExternalSecret & { _name: "ceph-csi-rbd" }]
}

View File

@@ -0,0 +1,177 @@
package holos
#Input: {
config : {
// (required) String representing a Ceph cluster to provision storage from.
// Should be unique across all Ceph clusters in use for provisioning,
// cannot be greater than 36 bytes in length, and should remain immutable for
// the lifetime of the StorageClass in use.
clusterID: string
// (required) []String list of ceph monitor "address:port" values.
monitors: [...string]
}
}
// Imported from https://github.com/holos-run/holos-infra/blob/0ae58858f5583d25fa7543e47b5f5e9f0b2f3c83/components/core/metal/ceph-csi-rbd/values.holos.yaml
#ChartValues: {
// Necessary for Talos see https://github.com/siderolabs/talos/discussions/8163
selinuxMount: false
csiConfig: [#Input.config]
storageClass: {
annotations: "storageclass.kubernetes.io/is-default-class": "true"
// Specifies whether the storageclass should be created
create: true
name: "ceph-ssd"
// (optional) Prefix to use for naming RBD images.
// If omitted, defaults to "csi-vol-".
// NOTE: Set this to a cluster specific value, e.g. vol-k1-
volumeNamePrefix: "vol-\(#InputKeys.cluster)-"
// (required) String representing a Ceph cluster to provision storage from.
// Should be unique across all Ceph clusters in use for provisioning,
// cannot be greater than 36 bytes in length, and should remain immutable for
// the lifetime of the StorageClass in use.
clusterID: #Input.config.clusterID
// (optional) If you want to use erasure coded pool with RBD, you need to
// create two pools. one erasure coded and one replicated.
// You need to specify the replicated pool here in the `pool` parameter, it is
// used for the metadata of the images.
// The erasure coded pool must be set as the `dataPool` parameter below.
// dataPool: <ec-data-pool>
dataPool: ""
// (required) Ceph pool into which the RBD image shall be created
// eg: pool: replicapool
pool: "k8s-dev"
// (optional) RBD image features, CSI creates image with image-format 2 CSI
// RBD currently supports `layering`, `journaling`, `exclusive-lock`,
// `object-map`, `fast-diff`, `deep-flatten` features.
// Refer https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#image-features
// for image feature dependencies.
// imageFeatures: layering,journaling,exclusive-lock,object-map,fast-diff
imageFeatures: "layering"
// (optional) Specifies whether to try other mounters in case if the current
// mounter fails to mount the rbd image for any reason. True means fallback
// to next mounter, default is set to false.
// Note: tryOtherMounters is currently useful to fallback from krbd to rbd-nbd
// in case if any of the specified imageFeatures is not supported by krbd
// driver on node scheduled for application pod launch, but in the future this
// should work with any mounter type.
// tryOtherMounters: false
// (optional) uncomment the following to use rbd-nbd as mounter
// on supported nodes
// mounter: rbd-nbd
mounter: ""
// (optional) ceph client log location, eg: rbd-nbd
// By default host-path /var/log/ceph of node is bind-mounted into
// csi-rbdplugin pod at /var/log/ceph mount path. This is to configure
// target bindmount path used inside container for ceph clients logging.
// See docs/rbd-nbd.md for available configuration options.
// cephLogDir: /var/log/ceph
cephLogDir: ""
// (optional) ceph client log strategy
// By default, log file belonging to a particular volume will be deleted
// on unmap, but you can choose to just compress instead of deleting it
// or even preserve the log file in text format as it is.
// Available options `remove` or `compress` or `preserve`
// cephLogStrategy: remove
cephLogStrategy: ""
// (optional) Instruct the plugin it has to encrypt the volume
// By default it is disabled. Valid values are "true" or "false".
// A string is expected here, i.e. "true", not true.
// encrypted: "true"
encrypted: ""
// (optional) Use external key management system for encryption passphrases by
// specifying a unique ID matching KMS ConfigMap. The ID is only used for
// correlation to configmap entry.
encryptionKMSID: ""
// Add topology constrained pools configuration, if topology based pools
// are setup, and topology constrained provisioning is required.
// For further information read TODO<doc>
// topologyConstrainedPools: |
// [{"poolName":"pool0",
// "dataPool":"ec-pool0" # optional, erasure-coded pool for data
// "domainSegments":[
// {"domainLabel":"region","value":"east"},
// {"domainLabel":"zone","value":"zone1"}]},
// {"poolName":"pool1",
// "dataPool":"ec-pool1" # optional, erasure-coded pool for data
// "domainSegments":[
// {"domainLabel":"region","value":"east"},
// {"domainLabel":"zone","value":"zone2"}]},
// {"poolName":"pool2",
// "dataPool":"ec-pool2" # optional, erasure-coded pool for data
// "domainSegments":[
// {"domainLabel":"region","value":"west"},
// {"domainLabel":"zone","value":"zone1"}]}
// ]
topologyConstrainedPools: []
// (optional) mapOptions is a comma-separated list of map options.
// For krbd options refer
// https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
// For nbd options refer
// https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
// Format:
// mapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
// An empty mounter field is treated as krbd type for compatibility.
// eg:
// mapOptions: "krbd:lock_on_read,queue_depth=1024;nbd:try-netlink"
mapOptions: ""
// (optional) unmapOptions is a comma-separated list of unmap options.
// For krbd options refer
// https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
// For nbd options refer
// https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
// Format:
// unmapOptions: "<mounter>:op1,op2;<mounter>:op1,op2"
// An empty mounter field is treated as krbd type for compatibility.
// eg:
// unmapOptions: "krbd:force;nbd:force"
unmapOptions: ""
// The secrets have to contain Ceph credentials with required access
// to the 'pool'.
provisionerSecret: "csi-rbd-secret"
// If Namespaces are left empty, the secrets are assumed to be in the
// Release namespace.
provisionerSecretNamespace: ""
controllerExpandSecret: "csi-rbd-secret"
controllerExpandSecretNamespace: ""
nodeStageSecret: "csi-rbd-secret"
nodeStageSecretNamespace: ""
// Specify the filesystem type of the volume. If not specified,
// csi-provisioner will set default as `ext4`.
fstype: "ext4"
reclaimPolicy: "Delete"
allowVolumeExpansion: true
mountOptions: []
}
secret: {
// Specifies whether the secret should be created
create: false
name: "csi-rbd-secret"
// Key values correspond to a user name and its key, as defined in the
// ceph cluster. User ID should have required access to the 'pool'
// specified in the storage class
userID: "admin"
userKey: "$(ceph auth get-key client.admin)"
// Encryption passphrase
encryptionPassphrase: "$(python -c 'import secrets; print(secrets.token_hex(32));')"
}
}

View File

@@ -0,0 +1,8 @@
package holos
#Input: {
config: {
clusterID: "a6de32ab-c84f-49a6-b97e-e31dc2a70931"
monitors: ["10.64.1.21:6789", "10.64.1.31:6789", "10.64.1.41:6789"]
}
}

View File

@@ -0,0 +1,3 @@
# Metal Clusters
This cluster type is overlaid onto other cluster types to add services necessary outside of a cloud like GKE or EKS. Ceph for PersistentVolumeClaim support on a Talos Proxmox cluster is the primary use case.

View File

@@ -15,6 +15,10 @@ objects: #CredsRefresherService.objects
#TargetNamespace: #CredsRefresher.namespace
#Kustomization: spec: {
dependsOn: [{name: #InstancePrefix + "-namespaces"}]
}
let NAME = #CredsRefresher.name
let AUD = "//iam.googleapis.com/projects/\(#InputKeys.gcpProjectNumber)/locations/global/workloadIdentityPools/holos/providers/k8s-\(#InputKeys.cluster)"
let MOUNT = "/var/run/service-account"

View File

@@ -1,3 +1,30 @@
package holos
// Manages the External Secrets Operator from the official upstream Helm chart.
#TargetNamespace: "external-secrets"
#InputKeys: component: "eso"
#InputKeys: {
project: "secrets"
service: "eso"
}
#Kustomization: spec: {
dependsOn: [{name: #InstancePrefix + "-namespaces"}]
targetNamespace: #TargetNamespace
}
#HelmChart & {
values: installCrds: true
namespace: #TargetNamespace
chart: {
name: "external-secrets"
version: "0.9.12"
repository: {
name: "external-secrets"
url: "https://charts.external-secrets.io"
}
}
}

View File

@@ -1,8 +0,0 @@
package holos
#TargetNamespace: "external-secrets"
#InputKeys: {
project: "secrets"
service: "eso"
}

View File

@@ -1,16 +0,0 @@
package holos
#Kustomization: spec: dependsOn: [{name: #InstancePrefix + "-namespaces"}]
#HelmChart & {
values: installCrds: true
namespace: #TargetNamespace
chart: {
name: "external-secrets"
version: "0.9.12"
repository: {
name: "external-secrets"
url: "https://charts.external-secrets.io"
}
}
}

View File

@@ -0,0 +1,19 @@
package holos
// Validate ESO by syncing a secret with a SecretStore.
#TargetNamespace: "holos-system"
#InputKeys: {
project: "secrets"
component: "validate"
}
#Kustomization: spec: dependsOn: [{name: #InstancePrefix + "-eso"}]
objects: [
#SecretStore,
#ExternalSecret & { _name: "validate" },
]
{} & #KubernetesObjects

View File

@@ -1,6 +1,9 @@
package holos
// #PlatformNamespaces is the union of all namespaces across all cluster types. Namespaces are created in all clusters regardless of if they're
// used within the cluster or not. This is important for security and consistency with IAM, RBAC, and Secrets sync between clusters.
#PlatformNamespaces: [
{name: "external-secrets"},
{name: "holos-system"},
{name: "flux-system"},
{name: "ceph-system"},

View File

@@ -1,13 +0,0 @@
package holos
#Kustomization: spec: dependsOn: [{name: #InstancePrefix + "-eso"}]
objects: [
#SecretStore,
#ExternalSecret & {
_name: "validate"
spec: dataFrom: [{extract: key: "ns/" + #TargetNamespace + "/test"}]
},
]
{} & #KubernetesObjects

View File

@@ -1,8 +0,0 @@
package holos
#TargetNamespace: "default"
#InputKeys: {
project: "secrets"
component: "validate"
}

View File

@@ -79,8 +79,10 @@ _apiVersion: "holos.run/v1alpha1"
kind: string | *"GitRepository"
name: string | *"flux-system"
}
timeout: string | *"3m0s"
wait: bool | *true
suspend?: bool
targetNamespace?: string
timeout: string | *"3m0s"
wait: bool | *true
}
}
@@ -88,7 +90,7 @@ _apiVersion: "holos.run/v1alpha1"
#ExternalSecret: #NamespaceObject & es.#ExternalSecret & {
_name: string
metadata: {
namespace: string | *"default"
namespace: #TargetNamespace
name: _name
}
spec: {
@@ -100,24 +102,29 @@ _apiVersion: "holos.run/v1alpha1"
target: {
creationPolicy: string | *"Owner"
}
data: [{
remoteRef: key: _name
secretKey: _name
}]
}
}
#SecretStore: #NamespaceObject & ss.#SecretStore & {
metadata: {
name: string | *"default"
namespace: string | *#TargetNamespace
namespace: #TargetNamespace
}
spec: provider: {
vault: {
auth: kubernetes: {
mountPath: #InputKeys.cluster
role: string | *"default"
serviceAccountRef: name: string | *"default"
kubernetes: {
remoteNamespace: #TargetNamespace
auth: token: bearerToken: {
name: string | *"eso-reader"
key: string | *"token"
}
server: {
caBundle: #InputKeys.provisionerCABundle
url: #InputKeys.provisionerURL
}
path: string | *"kv/k8s"
server: "https://vault.core." + #Platform.org.domain
version: string | *"v2"
}
}
}
@@ -138,6 +145,11 @@ _apiVersion: "holos.run/v1alpha1"
// GCP Project Info used for the Provisioner Cluster
gcpProjectID: string @tag(gcpProjectID, type=string)
gcpProjectNumber: int @tag(gcpProjectNumber, type=int)
// Same as cluster certificate-authority-data field in ~/.holos/kubeconfig.provisioner
provisionerCABundle: string @tag(provisionerCABundle, type=string)
// Same as the cluster server field in ~/.holos/kubeconfig.provisioner
provisionerURL: string @tag(provisionerURL, type=string)
}
// #Platform defines the primary lookup table for the platform. Lookup keys should be limited to those defined in #KeyTags.
@@ -187,8 +199,9 @@ _apiVersion: "holos.run/v1alpha1"
kind: "KubernetesObjects"
// objects holds a list of the kubernetes api objects to configure.
objects: [...metav1.#TypeMeta] | *[]
// out holds the rendered yaml text stream of kubernetes api objects.
// content holds the rendered yaml text stream of kubernetes api objects.
content: yaml.MarshalStream(objects)
contentType: "application/yaml"
// ksObjects holds the flux Kustomization objects for gitops
ksObjects: [...#Kustomization] | *[#Kustomization]
// ksContent is the yaml representation of kustomization
@@ -207,6 +220,9 @@ _apiVersion: "holos.run/v1alpha1"
}
}
// #ChartValues represent the values provided to a helm chart. Existing values may be imported using cue import values.yaml -p holos then wrapping the values.cue content in #Values: {}
#ChartValues: {...}
// #HelmChart is a holos component which produces kubernetes api objects from cue values provided to the helm template command.
#HelmChart: {
#OutputTypeMeta
@@ -220,13 +236,18 @@ _apiVersion: "holos.run/v1alpha1"
// chart defines the upstream helm chart to process.
chart: #Chart
// values represents the helm values to provide to the chart.
values: {...}
values: #ChartValues
// valuesContent holds the values yaml
valuesContent: yaml.Marshal(values)
// platform returns the platform data structure for visibility / troubleshooting.
platform: #Platform
// instance returns the key values of the holos component instance.
instance: #InputKeys
// objects holds a list of the kubernetes api objects to configure.
objects: [...metav1.#TypeMeta] | *[]
// content holds the rendered yaml text stream of kubernetes api objects.
content: yaml.MarshalStream(objects)
contentType: "application/yaml"
}
// #PlatformSpec is the output schema of a platform specification.

View File

@@ -27,11 +27,3 @@ func New(name string) *cobra.Command {
}
return cmd
}
// EnsureNewline adds a trailing newline if not already there.
func EnsureNewline(b []byte) []byte {
if len(b) > 0 && b[len(b)-1] != '\n' {
b = append(b, '\n')
}
return b
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/holos-run/holos/pkg/cli/secret"
"github.com/holos-run/holos/pkg/holos"
"github.com/holos-run/holos/pkg/logger"
"github.com/holos-run/holos/pkg/util"
"github.com/holos-run/holos/pkg/wrapper"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -77,7 +78,7 @@ func makeGetRunFunc(cfg *holos.Config, cf getConfig) command.RunFunc {
// Print one file to stdout
if key := *cf.file; key != "" {
if data, found := secret.Data[key]; found {
cfg.Write(command.EnsureNewline(data))
cfg.Write(util.EnsureNewline(data))
return nil
}
return wrapper.Wrap(fmt.Errorf("not found: %s have %#v", key, keys))
@@ -89,7 +90,7 @@ func makeGetRunFunc(cfg *holos.Config, cf getConfig) command.RunFunc {
for k, v := range secret.Data {
cfg.Printf("-- %s --\n", k)
cfg.Write(command.EnsureNewline(v))
cfg.Write(util.EnsureNewline(v))
}
}
return nil

View File

@@ -2,7 +2,8 @@ package cli
import (
"context"
"errors"
"cuelang.org/go/cue/errors"
"fmt"
"github.com/holos-run/holos/pkg/holos"
"github.com/holos-run/holos/pkg/wrapper"
"log/slog"
@@ -15,21 +16,27 @@ func MakeMain(options ...holos.Option) func() int {
slog.SetDefault(cfg.Logger())
ctx := context.Background()
if err := New(cfg).ExecuteContext(ctx); err != nil {
return handleError(ctx, err, cfg)
return HandleError(ctx, err, cfg)
}
return 0
}
}
// handleError is the top level error handler that unwraps and logs errors.
func handleError(ctx context.Context, err error, hc *holos.Config) (exitCode int) {
// HandleError is the top level error handler that unwraps and logs errors.
func HandleError(ctx context.Context, err error, hc *holos.Config) (exitCode int) {
log := hc.NewTopLevelLogger()
var cueErr errors.Error
var errAt *wrapper.ErrorAt
const msg = "could not execute"
if ok := errors.As(err, &errAt); ok {
if errors.As(err, &errAt) {
log.ErrorContext(ctx, msg, "err", errAt.Unwrap(), "loc", errAt.Source.Loc())
} else {
log.ErrorContext(ctx, msg, "err", err)
}
// cue errors are bundled up as a list and refer to multiple files / lines.
if errors.As(err, &cueErr) {
msg := errors.Details(cueErr, nil)
_, _ = fmt.Fprint(hc.Stderr(), msg)
}
return 1
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/holos-run/holos/pkg/logger"
"github.com/holos-run/holos/pkg/wrapper"
"github.com/spf13/cobra"
"io"
"io/fs"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,6 +27,8 @@ func NewCreateCmd(hc *holos.Config) *cobra.Command {
cfg, flagSet := newConfig()
flagSet.Var(&cfg.files, "from-file", "store files as keys in the secret")
cfg.dryRun = flagSet.Bool("dry-run", false, "dry run")
cfg.appendHash = flagSet.Bool("append-hash", true, "append hash to kubernetes secret name")
cfg.dataStdin = flagSet.Bool("data-stdin", false, "read data field as json from stdin if")
cmd.Flags().SortFlags = false
cmd.Flags().AddGoFlagSet(flagSet)
@@ -45,8 +48,9 @@ func makeCreateRunFunc(hc *holos.Config, cfg *config) command.RunFunc {
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Labels: map[string]string{NameLabel: secretName},
Name: secretName,
Namespace: *cfg.namespace,
Labels: map[string]string{NameLabel: secretName},
},
Data: make(secretData),
}
@@ -59,6 +63,22 @@ func makeCreateRunFunc(hc *holos.Config, cfg *config) command.RunFunc {
}
}
if *cfg.dataStdin {
log.InfoContext(ctx, "reading data keys from stdin...")
var obj map[string]string
data, err := io.ReadAll(hc.Stdin())
if err != nil {
return wrapper.Wrap(err)
}
err = yaml.Unmarshal(data, &obj)
if err != nil {
return wrapper.Wrap(err)
}
for k, v := range obj {
secret.Data[k] = []byte(v)
}
}
for _, file := range cfg.files {
if err := filepath.WalkDir(file, makeWalkFunc(secret.Data, file)); err != nil {
return wrapper.Wrap(err)
@@ -72,10 +92,12 @@ func makeCreateRunFunc(hc *holos.Config, cfg *config) command.RunFunc {
secret.Labels[ClusterLabel] = *cfg.cluster
}
if secretHash, err := hash.SecretHash(secret); err != nil {
return wrapper.Wrap(err)
} else {
secret.Name = fmt.Sprintf("%s-%s", secret.Name, secretHash)
if *cfg.appendHash {
if secretHash, err := hash.SecretHash(secret); err != nil {
return wrapper.Wrap(err)
} else {
secret.Name = fmt.Sprintf("%s-%s", secret.Name, secretHash)
}
}
if *cfg.dryRun {

View File

@@ -63,7 +63,7 @@ func makeGetRunFunc(hc *holos.Config, cfg *config) command.RunFunc {
log.DebugContext(ctx, "results", "len", len(list.Items))
if len(list.Items) < 1 {
continue
return wrapper.Wrap(fmt.Errorf("not found: %v", secretName))
}
// Sort oldest first.

View File

@@ -12,13 +12,15 @@ const ClusterLabel = "holos.run/cluster.name"
type secretData map[string][]byte
type config struct {
files holos.StringSlice
printFile *string
extract *bool
dryRun *bool
cluster *string
namespace *string
extractTo *string
files holos.StringSlice
printFile *string
extract *bool
dryRun *bool
appendHash *bool
dataStdin *bool
cluster *string
namespace *string
extractTo *string
}
func newConfig() (*config, *flag.FlagSet) {

View File

@@ -55,11 +55,12 @@ func cmdHolos(ts *testscript.TestScript, neg bool, args []string) {
cmd := cli.New(cfg)
cmd.SetArgs(args)
err := cmd.Execute()
if neg {
if err == nil {
ts.Fatalf("want: error\nhave: %v", err)
ts.Fatalf("\nwant: error\nhave: %v", err)
} else {
ts.Logf("want: error\nhave: %v", err)
cli.HandleError(cmd.Context(), err, cfg)
}
} else {
ts.Check(err)

View File

@@ -0,0 +1,7 @@
# Want no hash appended
holos create secret test --namespace holos-system --from-file $WORK/test --append-hash=false
stderr ' created: test '
stderr ' secret=test '
-- test --
sekret

View File

@@ -0,0 +1,6 @@
# Want no hash appended
holos create secret test --namespace holos-system --from-file $WORK/test --append-hash=false --dry-run
stdout 'name: test$'
-- test --
sekret

View File

@@ -0,0 +1,3 @@
# Want missing secrets to exit non-zero https://github.com/holos-run/holos/issues/20
! holos get secret does-not-exist
stderr 'not found: does-not-exist'

View File

@@ -67,9 +67,9 @@ func printFile(w io.Writer, idx int, a *txtar.Archive) (err error) {
return wrapper.Wrap(fmt.Errorf("idx cannot be 0"))
}
if idx > 0 {
_, err = w.Write(command.EnsureNewline(a.Files[idx-1].Data))
_, err = w.Write(util.EnsureNewline(a.Files[idx-1].Data))
} else {
_, err = w.Write(command.EnsureNewline(a.Files[len(a.Files)+idx].Data))
_, err = w.Write(util.EnsureNewline(a.Files[len(a.Files)+idx].Data))
}
return
}

View File

@@ -10,6 +10,7 @@ import (
"fmt"
"github.com/holos-run/holos"
"github.com/holos-run/holos/pkg/logger"
"github.com/holos-run/holos/pkg/util"
"github.com/holos-run/holos/pkg/wrapper"
"os"
"os/exec"
@@ -74,9 +75,10 @@ type Metadata struct {
// Result is the build result for display or writing.
type Result struct {
Metadata Metadata `json:"metadata,omitempty"`
Content string `json:"content,omitempty"`
KsContent string `json:"ksContent,omitempty"`
Metadata Metadata `json:"metadata,omitempty"`
Content string `json:"content,omitempty"`
ContentType string `json:"contentType"`
KsContent string `json:"ksContent,omitempty"`
}
type Repository struct {
@@ -99,6 +101,8 @@ type HelmChart struct {
Namespace string `json:"namespace"`
Chart Chart `json:"chart"`
ValuesContent string `json:"valuesContent"`
ContentType string `json:"contentType"`
Content string `json:"content"`
}
// Name returns the metadata name of the result. Equivalent to the
@@ -225,6 +229,16 @@ func (b *Builder) Run(ctx context.Context) (results []*Result, err error) {
if err := runHelm(ctx, &helmChart, &result, holos.PathComponent(instance.Dir)); err != nil {
return nil, err
}
// Append any cue api objects defined alongside the helm holos component.
if helmChart.Content != "" && helmChart.ContentType == "application/yaml" {
buf := []byte(result.Content)
util.EnsureNewline(buf)
buf = append(buf, []byte("---\n# Source: holos component overlay objects\n")...)
buf = append(buf, []byte(helmChart.Content)...)
log.DebugContext(ctx, "added additional api objects", "bytes", len(buf))
result.Content = string(buf)
}
default:
return nil, wrapper.Wrap(fmt.Errorf("build kind not implemented: %v", kind))
}

9
pkg/util/util.go Normal file
View File

@@ -0,0 +1,9 @@
package util
// EnsureNewline returns b, appending a single trailing '\n' when the
// final byte is not already a newline. Empty (or nil) input is returned
// unchanged. Note append may reallocate, so callers must use the return value.
func EnsureNewline(b []byte) []byte {
	if n := len(b); n == 0 || b[n-1] == '\n' {
		return b
	}
	return append(b, '\n')
}

View File

@@ -1 +1 @@
45
47

View File

@@ -1 +1 @@
2
1