diff --git a/api/v1alpha1/proxmoxcluster_types_test.go b/api/v1alpha1/proxmoxcluster_types_test.go index 791ffe2..af2623a 100644 --- a/api/v1alpha1/proxmoxcluster_types_test.go +++ b/api/v1alpha1/proxmoxcluster_types_test.go @@ -92,7 +92,9 @@ func defaultCluster() *ProxmoxCluster { ProxmoxMachineSpec: map[string]ProxmoxMachineSpec{ "controlPlane": { VirtualMachineCloneSpec: VirtualMachineCloneSpec{ - SourceNode: "pve1", + TemplateSource: TemplateSource{ + SourceNode: "pve1", + }, }, }, }, diff --git a/api/v1alpha1/proxmoxmachine_types.go b/api/v1alpha1/proxmoxmachine_types.go index 45106c1..c58b7af 100644 --- a/api/v1alpha1/proxmoxmachine_types.go +++ b/api/v1alpha1/proxmoxmachine_types.go @@ -155,8 +155,8 @@ const ( TargetStorageFormatVmdk TargetFileStorageFormat = "vmdk" ) -// VirtualMachineCloneSpec is information used to clone a virtual machine. -type VirtualMachineCloneSpec struct { +// TemplateSource defines the source of the template VM. +type TemplateSource struct { // SourceNode is the initially selected proxmox node. // This node will be used to locate the template VM, which will // be used for cloning operations. @@ -173,12 +173,22 @@ type VirtualMachineCloneSpec struct { // will be cloned onto the same node as SourceNode. // // +kubebuilder:validation:MinLength=1 - SourceNode string `json:"sourceNode"` + // +optional + SourceNode string `json:"sourceNode,omitempty"` // TemplateID the vm_template vmid used for cloning a new VM. // +optional TemplateID *int32 `json:"templateID,omitempty"` + // TemplateSelector defines MatchTags for looking up VM templates. + // +optional + TemplateSelector *TemplateSelector `json:"templateSelector,omitempty"` +} + +// VirtualMachineCloneSpec is information used to clone a virtual machine. +type VirtualMachineCloneSpec struct { + TemplateSource `json:",inline"` + // Description for the new VM. 
// +optional Description *string `json:"description,omitempty"` @@ -213,6 +223,16 @@ type VirtualMachineCloneSpec struct { Target *string `json:"target,omitempty"` } +// TemplateSelector defines MatchTags for looking up VM templates. +type TemplateSelector struct { + // Specifies all tags to look for, when looking up the VM template. + // Passed tags must be an exact 1:1 match with the tags on the template you want to use. + // If multiple VM templates with the same set of tags are found, provisioning will fail. + // + // +kubebuilder:validation:MinItems=1 + MatchTags []string `json:"matchTags"` +} + // NetworkSpec defines the virtual machine's network configuration. type NetworkSpec struct { // Default is the default network device, @@ -526,6 +546,8 @@ type ProxmoxMachine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="[has(self.sourceNode), has(self.templateSelector)].exists_one(c, c)",message="must define either SourceNode with TemplateID, OR TemplateSelector" + // +kubebuilder:validation:XValidation:rule="[has(self.templateID), has(self.templateSelector)].exists_one(c, c)",message="must define either SourceNode with TemplateID, OR TemplateSelector." // +kubebuilder:validation:XValidation:rule="self.full && self.format != ''",message="Must set full=true when specifying format" Spec ProxmoxMachineSpec `json:"spec,omitempty"` Status ProxmoxMachineStatus `json:"status,omitempty"` @@ -566,6 +588,14 @@ func (r *ProxmoxMachine) GetTemplateID() int32 { return -1 } +// GetTemplateSelectorTags get the tags, the desired vm template should have. +func (r *ProxmoxMachine) GetTemplateSelectorTags() []string { + if r.Spec.TemplateSelector != nil { + return r.Spec.TemplateSelector.MatchTags + } + return nil +} + // GetNode get the Proxmox node used to provision this machine. 
func (r *ProxmoxMachine) GetNode() string { return r.Spec.SourceNode diff --git a/api/v1alpha1/proxmoxmachine_types_test.go b/api/v1alpha1/proxmoxmachine_types_test.go index 0d0b019..bbf4c15 100644 --- a/api/v1alpha1/proxmoxmachine_types_test.go +++ b/api/v1alpha1/proxmoxmachine_types_test.go @@ -34,11 +34,14 @@ func defaultMachine() *ProxmoxMachine { Namespace: metav1.NamespaceDefault, }, Spec: ProxmoxMachineSpec{ - VirtualMachineCloneSpec: VirtualMachineCloneSpec{ - SourceNode: "pve1", - }, ProviderID: ptr.To("proxmox://abcdef"), VirtualMachineID: ptr.To[int64](100), + VirtualMachineCloneSpec: VirtualMachineCloneSpec{ + TemplateSource: TemplateSource{ + SourceNode: "pve1", + TemplateID: ptr.To[int32](100), + }, + }, Disks: &Storage{ BootVolume: &DiskSize{ Disk: "scsi0", @@ -56,19 +59,33 @@ var _ = Describe("ProxmoxMachine Test", func() { }) Context("VirtualMachineCloneSpec", func() { - It("Should not allow empty source node", func() { - dm := defaultMachine() - dm.Spec.SourceNode = "" - - Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should be at least 1 chars long"))) - }) - It("Should not allow specifying format if full clone is disabled", func() { dm := defaultMachine() dm.Spec.Full = ptr.To(false) Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("Must set full=true when specifying format"))) }) + + It("Should disallow absence of SourceNode, TemplateID and TemplateSelector", func() { + dm := defaultMachine() + dm.Spec.TemplateSource.SourceNode = "" + dm.Spec.TemplateSource.TemplateID = nil + dm.Spec.TemplateSelector = nil + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("must define either SourceNode with TemplateID, OR TemplateSelector"))) + }) + + It("Should not allow specifying TemplateSelector together with SourceNode and/or TemplateID", func() { + dm := defaultMachine() + dm.Spec.TemplateSelector = &TemplateSelector{MatchTags: 
[]string{"test"}} + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("must define either SourceNode with TemplateID, OR TemplateSelector"))) + }) + + It("Should not allow specifying TemplateSelector with empty MatchTags", func() { + dm := defaultMachine() + dm.Spec.TemplateSelector = &TemplateSelector{MatchTags: []string{}} + + Expect(k8sClient.Create(context.Background(), dm)).Should(MatchError(ContainSubstring("should have at least 1 items"))) + }) }) Context("Disks", func() { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 06199c2..bb82fa4 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -922,6 +922,51 @@ func (in *Storage) DeepCopy() *Storage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateSelector) DeepCopyInto(out *TemplateSelector) { + *out = *in + if in.MatchTags != nil { + in, out := &in.MatchTags, &out.MatchTags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSelector. +func (in *TemplateSelector) DeepCopy() *TemplateSelector { + if in == nil { + return nil + } + out := new(TemplateSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateSource) DeepCopyInto(out *TemplateSource) { + *out = *in + if in.TemplateID != nil { + in, out := &in.TemplateID, &out.TemplateID + *out = new(int32) + **out = **in + } + if in.TemplateSelector != nil { + in, out := &in.TemplateSelector, &out.TemplateSelector + *out = new(TemplateSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSource. 
+func (in *TemplateSource) DeepCopy() *TemplateSource { + if in == nil { + return nil + } + out := new(TemplateSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VMIDRange) DeepCopyInto(out *VMIDRange) { *out = *in @@ -983,11 +1028,7 @@ func (in *VirtualMachine) DeepCopy() *VirtualMachine { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VirtualMachineCloneSpec) DeepCopyInto(out *VirtualMachineCloneSpec) { *out = *in - if in.TemplateID != nil { - in, out := &in.TemplateID, &out.TemplateID - *out = new(int32) - **out = **in - } + in.TemplateSource.DeepCopyInto(&out.TemplateSource) if in.Description != nil { in, out := &in.Description, &out.Description *out = new(string) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 2381b09..5077773 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -559,6 +559,22 @@ spec: a new VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines MatchTags for looking + up VM templates. + properties: + matchTags: + description: |- + Specifies all tags to look for, when looking up the VM template. + Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. 
@@ -590,8 +606,6 @@ spec: x-kubernetes-validations: - message: end should be greater than or equal to start rule: self.end >= self.start - required: - - sourceNode type: object type: object x-kubernetes-validations: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml index 3ffec28..f5b2a1e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml @@ -600,6 +600,22 @@ spec: for cloning a new VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines MatchTags + for looking up VM templates. + properties: + matchTags: + description: |- + Specifies all tags to look for, when looking up the VM template. + Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. @@ -632,8 +648,6 @@ spec: - message: end should be greater than or equal to start rule: self.end >= self.start - required: - - sourceNode type: object type: object x-kubernetes-validations: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml index 1565510..c337629 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml @@ -527,6 +527,22 @@ spec: VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines MatchTags for looking up VM + templates. 
+ properties: + matchTags: + description: |- + Specifies all tags to look for, when looking up the VM template. + Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. + items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. @@ -557,10 +573,14 @@ spec: x-kubernetes-validations: - message: end should be greater than or equal to start rule: self.end >= self.start - required: - - sourceNode type: object x-kubernetes-validations: + - message: must define either SourceNode with TemplateID, OR TemplateSelector + rule: '[has(self.sourceNode), has(self.templateSelector)].exists_one(c, + c)' + - message: must define either SourceNode with TemplateID, OR TemplateSelector. + rule: '[has(self.templateID), has(self.templateSelector)].exists_one(c, + c)' - message: Must set full=true when specifying format rule: self.full && self.format != '' status: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml index 7b6f4a9..c07d275 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml @@ -559,6 +559,22 @@ spec: a new VM. format: int32 type: integer + templateSelector: + description: TemplateSelector defines MatchTags for looking + up VM templates. + properties: + matchTags: + description: |- + Specifies all tags to look for, when looking up the VM template. + Passed tags must be an exact 1:1 match with the tags on the template you want to use. + If multiple VM templates with the same set of tags are found, provisioning will fail. 
+ items: + type: string + minItems: 1 + type: array + required: + - matchTags + type: object virtualMachineID: description: VirtualMachineID is the Proxmox identifier for the ProxmoxMachine VM. @@ -589,8 +605,6 @@ spec: x-kubernetes-validations: - message: end should be greater than or equal to start rule: self.end >= self.start - required: - - sourceNode type: object required: - spec diff --git a/docs/advanced-setups.md b/docs/advanced-setups.md index 8c10f3a..697e0f8 100644 --- a/docs/advanced-setups.md +++ b/docs/advanced-setups.md @@ -176,6 +176,14 @@ This behaviour can be configured in the `ProxmoxCluster` CR through the field `. For example, setting it to `0` (zero), entirely disables scheduling based on memory. Alternatively, if you set it to any value greater than `0`, the scheduler will treat your host as it would have `${value}%` of memory. In real numbers that would mean, if you have a host with 64GB of memory and set the number to `300`, the scheduler would allow you to provision guests with a total of 192GB memory and therefore overprovision the host. (Use with caution! It's strongly suggested to have memory ballooning configured everywhere.). Or, if you were to set it to `95` for example, it would treat your host as it would only have 60,8GB of memory, and leave the remaining 3,2GB for the host. +## Template lookup based on Proxmox tags + +Our provider is able to look up templates based on their attached tags, for `ProxmoxMachine` resources that make use of a tag selector. + +For example, you can set the `TEMPLATE_TAGS="tag1,tag2"` environment variable. Your custom image will then be used when using the [auto-image](https://github.com/ionos-cloud/cluster-api-provider-proxmox/blob/main/templates/cluster-template-auto-image.yaml) template. + +Please note: Passed tags must be an exact 1:1 match with the tags on the template you want to use. The matched result must be unique. If multiple templates are found, provisioning will fail. 
+ ## Proxmox RBAC with least privileges For the Proxmox API user/token you create for CAPMOX, these are the minimum required permissions. diff --git a/envfile.example b/envfile.example index c334778..b111be9 100644 --- a/envfile.example +++ b/envfile.example @@ -3,6 +3,7 @@ export PROXMOX_TOKEN="" export PROXMOX_SECRET="" export PROXMOX_SOURCENODE="pve" export TEMPLATE_VMID=100 +export TEMPLATE_TAGS="tag1,tag2" export VM_SSH_KEYS="ssh-ed25519 ..., ssh-ed25519 ..." export KUBERNETES_VERSION="1.25.1" export CONTROL_PLANE_ENDPOINT_IP=10.10.10.4 diff --git a/internal/service/vmservice/helpers_test.go b/internal/service/vmservice/helpers_test.go index 7ea03fa..ce6ee52 100644 --- a/internal/service/vmservice/helpers_test.go +++ b/internal/service/vmservice/helpers_test.go @@ -109,8 +109,10 @@ func setupReconcilerTest(t *testing.T) (*scope.MachineScope, *proxmoxtest.MockCl }, Spec: infrav1alpha1.ProxmoxMachineSpec{ VirtualMachineCloneSpec: infrav1alpha1.VirtualMachineCloneSpec{ - SourceNode: "node1", - TemplateID: ptr.To[int32](123), + TemplateSource: infrav1alpha1.TemplateSource{ + SourceNode: "node1", + TemplateID: ptr.To[int32](123), + }, }, }, } diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 17bc35f..81fb328 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -384,6 +384,20 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe } templateID := scope.ProxmoxMachine.GetTemplateID() + if templateID == -1 { + var err error + templateSelectorTags := scope.ProxmoxMachine.GetTemplateSelectorTags() + options.Node, templateID, err = scope.InfraCluster.ProxmoxClient.FindVMTemplateByTags(ctx, templateSelectorTags) + + if err != nil { + if errors.Is(err, goproxmox.ErrTemplateNotFound) { + scope.SetFailureMessage(err) + scope.SetFailureReason(capierrors.MachineStatusError("VMTemplateNotFound")) + conditions.MarkFalse(scope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition, 
infrav1alpha1.VMProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err) + } + return proxmox.VMCloneResponse{}, err + } + } res, err := scope.InfraCluster.ProxmoxClient.CloneVM(ctx, int(templateID), options) if err != nil { return res, err diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 001909e..9d35d6a 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -145,6 +145,80 @@ func TestEnsureVirtualMachine_CreateVM_FullOptions(t *testing.T) { requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) } +func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector(t *testing.T) { + vmTemplateTags := []string{"foo", "bar"} + + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") + machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") + machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") + machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + expectedOptions := proxmox.VMCloneRequest{ + Node: "node1", + Name: "test", + Description: "test vm", + Format: "raw", + Full: 1, + Pool: "pool", + SnapName: "snap", + Storage: "storage", + Target: "node2", + } + + proxmoxClient.EXPECT().FindVMTemplateByTags(context.Background(), vmTemplateTags).Return("node1", 123, nil).Once() + + response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()} + proxmoxClient.EXPECT().CloneVM(context.Background(), 123, 
expectedOptions).Return(response, nil).Once() + + requeue, err := ensureVirtualMachine(context.Background(), machineScope) + require.NoError(t, err) + require.True(t, requeue) + + require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode) + require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false)) + requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition) +} + +func TestEnsureVirtualMachine_CreateVM_FullOptions_TemplateSelector_VMTemplateNotFound(t *testing.T) { + ctx := context.Background() + vmTemplateTags := []string{"foo", "bar"} + + machineScope, proxmoxClient, _ := setupReconcilerTest(t) + machineScope.ProxmoxMachine.Spec.VirtualMachineCloneSpec = infrav1alpha1.VirtualMachineCloneSpec{ + TemplateSource: infrav1alpha1.TemplateSource{ + TemplateSelector: &infrav1alpha1.TemplateSelector{ + MatchTags: vmTemplateTags, + }, + }, + } + machineScope.ProxmoxMachine.Spec.Description = ptr.To("test vm") + machineScope.ProxmoxMachine.Spec.Format = ptr.To(infrav1alpha1.TargetStorageFormatRaw) + machineScope.ProxmoxMachine.Spec.Full = ptr.To(true) + machineScope.ProxmoxMachine.Spec.Pool = ptr.To("pool") + machineScope.ProxmoxMachine.Spec.SnapName = ptr.To("snap") + machineScope.ProxmoxMachine.Spec.Storage = ptr.To("storage") + machineScope.ProxmoxMachine.Spec.Target = ptr.To("node2") + + proxmoxClient.EXPECT().FindVMTemplateByTags(context.Background(), vmTemplateTags).Return("", -1, goproxmox.ErrTemplateNotFound).Once() + + _, err := createVM(ctx, machineScope) + + require.Equal(t, ptr.To(capierrors.MachineStatusError("VMTemplateNotFound")), machineScope.ProxmoxMachine.Status.FailureReason) + require.Equal(t, ptr.To("VM template not found"), machineScope.ProxmoxMachine.Status.FailureMessage) + require.Error(t, err) + require.Contains(t, "VM template not found", err.Error()) +} + func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) { machineScope, proxmoxClient, _ := 
setupReconcilerTest(t) machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3"} diff --git a/internal/webhook/proxmoxmachine_webhook_test.go b/internal/webhook/proxmoxmachine_webhook_test.go index 025a385..61a17e5 100644 --- a/internal/webhook/proxmoxmachine_webhook_test.go +++ b/internal/webhook/proxmoxmachine_webhook_test.go @@ -110,7 +110,10 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine { }, Spec: infrav1.ProxmoxMachineSpec{ VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{ - SourceNode: "pve", + TemplateSource: infrav1.TemplateSource{ + SourceNode: "pve", + TemplateID: ptr.To[int32](100), + }, }, NumSockets: 1, NumCores: 1, diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go index 902af0f..3c376d0 100644 --- a/pkg/proxmox/client.go +++ b/pkg/proxmox/client.go @@ -30,6 +30,7 @@ type Client interface { ConfigureVM(ctx context.Context, vm *proxmox.VirtualMachine, options ...VirtualMachineOption) (*proxmox.Task, error) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.ClusterResource, error) + FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) CheckID(ctx context.Context, vmID int64) (bool, error) diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go index 2315005..9139a09 100644 --- a/pkg/proxmox/goproxmox/api_client.go +++ b/pkg/proxmox/goproxmox/api_client.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "net/url" + "slices" "strings" "github.com/go-logr/logr" @@ -141,6 +142,51 @@ func (c *APIClient) FindVMResource(ctx context.Context, vmID uint64) (*proxmox.C return nil, fmt.Errorf("unable to find VM with ID %d on any of the nodes", vmID) } +// FindVMTemplateByTags tries to find a VMID by its tags across the whole cluster. 
+func (c *APIClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { + vmTemplates := make([]*proxmox.ClusterResource, 0) + + sortedTags := make([]string, len(templateTags)) + for i, tag := range templateTags { + // Proxmox VM tags are always lowercase + sortedTags[i] = strings.ToLower(tag) + } + slices.Sort(sortedTags) + uniqueTags := slices.Compact(sortedTags) + + cluster, err := c.Cluster(ctx) + if err != nil { + return "", -1, fmt.Errorf("cannot get cluster status: %w", err) + } + + vmResources, err := cluster.Resources(ctx, "vm") + if err != nil { + return "", -1, fmt.Errorf("could not list vm resources: %w", err) + } + + for _, vm := range vmResources { + if vm.Template == 0 { + continue + } + if len(vm.Tags) == 0 { + continue + } + + vmTags := strings.Split(vm.Tags, ";") + slices.Sort(vmTags) + + if slices.Equal(vmTags, uniqueTags) { + vmTemplates = append(vmTemplates, vm) + } + } + + if n := len(vmTemplates); n != 1 { + return "", -1, fmt.Errorf("%w: found %d VM templates with tags %q", ErrTemplateNotFound, n, strings.Join(templateTags, ";")) + } + + return vmTemplates[0].Node, int32(vmTemplates[0].VMID), nil +} + // DeleteVM deletes a VM based on the nodeName and vmID. func (c *APIClient) DeleteVM(ctx context.Context, nodeName string, vmID int64) (*proxmox.Task, error) { // A vmID can not be lower than 100. 
diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index 6404244..0724766 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -371,6 +371,126 @@ func TestProxmoxAPIClient_FindVMResource(t *testing.T) { } } +func TestProxmoxAPIClient_FindVMTemplateByTags(t *testing.T) { + proxmoxClusterResources := proxmox.ClusterResources{ + &proxmox.ClusterResource{VMID: 101, Name: "k8s-node01", Node: "capmox01", Tags: ""}, + &proxmox.ClusterResource{VMID: 102, Name: "k8s-node02", Node: "capmox02", Tags: ""}, + &proxmox.ClusterResource{VMID: 150, Name: "template-without-tags", Node: "capmox01", Tags: "", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 201, Name: "ubuntu-22.04-k8s-v1.28.3", Node: "capmox01", Tags: "template;capmox;v1.28.3", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 202, Name: "ubuntu-22.04-k8s-v1.30.2", Node: "capmox02", Tags: "capmox;template;v1.30.2", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 301, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, + &proxmox.ClusterResource{VMID: 302, Name: "ubuntu-22.04-k8s-v1.29.2", Node: "capmox02", Tags: "capmox;template;v1.29.2", Template: uint64(1)}, + } + tests := []struct { + name string + http []int + vmTags []string + fails bool + err string + vmTemplateNode string + vmTemplateID int32 + }{ + { + name: "clusterstatus broken", + http: []int{500, 200}, + fails: true, + err: "cannot get cluster status: 500", + }, + { + name: "resourcelisting broken", + http: []int{200, 500}, + fails: true, + err: "could not list vm resources: 500", + }, + { + name: "find-template", + http: []int{200, 200}, + vmTags: []string{"template", "capmox", "v1.28.3"}, + fails: false, + err: "", + vmTemplateNode: "capmox01", + vmTemplateID: 201, + }, + { + name: "find-template-nil", + http: []int{200, 200}, + vmTags: nil, + fails: true, + err: "VM template not found: 
found 0 VM templates with tags \"\"", + vmTemplateNode: "capmox01", + vmTemplateID: 201, + }, + { + // Proxmox VM tags are always lowercase + name: "find-template-uppercase", + http: []int{200, 200}, + vmTags: []string{"TEMPLATE", "CAPMOX", "v1.28.3"}, + fails: false, + err: "", + vmTemplateNode: "capmox01", + vmTemplateID: 201, + }, + { + name: "find-template-unordered", + http: []int{200, 200}, + vmTags: []string{"template", "capmox", "v1.30.2"}, + fails: false, + err: "", + vmTemplateNode: "capmox02", + vmTemplateID: 202, + }, + { + name: "find-template-duplicate-tag", + http: []int{200, 200}, + vmTags: []string{"template", "capmox", "capmox", "v1.30.2"}, + fails: false, + err: "", + vmTemplateNode: "capmox02", + vmTemplateID: 202, + }, + { + name: "find-multiple-templates", + http: []int{200, 200}, + vmTags: []string{"template", "capmox"}, + fails: true, + err: "VM template not found: found 0 VM templates with tags \"template;capmox\"", + vmTemplateID: 69, + vmTemplateNode: "nice", + }, + { + name: "find-multiple-templates", + http: []int{200, 200}, + vmTags: []string{"template", "capmox", "v1.29.2"}, + fails: true, + err: "VM template not found: found 2 VM templates with tags \"template;capmox;v1.29.2\"", + vmTemplateID: 69, + vmTemplateNode: "nice", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client := newTestClient(t) + + httpmock.RegisterResponder(http.MethodGet, `=~/cluster/status`, + newJSONResponder(test.http[0], proxmox.NodeStatuses{})) + httpmock.RegisterResponder(http.MethodGet, `=~/cluster/resources`, + newJSONResponder(test.http[1], proxmoxClusterResources)) + + vmTemplateNode, vmTemplateID, err := client.FindVMTemplateByTags(context.Background(), test.vmTags) + + if test.fails { + require.Error(t, err) + require.Equal(t, test.err, err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, vmTemplateID, test.vmTemplateID) + require.Equal(t, vmTemplateNode, test.vmTemplateNode) + } + }) + } +} + func 
TestProxmoxAPIClient_DeleteVM(t *testing.T) { tests := []struct { name string diff --git a/pkg/proxmox/goproxmox/errors.go b/pkg/proxmox/goproxmox/errors.go index 164cabe..967a0fb 100644 --- a/pkg/proxmox/goproxmox/errors.go +++ b/pkg/proxmox/goproxmox/errors.go @@ -5,4 +5,7 @@ import "github.com/pkg/errors" var ( // ErrCloudInitFailed is returned when cloud-init failed execution. ErrCloudInitFailed = errors.New("cloud-init failed execution") + + // ErrTemplateNotFound is returned when a VM template is not found. + ErrTemplateNotFound = errors.New("VM template not found") ) diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index a488fed..7368ad1 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -365,6 +365,66 @@ func (_c *MockClient_FindVMResource_Call) RunAndReturn(run func(context.Context, return _c } +// FindVMTemplateByTags provides a mock function with given fields: ctx, templateTags +func (_m *MockClient) FindVMTemplateByTags(ctx context.Context, templateTags []string) (string, int32, error) { + ret := _m.Called(ctx, templateTags) + + var r0 string + var r1 int32 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []string) (string, int32, error)); ok { + return rf(ctx, templateTags) + } + if rf, ok := ret.Get(0).(func(context.Context, []string) string); ok { + r0 = rf(ctx, templateTags) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, []string) int32); ok { + r1 = rf(ctx, templateTags) + } else { + r1 = ret.Get(1).(int32) + } + + if rf, ok := ret.Get(2).(func(context.Context, []string) error); ok { + r2 = rf(ctx, templateTags) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// MockClient_FindVMTemplateByTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FindVMTemplateByTags' +type MockClient_FindVMTemplateByTags_Call struct { + *mock.Call +} + +// 
FindVMTemplateByTags is a helper method to define mock.On call +// - ctx context.Context +// - templateTags []string +func (_e *MockClient_Expecter) FindVMTemplateByTags(ctx interface{}, templateTags interface{}) *MockClient_FindVMTemplateByTags_Call { + return &MockClient_FindVMTemplateByTags_Call{Call: _e.mock.On("FindVMTemplateByTags", ctx, templateTags)} +} + +func (_c *MockClient_FindVMTemplateByTags_Call) Run(run func(ctx context.Context, templateTags []string)) *MockClient_FindVMTemplateByTags_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]string)) + }) + return _c +} + +func (_c *MockClient_FindVMTemplateByTags_Call) Return(_a0 string, _a1 int32, _a2 error) *MockClient_FindVMTemplateByTags_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *MockClient_FindVMTemplateByTags_Call) RunAndReturn(run func(context.Context, []string) (string, int32, error)) *MockClient_FindVMTemplateByTags_Call { + _c.Call.Return(run) + return _c +} + // GetReservableMemoryBytes provides a mock function with given fields: ctx, nodeName, nodeMemoryAdjustment func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) { ret := _m.Called(ctx, nodeName, nodeMemoryAdjustment) diff --git a/templates/cluster-template-auto-image.yaml b/templates/cluster-template-auto-image.yaml new file mode 100644 index 0000000..b21a89e --- /dev/null +++ b/templates/cluster-template-auto-image.yaml @@ -0,0 +1,250 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: 6443 + ipv4Config: + addresses: ${NODE_IP_RANGES} + prefix: ${IP_PREFIX} + gateway: ${GATEWAY} + dnsServers: ${DNS_SERVERS} + allowedNodes: ${ALLOWED_NODES:=[]} +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: ProxmoxMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: cp_enable + value: "true" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE=""} + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: port + value: "6443" + - name: vip_arp + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + image: ghcr.io/kube-vip/kube-vip:v0.7.1 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - localhost + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + type: FileOrCreate + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - path: /etc/kube-vip-prepare.sh + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. 
+ # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + permissions: "0700" + preKubeadmCommands: + - /etc/kube-vip-prepare.sh + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'" + version: "${KUBERNETES_VERSION}" +--- +kind: ProxmoxMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + templateSelector: + matchTags: [${TEMPLATE_TAGS}] + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: 
${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ProxmoxMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ProxmoxMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + templateSelector: + matchTags: [${TEMPLATE_TAGS}] + format: "qcow2" + full: true + numSockets: ${NUM_SOCKETS:=2} + numCores: ${NUM_CORES:=4} + memoryMiB: ${MEMORY_MIB:=16384} + disks: + bootVolume: + disk: ${BOOT_VOLUME_DEVICE} + sizeGb: ${BOOT_VOLUME_SIZE:=100} + network: + default: + bridge: ${BRIDGE} + model: virtio +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: [${VM_SSH_KEYS}] + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'"