Mirror of https://github.com/outbackdingo/cluster-api-provider-proxmox.git, synced 2026-01-27 10:18:38 +00:00
Adding Finalizer on Secret (#279)
* Bumped some versions because controller-gen crashed
* Added ProxmoxClusterTemplate properly
* Properly aligned the //+kubebuilder markers and provided annotations as specified by the spec
* Added concurrency to the controllers; added constructors to the controllers
* Added capiflags
* metrics-bind-address is obsolete; use the diagnostics address instead
* Added a finalizer on the secret
* Cleaned up to minimize differences
* Deleted the CAPI cluster first
* Renamed the test context
* Processed review feedback, fixed tests, and solved linting issues

---------

Co-authored-by: Mohamed Chiheb Ben Jemaa <mc.benjemaa@gmail.com>
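The "concurrency" bullet refers to controller-runtime's per-controller worker count. A minimal sketch of how such an option is typically wired through the builder; the maxConcurrency parameter and its flag plumbing (via capiflags) are illustrative assumptions, not code from this commit:

	import (
		ctrl "sigs.k8s.io/controller-runtime"
		"sigs.k8s.io/controller-runtime/pkg/controller"
	)

	// SetupWithManager registers the reconciler with the manager.
	// maxConcurrency would come from a command-line flag and bounds how many
	// ProxmoxCluster objects are reconciled in parallel.
	func (r *ProxmoxClusterReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrency int) error {
		return ctrl.NewControllerManagedBy(mgr).
			For(&infrav1alpha1.ProxmoxCluster{}).
			WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrency}).
			Complete(r)
	}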
.gitignore (vendored): 1 change
@@ -24,6 +24,7 @@ out/

# editor and IDE paraphernalia
.idea
.vscode
*.swp
*.swo
*~
PROJECT: 8 changes
@@ -42,4 +42,12 @@ resources:
  kind: ProxmoxMachineTemplate
  path: github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1
  version: v1alpha1
- api:
    crdVersion: v1
    namespaced: true
  domain: cluster.x-k8s.io
  group: infrastructure
  kind: ProxmoxClusterTemplate
  path: github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1
  version: v1alpha1
version: "3"
@@ -83,4 +83,8 @@ const (
const (
	// ProxmoxClusterReady documents the status of ProxmoxCluster and its underlying resources.
	ProxmoxClusterReady clusterv1.ConditionType = "ClusterReady"

	// ProxmoxUnreachableReason (Severity=Error) documents a controller detecting
	// issues with Proxmox reachability.
	ProxmoxUnreachableReason = "ProxmoxUnreachable"
)
@@ -31,6 +31,8 @@ const (
	// ClusterFinalizer allows cleaning up resources associated with a
	// ProxmoxCluster before removing it from the apiserver.
	ClusterFinalizer = "proxmoxcluster.infrastructure.cluster.x-k8s.io"
	// SecretFinalizer is the finalizer for ProxmoxCluster credentials secrets.
	SecretFinalizer = "proxmoxcluster.infrastructure.cluster.x-k8s.io/secret" //nolint:gosec
)

// ProxmoxClusterSpec defines the desired state of a ProxmoxCluster.
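The //nolint:gosec directive on SecretFinalizer is presumably there because the string literal contains the word "secret", which trips gosec's hardcoded-credentials check even though the value is only a finalizer key, not a credential.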
@@ -218,12 +220,12 @@ type NodeLocation struct {
	Node string `json:"node"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:resource:path=proxmoxclusters,scope=Namespaced,categories=cluster-api,singular=proxmoxcluster
//+kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels['cluster\\.x-k8s\\.io/cluster-name']",description="Cluster"
//+kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready"
//+kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="API Endpoint"
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=proxmoxclusters,scope=Namespaced,categories=cluster-api,singular=proxmoxcluster
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels['cluster\\.x-k8s\\.io/cluster-name']",description="Cluster"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Cluster infrastructure is ready"
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint",description="API Endpoint"

// ProxmoxCluster is the Schema for the proxmoxclusters API.
type ProxmoxCluster struct {
@@ -235,7 +237,7 @@ type ProxmoxCluster struct {
	Status ProxmoxClusterStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true
// +kubebuilder:object:root=true

// ProxmoxClusterList contains a list of ProxmoxCluster.
type ProxmoxClusterList struct {
@@ -35,8 +35,10 @@ type ProxmoxClusterTemplateResource struct {
	Spec ProxmoxClusterSpec `json:"spec"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=proxmoxclustertemplates,scope=Namespaced,categories=cluster-api,shortName=pct
// +kubebuilder:storageversion

// ProxmoxClusterTemplate is the Schema for the proxmoxclustertemplates API.
type ProxmoxClusterTemplate struct {
@@ -26,7 +26,9 @@ type ProxmoxMachineTemplateSpec struct {
	Template ProxmoxMachineTemplateResource `json:"template"`
}

//+kubebuilder:object:root=true
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=proxmoxmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=pmt
// +kubebuilder:storageversion

// ProxmoxMachineTemplate is the Schema for the proxmoxmachinetemplates API.
type ProxmoxMachineTemplate struct {
cmd/main.go: 12 changes
@@ -51,7 +51,7 @@ import (
	"github.com/ionos-cloud/cluster-api-provider-proxmox/internal/webhook"
	capmox "github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/goproxmox"
	//+kubebuilder:scaffold:imports
	// +kubebuilder:scaffold:imports
)

var (
@@ -78,7 +78,7 @@ func init() {
	_ = ipamicv1.AddToScheme(scheme)
	_ = ipamv1.AddToScheme(scheme)

	//+kubebuilder:scaffold:scheme
	// +kubebuilder:scaffold:scheme
}

func main() {
@@ -144,7 +144,7 @@ func main() {
			os.Exit(1)
		}
	}
	//+kubebuilder:scaffold:builder
	// +kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
@@ -162,12 +162,12 @@ func main() {
	}
}

func setupReconcilers(ctx context.Context, mgr ctrl.Manager, client capmox.Client) error {
func setupReconcilers(ctx context.Context, mgr ctrl.Manager, proxmoxClient capmox.Client) error {
	if err := (&controller.ProxmoxClusterReconciler{
		Client:        mgr.GetClient(),
		Scheme:        mgr.GetScheme(),
		Recorder:      mgr.GetEventRecorderFor("proxmoxcluster-controller"),
		ProxmoxClient: client,
		ProxmoxClient: proxmoxClient,
	}).SetupWithManager(ctx, mgr); err != nil {
		return fmt.Errorf("setting up ProxmoxCluster controller: %w", err)
	}
@@ -175,7 +175,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, client capmox.Clien
		Client:        mgr.GetClient(),
		Scheme:        mgr.GetScheme(),
		Recorder:      mgr.GetEventRecorderFor("proxmoxmachine-controller"),
		ProxmoxClient: client,
		ProxmoxClient: proxmoxClient,
	}).SetupWithManager(mgr); err != nil {
		return fmt.Errorf("setting up ProxmoxMachine controller: %w", err)
	}
@@ -8,9 +8,13 @@ metadata:
spec:
  group: infrastructure.cluster.x-k8s.io
  names:
    categories:
    - cluster-api
    kind: ProxmoxClusterTemplate
    listKind: ProxmoxClusterTemplateList
    plural: proxmoxclustertemplates
    shortNames:
    - pct
    singular: proxmoxclustertemplate
  scope: Namespaced
  versions:
@@ -8,9 +8,13 @@ metadata:
spec:
  group: infrastructure.cluster.x-k8s.io
  names:
    categories:
    - cluster-api
    kind: ProxmoxMachineTemplate
    listKind: ProxmoxMachineTemplateList
    plural: proxmoxmachinetemplates
    shortNames:
    - pmt
    singular: proxmoxmachinetemplate
  scope: Namespaced
  versions:
@@ -22,6 +22,7 @@ commonLabels:
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_proxmoxclusters.yaml
#- patches/cainjection_in_proxmoxclustertemplates.yaml
#- patches/cainjection_in_proxmoxmachines.yaml
#- patches/cainjection_in_proxmoxmachinetemplates.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
config/rbac/proxmoxclustertemplate_editor_role.yaml (new file): 27 lines
@@ -0,0 +1,27 @@
# permissions for end users to edit proxmoxclustertemplates.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: cluster-api-provider-proxmox
    app.kubernetes.io/managed-by: kustomize
  name: proxmoxclustertemplate-editor-role
rules:
- apiGroups:
  - infrastructure.cluster.x-k8s.io
  resources:
  - proxmoxclustertemplates
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - infrastructure.cluster.x-k8s.io
  resources:
  - proxmoxclustertemplates/status
  verbs:
  - get
config/rbac/proxmoxclustertemplate_viewer_role.yaml (new file): 23 lines
@@ -0,0 +1,23 @@
# permissions for end users to view proxmoxclustertemplates.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: cluster-api-provider-proxmox
    app.kubernetes.io/managed-by: kustomize
  name: proxmoxclustertemplate-viewer-role
rules:
- apiGroups:
  - infrastructure.cluster.x-k8s.io
  resources:
  - proxmoxclustertemplates
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - infrastructure.cluster.x-k8s.io
  resources:
  - proxmoxclustertemplates/status
  verbs:
  - get
@@ -41,6 +41,15 @@ rules:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - patch
  - watch
- apiGroups:
  - infrastructure.cluster.x-k8s.io
  resources:
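These added rules give the manager read and patch access to core Secrets. Patch is the verb the new secret-finalizer logic depends on: the reconciler shown later patches the referenced credentials secret to attach an owner reference and the SecretFinalizer. Note that create and delete are not requested; the controller only annotates secrets that already exist.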
@@ -2,6 +2,7 @@
## Append samples of your project ##
resources:
- infrastructure_v1alpha1_proxmoxcluster.yaml
- infrastructure_v1alpha1_proxmoxclustertemplate.yaml
- infrastructure_v1alpha1_proxmoxmachine.yaml
- infrastructure_v1alpha1_proxmoxmachinetemplate.yaml
# +kubebuilder:scaffold:manifestskustomizesamples
@@ -19,16 +19,22 @@ package controller

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
	"k8s.io/utils/ptr"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util"
	clustererrors "sigs.k8s.io/cluster-api/errors"
	clusterutil "sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/annotations"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/cluster-api/util/predicates"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -57,16 +63,29 @@ type ProxmoxClusterReconciler struct {
	ProxmoxClient proxmox.Client
}

//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters/finalizers,verbs=update
// SetupWithManager sets up the controller with the Manager.
func (r *ProxmoxClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1alpha1.ProxmoxCluster{}).
		WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))).
		Watches(&clusterv1.Cluster{},
			handler.EnqueueRequestsFromMapFunc(clusterutil.ClusterToInfrastructureMapFunc(ctx, infrav1alpha1.GroupVersion.WithKind(infrav1alpha1.ProxmoxClusterKind), mgr.GetClient(), &infrav1alpha1.ProxmoxCluster{})),
			builder.WithPredicates(predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)))).
		WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))).
		Complete(r)
}

//+kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxclusters/finalizers,verbs=update

//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=inclusterippools,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=globalinclusterippools,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch
//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch

// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=inclusterippools,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=globalinclusterippools,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -85,7 +104,7 @@ func (r *ProxmoxClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
	}

	// Get owner cluster
	cluster, err := util.GetOwnerCluster(ctx, r.Client, proxmoxCluster.ObjectMeta)
	cluster, err := clusterutil.GetOwnerCluster(ctx, r.Client, proxmoxCluster.ObjectMeta)
	if err != nil {
		return ctrl.Result{}, err
	}
@@ -157,6 +176,10 @@ func (r *ProxmoxClusterReconciler) reconcileDelete(ctx context.Context, clusterS
		return ctrl.Result{RequeueAfter: infrav1alpha1.DefaultReconcilerRequeue}, nil
	}

	if err := r.reconcileDeleteCredentialsSecret(ctx, clusterScope); err != nil {
		return reconcile.Result{}, err
	}

	clusterScope.Info("cluster deleted successfully")
	ctrlutil.RemoveFinalizer(clusterScope.ProxmoxCluster, infrav1alpha1.ClusterFinalizer)
	return ctrl.Result{}, nil
@@ -177,6 +200,15 @@ func (r *ProxmoxClusterReconciler) reconcileNormal(ctx context.Context, clusterS
		return res, nil
	}

	if err := r.reconcileNormalCredentialsSecret(ctx, clusterScope); err != nil {
		conditions.MarkFalse(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady, infrav1alpha1.ProxmoxUnreachableReason, clusterv1.ConditionSeverityError, err.Error())
		if apierrors.IsNotFound(err) {
			clusterScope.ProxmoxCluster.Status.FailureMessage = ptr.To("credentials secret not found")
			clusterScope.ProxmoxCluster.Status.FailureReason = ptr.To(clustererrors.InvalidConfigurationClusterError)
		}
		return reconcile.Result{}, err
	}

	conditions.MarkTrue(clusterScope.ProxmoxCluster, infrav1alpha1.ProxmoxClusterReady)

	clusterScope.ProxmoxCluster.Status.Ready = true
@@ -233,13 +265,98 @@ func (r *ProxmoxClusterReconciler) listProxmoxMachinesForCluster(ctx context.Con
	return machineList.Items, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *ProxmoxClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1alpha1.ProxmoxCluster{}).
		WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))).
		Watches(&clusterv1.Cluster{},
			handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1alpha1.GroupVersion.WithKind(infrav1alpha1.ProxmoxClusterKind), mgr.GetClient(), &infrav1alpha1.ProxmoxCluster{})),
			builder.WithPredicates(predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx)))).
		Complete(r)
func (r *ProxmoxClusterReconciler) reconcileNormalCredentialsSecret(ctx context.Context, clusterScope *scope.ClusterScope) error {
	proxmoxCluster := clusterScope.ProxmoxCluster
	if !hasCredentialsRef(proxmoxCluster) {
		return nil
	}

	secret := &corev1.Secret{}
	secretKey := client.ObjectKey{
		Namespace: getNamespaceFromProxmoxCluster(proxmoxCluster),
		Name:      proxmoxCluster.Spec.CredentialsRef.Name,
	}
	err := r.Client.Get(ctx, secretKey, secret)
	if err != nil {
		return err
	}

	helper, err := patch.NewHelper(secret, r.Client)
	if err != nil {
		return err
	}

	// Ensure the ProxmoxCluster is an owner and that the APIVersion is up-to-date.
	secret.SetOwnerReferences(clusterutil.EnsureOwnerRef(secret.GetOwnerReferences(),
		metav1.OwnerReference{
			APIVersion: infrav1alpha1.GroupVersion.String(),
			Kind:       "ProxmoxCluster",
			Name:       proxmoxCluster.Name,
			UID:        proxmoxCluster.UID,
		},
	))

	// Ensure the finalizer is added.
	if !ctrlutil.ContainsFinalizer(secret, infrav1alpha1.SecretFinalizer) {
		ctrlutil.AddFinalizer(secret, infrav1alpha1.SecretFinalizer)
	}

	return helper.Patch(ctx, secret)
}

func (r *ProxmoxClusterReconciler) reconcileDeleteCredentialsSecret(ctx context.Context, clusterScope *scope.ClusterScope) error {
	proxmoxCluster := clusterScope.ProxmoxCluster
	if !hasCredentialsRef(proxmoxCluster) {
		return nil
	}

	logger := ctrl.LoggerFrom(ctx)

	// Remove finalizer on Identity Secret
	secret := &corev1.Secret{}
	secretKey := client.ObjectKey{
		Namespace: getNamespaceFromProxmoxCluster(proxmoxCluster),
		Name:      proxmoxCluster.Spec.CredentialsRef.Name,
	}
	if err := r.Client.Get(ctx, secretKey, secret); err != nil {
		if apierrors.IsNotFound(err) {
			return nil
		}
		return err
	}

	helper, err := patch.NewHelper(secret, r.Client)
	if err != nil {
		return err
	}

	ownerRef := metav1.OwnerReference{
		APIVersion: infrav1alpha1.GroupVersion.String(),
		Kind:       "ProxmoxCluster",
		Name:       proxmoxCluster.Name,
		UID:        proxmoxCluster.UID,
	}

	if len(secret.GetOwnerReferences()) > 1 {
		// Remove the ProxmoxCluster from the OwnerRef.
		secret.SetOwnerReferences(clusterutil.RemoveOwnerRef(secret.GetOwnerReferences(), ownerRef))
	} else if clusterutil.HasOwnerRef(secret.GetOwnerReferences(), ownerRef) && ctrlutil.ContainsFinalizer(secret, infrav1alpha1.SecretFinalizer) {
		// There is only one OwnerRef, the current ProxmoxCluster. Remove the Finalizer (if present).
		logger.Info(fmt.Sprintf("Removing finalizer %s", infrav1alpha1.SecretFinalizer), "Secret", klog.KObj(secret))
		ctrlutil.RemoveFinalizer(secret, infrav1alpha1.SecretFinalizer)
	}

	return helper.Patch(ctx, secret)
}

func hasCredentialsRef(proxmoxCluster *infrav1alpha1.ProxmoxCluster) bool {
	return proxmoxCluster != nil && proxmoxCluster.Spec.CredentialsRef != nil
}

func getNamespaceFromProxmoxCluster(proxmoxCluster *infrav1alpha1.ProxmoxCluster) string {
	namespace := proxmoxCluster.Spec.CredentialsRef.Namespace
	if len(namespace) == 0 {
		namespace = proxmoxCluster.GetNamespace()
	}
	return namespace
}
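The deletion path above hinges on counting owner references: while other owners are still listed, only this cluster's OwnerReference is removed and the finalizer stays put; once the current ProxmoxCluster is the sole owner, the finalizer itself is dropped so the secret can be deleted. That is what lets several ProxmoxClusters share one credentials secret, which the tests below exercise.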
@@ -21,9 +21,12 @@ import (
	"reflect"
	"time"

	"k8s.io/apimachinery/pkg/types"
	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
@@ -35,6 +38,7 @@ import (
	ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam"
@@ -68,14 +72,14 @@ var _ = Describe("Controller Test", func() {
			},
		}

		controllerutil.AddFinalizer(cl, testFinalizer)
		ctrlutil.AddFinalizer(cl, testFinalizer)
		g.Expect(k8sClient.Create(context.Background(), cl)).To(Succeed())
	})

	AfterEach(func() {
		var cl clusterv1.Cluster
		g.Expect(k8sClient.Get(testEnv.GetContext(), client.ObjectKey{Name: "test", Namespace: testNS}, &cl)).To(Succeed())
		controllerutil.RemoveFinalizer(&cl, testFinalizer)
		ctrlutil.RemoveFinalizer(&cl, testFinalizer)
		g.Expect(k8sClient.Update(testEnv.GetContext(), &cl)).To(Succeed())

		g.Eventually(func(g Gomega) {
@@ -201,6 +205,101 @@ var _ = Describe("Controller Test", func() {
	})
})

var _ = Describe("External Credentials Tests", func() {
	Context("Reconcile a ProxmoxCluster", func() {
		It("create and destroy a cluster", func() {
			secret := createSecret()
			proxmoxCluster := createProxmoxCluster()
			setCredentialsRefOnProxmoxCluster(proxmoxCluster, secret)
			capiCluster := createOwnerCluster(proxmoxCluster)
			proxmoxCluster = refreshCluster(proxmoxCluster)
			setCapiClusterOwnerRefOnProxmoxCluster(proxmoxCluster, capiCluster)

			assertProxmoxClusterHasFinalizer(proxmoxCluster, infrav1.ClusterFinalizer)
			assertSecretHasNumberOfOwnerRefs(secret, 1)
			assertSecretHasOwnerRef(secret, proxmoxCluster.Name)
			assertSecretHasFinalizer(secret, infrav1.SecretFinalizer)
			assertProxmoxClusterIsReady(proxmoxCluster)

			deleteCapiCluster(capiCluster)
			deleteProxmoxCluster(proxmoxCluster)

			assertSecretHasOwnerRef(secret, proxmoxCluster.Name)
			assertSecretHasFinalizer(secret, infrav1.SecretFinalizer)

			cleanup(proxmoxCluster, capiCluster, secret)
		})

		It("multiple clusters can set ownerRef on secret", func() {
			secret := createSecret()
			setRandomOwnerRefOnSecret(secret, "another-cluster")

			// First cluster
			proxmoxCluster1 := createProxmoxCluster()
			setCredentialsRefOnProxmoxCluster(proxmoxCluster1, secret)
			capiCluster1 := createOwnerCluster(proxmoxCluster1)
			proxmoxCluster1 = refreshCluster(proxmoxCluster1)
			setCapiClusterOwnerRefOnProxmoxCluster(proxmoxCluster1, capiCluster1)
			assertProxmoxClusterIsReady(proxmoxCluster1)
			assertProxmoxClusterHasFinalizer(proxmoxCluster1, infrav1.ClusterFinalizer)

			// Second cluster
			proxmoxCluster2 := createProxmoxCluster()
			setCredentialsRefOnProxmoxCluster(proxmoxCluster2, secret)
			capiCluster2 := createOwnerCluster(proxmoxCluster2)
			proxmoxCluster2 = refreshCluster(proxmoxCluster2)
			setCapiClusterOwnerRefOnProxmoxCluster(proxmoxCluster2, capiCluster2)
			assertProxmoxClusterIsReady(proxmoxCluster2)
			assertProxmoxClusterHasFinalizer(proxmoxCluster2, infrav1.ClusterFinalizer)

			// Check owner references
			assertSecretHasNumberOfOwnerRefs(secret, 3)
			assertSecretHasOwnerRef(secret, proxmoxCluster1.Name)
			assertSecretHasOwnerRef(secret, proxmoxCluster2.Name)
			assertSecretHasOwnerRef(secret, "another-cluster")
			assertSecretHasFinalizer(secret, infrav1.SecretFinalizer)

			// Delete second cluster
			deleteCapiCluster(capiCluster2)
			deleteProxmoxCluster(proxmoxCluster2)

			// Check owner references
			assertSecretHasNumberOfOwnerRefs(secret, 2)
			assertSecretHasOwnerRef(secret, proxmoxCluster1.Name)
			assertSecretHasOwnerRef(secret, "another-cluster")
			assertSecretHasFinalizer(secret, infrav1.SecretFinalizer)

			// Delete first cluster
			deleteCapiCluster(capiCluster1)
			deleteProxmoxCluster(proxmoxCluster1)

			// Check owner references
			assertSecretHasNumberOfOwnerRefs(secret, 1)
			assertSecretHasOwnerRef(secret, "another-cluster")
			assertSecretHasFinalizer(secret, infrav1.SecretFinalizer)

			cleanup(proxmoxCluster1, capiCluster1, proxmoxCluster2, capiCluster2, secret)
		})
	})

	It("should remove ProxmoxCluster finalizer if the secret does not exist", func() {
		proxmoxCluster := createProxmoxCluster()
		setRandomCredentialsRefOnProxmoxCluster(proxmoxCluster)

		capiCluster := createOwnerCluster(proxmoxCluster)
		proxmoxCluster = refreshCluster(proxmoxCluster)
		setCapiClusterOwnerRefOnProxmoxCluster(proxmoxCluster, capiCluster)

		assertProxmoxClusterIsNotReady(proxmoxCluster)
		assertProxmoxClusterHasFinalizer(proxmoxCluster, infrav1.ClusterFinalizer)

		By("deleting the proxmoxcluster while the secret is gone")
		deleteCapiCluster(capiCluster)
		deleteProxmoxCluster(proxmoxCluster)
		assertProxmoxClusterIsDeleted(proxmoxCluster)
	})
})

func cleanupResources(ctx context.Context, g Gomega, cl infrav1.ProxmoxCluster) {
	g.Expect(k8sClient.Delete(context.Background(), &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: testNS}})).To(Succeed())
	g.Expect(k8sClient.Delete(ctx, &cl)).To(Succeed())
@@ -287,3 +386,238 @@ func dummyIPAddress(client client.Client, owner client.Object, poolName string)
		},
	}
}

func createSecret() *corev1.Secret {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "secret-",
			Namespace:    "default",
		},
		Data: map[string][]byte{
			"url":    []byte("url"),
			"token":  []byte("token"),
			"secret": []byte("secret"),
		},
	}
	Expect(testEnv.Create(testEnv.GetContext(), secret)).To(Succeed())
	return secret
}

func createProxmoxCluster() *infrav1.ProxmoxCluster {
	proxmoxCluster := &infrav1.ProxmoxCluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "proxmox-test-",
			Namespace:    "default",
		},
		Spec: infrav1.ProxmoxClusterSpec{
			IPv4Config: &infrav1.IPConfigSpec{
				Addresses: []string{
					"10.10.10.2-10.10.10.10",
					"10.10.10.100-10.10.10.125",
					"10.10.10.192/64",
				},
				Gateway: "10.10.10.1",
				Prefix:  24,
			},
			DNSServers: []string{"8.8.8.8", "8.8.4.4"},
		},
	}
	Expect(testEnv.Create(testEnv.GetContext(), proxmoxCluster)).To(Succeed())
	return proxmoxCluster
}

func setCredentialsRefOnProxmoxCluster(proxmoxCluster *infrav1.ProxmoxCluster, secret *corev1.Secret) {
	Eventually(func() error {
		ph, err := patch.NewHelper(proxmoxCluster, testEnv)
		Expect(err).ShouldNot(HaveOccurred())
		proxmoxCluster.Spec.CredentialsRef = &corev1.SecretReference{
			Name:      secret.Name,
			Namespace: secret.Namespace,
		}
		return ph.Patch(testEnv.GetContext(), proxmoxCluster, patch.WithStatusObservedGeneration{})
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeNil())
}

func setRandomCredentialsRefOnProxmoxCluster(proxmoxCluster *infrav1.ProxmoxCluster) {
	Eventually(func() error {
		ph, err := patch.NewHelper(proxmoxCluster, testEnv)
		Expect(err).ShouldNot(HaveOccurred())
		proxmoxCluster.Spec.CredentialsRef = &corev1.SecretReference{
			Name:      util.RandomString(6),
			Namespace: util.RandomString(6),
		}
		return ph.Patch(testEnv.GetContext(), proxmoxCluster, patch.WithStatusObservedGeneration{})
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeNil())
}

func createOwnerCluster(proxmoxCluster *infrav1.ProxmoxCluster) *clusterv1.Cluster {
	capiCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "test-",
			Namespace:    "default",
		},
		Spec: clusterv1.ClusterSpec{
			InfrastructureRef: &corev1.ObjectReference{
				APIVersion: infrav1.GroupVersion.String(),
				Kind:       "ProxmoxCluster",
				Name:       proxmoxCluster.Name,
			},
		},
	}
	ctrlutil.AddFinalizer(capiCluster, "cluster.cluster.x-k8s.io")
	Expect(testEnv.Create(testEnv.GetContext(), capiCluster)).To(Succeed())
	return capiCluster
}

func setCapiClusterOwnerRefOnProxmoxCluster(proxmoxCluster *infrav1.ProxmoxCluster, capiCluster *clusterv1.Cluster) {
	Eventually(func() error {
		ph, err := patch.NewHelper(proxmoxCluster, testEnv)
		Expect(err).ShouldNot(HaveOccurred())
		proxmoxCluster.OwnerReferences = append(proxmoxCluster.OwnerReferences, metav1.OwnerReference{
			Kind:       "Cluster",
			APIVersion: clusterv1.GroupVersion.String(),
			Name:       capiCluster.Name,
			UID:        (types.UID)(util.RandomString(6)),
		})
		return ph.Patch(testEnv.GetContext(), proxmoxCluster, patch.WithStatusObservedGeneration{})
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeNil())
}

func setRandomOwnerRefOnSecret(secret *corev1.Secret, ownerRef string) {
	Eventually(func() error {
		ph, err := patch.NewHelper(secret, testEnv)
		Expect(err).ShouldNot(HaveOccurred())
		secret.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
			{
				APIVersion: infrav1.GroupVersion.String(),
				Kind:       "ProxmoxCluster",
				Name:       ownerRef,
				UID:        (types.UID)(util.RandomString(6)),
			},
		}
		return ph.Patch(testEnv.GetContext(), secret, patch.WithStatusObservedGeneration{})
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeNil())
}

func refreshCluster(proxmoxCluster *infrav1.ProxmoxCluster) *infrav1.ProxmoxCluster {
	key := client.ObjectKey{Namespace: proxmoxCluster.Namespace, Name: proxmoxCluster.Name}
	Expect(testEnv.Get(testEnv.GetContext(), key, proxmoxCluster)).To(Succeed())
	return proxmoxCluster
}

func deleteProxmoxCluster(proxmoxCluster *infrav1.ProxmoxCluster) {
	Eventually(func() bool {
		err := testEnv.Delete(testEnv.GetContext(), proxmoxCluster)
		return err == nil
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func deleteCapiCluster(cluster *clusterv1.Cluster) {
	Eventually(func() bool {
		err := testEnv.Delete(testEnv.GetContext(), cluster)
		return err == nil
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertProxmoxClusterHasFinalizer(proxmoxCluster *infrav1.ProxmoxCluster, finalizer string) {
	key := client.ObjectKey{Namespace: proxmoxCluster.Namespace, Name: proxmoxCluster.Name}
	Eventually(func() bool {
		if err := testEnv.Get(testEnv.GetContext(), key, proxmoxCluster); err != nil {
			return false
		}
		return ctrlutil.ContainsFinalizer(proxmoxCluster, finalizer)
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertSecretHasFinalizer(secret *corev1.Secret, finalizer string) {
	key := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name}
	Eventually(func() bool {
		if err := testEnv.Get(testEnv.GetContext(), key, secret); err != nil {
			return false
		}
		return ctrlutil.ContainsFinalizer(secret, finalizer)
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertSecretHasOwnerRef(secret *corev1.Secret, ownerRef string) {
	key := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name}
	Eventually(func() bool {
		if err := testEnv.Get(testEnv.GetContext(), key, secret); err != nil {
			return false
		}
		for _, ref := range secret.OwnerReferences {
			if ref.Name == ownerRef {
				return true
			}
		}
		return false
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertSecretHasNumberOfOwnerRefs(secret *corev1.Secret, nrOfOwnerRefs int) {
	key := client.ObjectKey{Namespace: secret.Namespace, Name: secret.Name}
	Eventually(func() bool {
		if err := testEnv.Get(testEnv.GetContext(), key, secret); err != nil {
			return false
		}
		return len(secret.OwnerReferences) == nrOfOwnerRefs
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertProxmoxClusterIsReady(proxmoxCluster *infrav1.ProxmoxCluster) {
	key := client.ObjectKey{Namespace: proxmoxCluster.Namespace, Name: proxmoxCluster.Name}
	Eventually(func() bool {
		if err := testEnv.Get(testEnv.GetContext(), key, proxmoxCluster); err != nil {
			return false
		}
		return conditions.IsTrue(proxmoxCluster, infrav1.ProxmoxClusterReady)
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertProxmoxClusterIsNotReady(proxmoxCluster *infrav1.ProxmoxCluster) {
	key := client.ObjectKey{Namespace: proxmoxCluster.Namespace, Name: proxmoxCluster.Name}
	Eventually(func() bool {
		if err := testEnv.Get(testEnv.GetContext(), key, proxmoxCluster); err != nil {
			return false
		}
		return conditions.IsFalse(proxmoxCluster, infrav1.ProxmoxClusterReady)
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func assertProxmoxClusterIsDeleted(proxmoxCluster *infrav1.ProxmoxCluster) {
	key := client.ObjectKey{Namespace: proxmoxCluster.Namespace, Name: proxmoxCluster.Name}
	Eventually(func() bool {
		err := testEnv.Get(testEnv.GetContext(), key, proxmoxCluster)
		return apierrors.IsNotFound(err)
	}).WithTimeout(time.Second * 10).
		WithPolling(time.Second).
		Should(BeTrue())
}

func cleanup(objs ...client.Object) {
	Expect(testEnv.Cleanup(testEnv.GetContext(), objs...)).To(Succeed())
}
@@ -20,6 +20,7 @@ import (
	"context"

	"github.com/go-logr/logr"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox"
	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
@@ -40,7 +41,6 @@ import (
	"github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/taskservice"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/internal/service/vmservice"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope"
)
@@ -63,15 +63,15 @@ func (r *ProxmoxMachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
		Complete(r)
}

//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines/finalizers,verbs=update
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=proxmoxmachines/finalizers,verbs=update
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch

//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch
//+kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses,verbs=get;list;watch
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims,verbs=get;list;watch;create;update;delete

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -30,7 +30,7 @@ import (

	"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest"
	"github.com/ionos-cloud/cluster-api-provider-proxmox/test/helpers"
	//+kubebuilder:scaffold:imports
	// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
@@ -59,7 +59,7 @@ var _ = BeforeSuite(func() {

	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	//+kubebuilder:scaffold:scheme
	// +kubebuilder:scaffold:scheme

	cachingClient, err := client.New(testEnv.GetConfig(), client.Options{Scheme: testEnv.Scheme()})
	Expect(err).NotTo(HaveOccurred())
@@ -67,14 +67,21 @@ var _ = BeforeSuite(func() {

	k8sClient = cachingClient

	reconciler := ProxmoxClusterReconciler{
	proxmoxClusterReconciler := ProxmoxClusterReconciler{
		Client:        k8sClient,
		Scheme:        testEnv.GetScheme(),
		Recorder:      &record.FakeRecorder{},
		ProxmoxClient: testEnv.ProxmoxClient,
	}
	Expect(proxmoxClusterReconciler.SetupWithManager(testEnv.GetContext(), testEnv.Manager)).To(Succeed())

	Expect(reconciler.SetupWithManager(testEnv.GetContext(), testEnv.Manager)).To(Succeed())
	proxmoxMachineReconciler := ProxmoxMachineReconciler{
		Client:        k8sClient,
		Scheme:        testEnv.GetScheme(),
		Recorder:      &record.FakeRecorder{},
		ProxmoxClient: testEnv.ProxmoxClient,
	}
	Expect(proxmoxMachineReconciler.SetupWithManager(testEnv.Manager)).To(Succeed())

	go func() {
		defer GinkgoRecover()
@@ -104,17 +104,18 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {

	clusterScope.patchHelper = helper

	if clusterScope.ProxmoxClient == nil && clusterScope.ProxmoxCluster.Spec.CredentialsRef == nil {
		// Fail the cluster if no credentials found.
		// set failure reason
		clusterScope.ProxmoxCluster.Status.FailureMessage = ptr.To("No credentials found, ProxmoxCluster missing credentialsRef")
		clusterScope.ProxmoxCluster.Status.FailureReason = ptr.To(clustererrors.InvalidConfigurationClusterError)
	if clusterScope.ProxmoxClient == nil {
		if clusterScope.ProxmoxCluster.Spec.CredentialsRef == nil {
			// Fail the cluster if no credentials found.
			// set failure reason
			clusterScope.ProxmoxCluster.Status.FailureMessage = ptr.To("No credentials found, ProxmoxCluster missing credentialsRef")
			clusterScope.ProxmoxCluster.Status.FailureReason = ptr.To(clustererrors.InvalidConfigurationClusterError)

		if err = clusterScope.Close(); err != nil {
			return nil, err
			if err = clusterScope.Close(); err != nil {
				return nil, err
			}
			return nil, errors.New("No credentials found, ProxmoxCluster missing credentialsRef")
		}
		return nil, errors.New("No credentials found, ProxmoxCluster missing credentialsRef")
	} else if clusterScope.ProxmoxCluster.Spec.CredentialsRef != nil {
		// using proxmoxcluster.spec.credentialsRef
		pmoxClient, err := clusterScope.setupProxmoxClient(context.TODO())
		if err != nil {
@@ -122,7 +123,6 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
		}
		clusterScope.ProxmoxClient = pmoxClient
	}

	return clusterScope, nil
}
@@ -27,11 +27,13 @@ import (

	"golang.org/x/tools/go/packages"
	admissionv1 "k8s.io/api/admission/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
	ipamicv1 "sigs.k8s.io/cluster-api-ipam-provider-in-cluster/api/v1alpha2"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
@@ -82,7 +84,11 @@ type TestEnvironment struct {
// NewTestEnvironment creates a new testing environment with a
// pre-configured manager, that can be used to register reconcilers.
func NewTestEnvironment(setupWebhook bool, pmClient proxmox.Client) *TestEnvironment {
	_, filename, _, _ := goruntime.Caller(0) //nolint:dogsled
	_, filename, _, ok := goruntime.Caller(0)
	if !ok {
		klog.Fatalf("Failed to get information for current file from runtime")
	}

	root := filepath.Dir(filename)

	crdsPaths := []string{
@@ -156,6 +162,22 @@ func (t *TestEnvironment) StartManager(ctx context.Context) error {
	return t.Manager.Start(t.ctx)
}

// Cleanup removes objects from the TestEnvironment.
func (t *TestEnvironment) Cleanup(ctx context.Context, objs ...client.Object) error {
	errs := make([]error, 0, len(objs))
	for _, o := range objs {
		err := t.Client.Delete(ctx, o)
		if apierrors.IsNotFound(err) {
			// If the object is not found, it must've been garbage collected
			// already. For example, if we delete namespace first and then
			// objects within it.
			continue
		}
		errs = append(errs, err)
	}
	return kerrors.NewAggregate(errs)
}

// Stop shuts down the test environment and stops the manager.
func (t *TestEnvironment) Stop() error {
	t.cancel()