refactor: update local tcp instance to avoid a second retrieval

This commit is contained in:
Dario Tranchitella
2023-02-05 16:47:43 +01:00
parent e23ae3c7f3
commit 44d1f3fa7f
6 changed files with 16 additions and 15 deletions

View File

@@ -60,7 +60,7 @@ func (c *CoreDNS) Reconcile(ctx context.Context, request reconcile.Request) (rec
return reconcile.Result{}, nil
}
if err = utils.UpdateStatus(ctx, c.AdminClient, c.GetTenantControlPlaneFunc, resource); err != nil {
if err = utils.UpdateStatus(ctx, c.AdminClient, tcp, resource); err != nil {
c.logger.Error(err, "update status failed", "resource", resource.GetName())
return reconcile.Result{}, err

View File

@@ -60,7 +60,7 @@ func (k *KonnectivityAgent) Reconcile(ctx context.Context, _ reconcile.Request)
continue
}
if err = utils.UpdateStatus(ctx, k.AdminClient, k.GetTenantControlPlaneFunc, resource); err != nil {
if err = utils.UpdateStatus(ctx, k.AdminClient, tcp, resource); err != nil {
k.logger.Error(err, "update status failed", "resource", resource.GetName())
return reconcile.Result{}, err

View File

@@ -50,7 +50,7 @@ func (k *KubeadmPhase) Reconcile(ctx context.Context, _ reconcile.Request) (reco
return reconcile.Result{}, nil
}
if err = utils.UpdateStatus(ctx, k.Phase.GetClient(), k.GetTenantControlPlaneFunc, k.Phase); err != nil {
if err = utils.UpdateStatus(ctx, k.Phase.GetClient(), tcp, k.Phase); err != nil {
k.logger.Error(err, "update status failed")
return reconcile.Result{}, err

View File

@@ -60,7 +60,7 @@ func (k *KubeProxy) Reconcile(ctx context.Context, _ reconcile.Request) (reconci
return reconcile.Result{}, nil
}
if err = utils.UpdateStatus(ctx, k.AdminClient, k.GetTenantControlPlaneFunc, resource); err != nil {
if err = utils.UpdateStatus(ctx, k.AdminClient, tcp, resource); err != nil {
k.logger.Error(err, "update status failed")
return reconcile.Result{}, err

View File

@@ -156,15 +156,13 @@ func (r *TenantControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R
continue
}
if err = utils.UpdateStatus(ctx, r.Client, r.getTenantControlPlane(ctx, req.NamespacedName), resource); err != nil {
if err = utils.UpdateStatus(ctx, r.Client, tenantControlPlane, resource); err != nil {
log.Error(err, "update of the resource failed", "resource", resource.GetName())
return ctrl.Result{}, err
}
log.Info(fmt.Sprintf("%s has been configured", resource.GetName()))
return ctrl.Result{}, nil
}
log.Info(fmt.Sprintf("%s has been reconciled", tenantControlPlane.GetName()))

View File

@@ -7,24 +7,27 @@ import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
"github.com/clastix/kamaji/internal/resources"
)
func UpdateStatus(ctx context.Context, client client.Client, tcpRetrieval TenantControlPlaneRetrievalFn, resource resources.Resource) error {
updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
tenantControlPlane, err := tcpRetrieval()
if err != nil {
return err
}
func UpdateStatus(ctx context.Context, client client.Client, tcp *kamajiv1alpha1.TenantControlPlane, resource resources.Resource) error {
updateErr := retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
defer func() {
if err != nil {
_ = client.Get(ctx, types.NamespacedName{Name: tcp.Name, Namespace: tcp.Namespace}, tcp)
}
}()
if err = resource.UpdateTenantControlPlaneStatus(ctx, tenantControlPlane); err != nil {
if err = resource.UpdateTenantControlPlaneStatus(ctx, tcp); err != nil {
return fmt.Errorf("error applying TenantcontrolPlane status: %w", err)
}
if err = client.Status().Update(ctx, tenantControlPlane); err != nil {
if err = client.Status().Update(ctx, tcp); err != nil {
return fmt.Errorf("error updating tenantControlPlane status: %w", err)
}