mirror of https://github.com/optim-enterprises-bv/kubernetes.git
synced 2025-11-04 04:08:16 +00:00

Wire contexts to Core controllers
@@ -20,6 +20,7 @@ limitations under the License.
 package main
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"net"
@@ -57,8 +58,8 @@ func (nodeIpamController *nodeIPAMController) StartNodeIpamControllerWrapper(ini
 	}
 	nodeIpamController.nodeIPAMControllerOptions.ApplyTo(&nodeIpamController.nodeIPAMControllerConfiguration)
 
-	return func(ctx genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
-		return startNodeIpamController(initContext, completedConfig, nodeIpamController.nodeIPAMControllerConfiguration, ctx, cloud)
+	return func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
+		return startNodeIpamController(initContext, completedConfig, nodeIpamController.nodeIPAMControllerConfiguration, controllerContext, cloud)
 	}
 }

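Note: a minimal sketch of the signature change in the hunk above, using hypothetical stand-ins for the controller-manager types (ControllerContext, controller.Interface). The cancellation context now arrives as a separate first argument, so "ctx" no longer names the controller-manager's own context struct:

package main

import "context"

// Hypothetical stand-ins for the real controller-manager types.
type ControllerContext struct{}
type Interface interface{}

// Old shape: "ctx" was the controller-manager context struct, and
// shutdown had to be signalled out of band (via a stop channel).
type initFuncOld func(ctx ControllerContext) (Interface, bool, error)

// New shape: a context.Context for cancellation comes first, and the
// controller-manager context gets an unambiguous name.
type initFuncNew func(ctx context.Context, controllerContext ControllerContext) (Interface, bool, error)

func main() {
	var _ initFuncNew = func(ctx context.Context, controllerContext ControllerContext) (Interface, bool, error) {
		// Controllers can now watch ctx.Done() for shutdown.
		return nil, true, nil
	}
}
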
@@ -92,7 +92,7 @@ func startServiceController(ctx context.Context, controllerContext ControllerCon
 		klog.Errorf("Failed to start service controller: %v", err)
 		return nil, false, nil
 	}
-	go serviceController.Run(ctx.Done(), int(controllerContext.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
+	go serviceController.Run(ctx, int(controllerContext.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
 	return nil, true, nil
 }
 
@@ -174,6 +174,7 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo
 
 func startNodeLifecycleController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
 	lifecycleController, err := lifecyclecontroller.NewNodeLifecycleController(
+		ctx,
 		controllerContext.InformerFactory.Coordination().V1().Leases(),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Core().V1().Nodes(),
@@ -193,7 +194,7 @@ func startNodeLifecycleController(ctx context.Context, controllerContext Control
 	if err != nil {
 		return nil, true, err
 	}
-	go lifecycleController.Run(ctx.Done())
+	go lifecycleController.Run(ctx)
 	return nil, true, nil
 }
 
@@ -212,7 +213,7 @@ func startCloudNodeLifecycleController(ctx context.Context, controllerContext Co
 		return nil, false, nil
 	}
 
-	go cloudNodeLifecycleController.Run(ctx.Done())
+	go cloudNodeLifecycleController.Run(ctx)
 	return nil, true, nil
 }
 
@@ -252,7 +253,7 @@ func startRouteController(ctx context.Context, controllerContext ControllerConte
 		controllerContext.InformerFactory.Core().V1().Nodes(),
 		controllerContext.ComponentConfig.KubeCloudShared.ClusterName,
 		clusterCIDRs)
-	go routeController.Run(ctx.Done(), controllerContext.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
+	go routeController.Run(ctx, controllerContext.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
 	return nil, true, nil
 }
 
@@ -285,7 +286,7 @@ func startPersistentVolumeBinderController(ctx context.Context, controllerContex
 	if volumeControllerErr != nil {
 		return nil, true, fmt.Errorf("failed to construct persistentvolume controller: %v", volumeControllerErr)
 	}
-	go volumeController.Run(ctx.Done())
+	go volumeController.Run(ctx)
 	return nil, true, nil
 }
 
@@ -361,7 +362,7 @@ func startVolumeExpandController(ctx context.Context, controllerContext Controll
 		if expandControllerErr != nil {
 			return nil, true, fmt.Errorf("failed to start volume expand controller: %v", expandControllerErr)
 		}
-		go expandController.Run(ctx.Done())
+		go expandController.Run(ctx)
 		return nil, true, nil
 	}
 	return nil, false, nil
@@ -375,7 +376,7 @@ func startEphemeralVolumeController(ctx context.Context, controllerContext Contr
 	if err != nil {
 		return nil, true, fmt.Errorf("failed to start ephemeral volume controller: %v", err)
 	}
-	go ephemeralController.Run(int(controllerContext.ComponentConfig.EphemeralVolumeController.ConcurrentEphemeralVolumeSyncs), ctx.Done())
+	go ephemeralController.Run(ctx, int(controllerContext.ComponentConfig.EphemeralVolumeController.ConcurrentEphemeralVolumeSyncs))
 	return nil, true, nil
 }
 
@@ -386,7 +387,7 @@ func startEndpointController(ctx context.Context, controllerCtx ControllerContex
 		controllerCtx.InformerFactory.Core().V1().Endpoints(),
 		controllerCtx.ClientBuilder.ClientOrDie("endpoint-controller"),
 		controllerCtx.ComponentConfig.EndpointController.EndpointUpdatesBatchPeriod.Duration,
-	).Run(int(controllerCtx.ComponentConfig.EndpointController.ConcurrentEndpointSyncs), ctx.Done())
+	).Run(ctx, int(controllerCtx.ComponentConfig.EndpointController.ConcurrentEndpointSyncs))
 	return nil, true, nil
 }
 
@@ -402,11 +403,12 @@ func startReplicationController(ctx context.Context, controllerContext Controlle
 
 func startPodGCController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
 	go podgc.NewPodGC(
+		ctx,
 		controllerContext.ClientBuilder.ClientOrDie("pod-garbage-collector"),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Core().V1().Nodes(),
 		int(controllerContext.ComponentConfig.PodGCController.TerminatedPodGCThreshold),
-	).Run(ctx.Done())
+	).Run(ctx)
 	return nil, true, nil
 }
 
@@ -438,7 +440,7 @@ func startResourceQuotaController(ctx context.Context, controllerContext Control
 	if err != nil {
 		return nil, false, err
 	}
-	go resourceQuotaController.Run(int(controllerContext.ComponentConfig.ResourceQuotaController.ConcurrentResourceQuotaSyncs), ctx.Done())
+	go resourceQuotaController.Run(ctx, int(controllerContext.ComponentConfig.ResourceQuotaController.ConcurrentResourceQuotaSyncs))
 
 	// Periodically the quota controller to detect new resource types
 	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Done())
@@ -489,7 +491,7 @@ func startServiceAccountController(ctx context.Context, controllerContext Contro
 	if err != nil {
 		return nil, true, fmt.Errorf("error creating ServiceAccount controller: %v", err)
 	}
-	go sac.Run(1, ctx.Done())
+	go sac.Run(ctx, 1)
 	return nil, true, nil
 }
 
@@ -497,7 +499,7 @@ func startTTLController(ctx context.Context, controllerContext ControllerContext
 	go ttlcontroller.NewTTLController(
 		controllerContext.InformerFactory.Core().V1().Nodes(),
 		controllerContext.ClientBuilder.ClientOrDie("ttl-controller"),
-	).Run(5, ctx.Done())
+	).Run(ctx, 5)
 	return nil, true, nil
 }
 
@@ -536,7 +538,7 @@ func startGarbageCollectorController(ctx context.Context, controllerContext Cont
 
 	// Start the garbage collector.
 	workers := int(controllerContext.ComponentConfig.GarbageCollectorController.ConcurrentGCSyncs)
-	go garbageCollector.Run(workers, ctx.Done())
+	go garbageCollector.Run(ctx, workers)
 
 	// Periodically refresh the RESTMapper with new discovery information and sync
 	// the garbage collector.
@@ -555,7 +557,7 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control
 	if err != nil {
 		return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
 	}
-	go pvcProtectionController.Run(1, ctx.Done())
+	go pvcProtectionController.Run(ctx, 1)
 	return nil, true, nil
 }
 
@@ -564,7 +566,7 @@ func startPVProtectionController(ctx context.Context, controllerContext Controll
 		controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
 		controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
 		utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
-	).Run(1, ctx.Done())
+	).Run(ctx, 1)
 	return nil, true, nil
 }
 
@@ -572,7 +574,7 @@ func startTTLAfterFinishedController(ctx context.Context, controllerContext Cont
 	go ttlafterfinished.New(
 		controllerContext.InformerFactory.Batch().V1().Jobs(),
 		controllerContext.ClientBuilder.ClientOrDie("ttl-after-finished-controller"),
-	).Run(int(controllerContext.ComponentConfig.TTLAfterFinishedController.ConcurrentTTLSyncs), ctx.Done())
+	).Run(ctx, int(controllerContext.ComponentConfig.TTLAfterFinishedController.ConcurrentTTLSyncs))
 	return nil, true, nil
 }
 
@@ -674,6 +676,6 @@ func startStorageVersionGCController(ctx context.Context, controllerContext Cont
 		controllerContext.ClientBuilder.ClientOrDie("storage-version-garbage-collector"),
 		controllerContext.InformerFactory.Coordination().V1().Leases(),
 		controllerContext.InformerFactory.Internal().V1alpha1().StorageVersions(),
-	).Run(ctx.Done())
+	).Run(ctx)
 	return nil, true, nil
 }

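Note: the recurring edit in the hunks above turns Run(workers, stopCh) into Run(ctx, workers). A self-contained sketch of the before/after shape (names here are illustrative, not the Kubernetes ones):

package main

import (
	"context"
	"fmt"
	"time"
)

// Before: a stop channel signals shutdown.
func runOld(workers int, stopCh <-chan struct{}) {
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case <-stopCh:
					return
				default:
					time.Sleep(10 * time.Millisecond) // stand-in for one work item
				}
			}
		}()
	}
	<-stopCh
}

// After: a context carries the same cancellation signal, and can also
// carry deadlines down into the API calls the workers make.
func runNew(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case <-ctx.Done():
					return
				default:
					time.Sleep(10 * time.Millisecond)
				}
			}
		}()
	}
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go runNew(ctx, 2)
	time.Sleep(50 * time.Millisecond)
	cancel() // replaces close(stopCh)
	time.Sleep(10 * time.Millisecond)
	fmt.Println("stopped")
}
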
@@ -1029,7 +1029,7 @@ func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
 
 // AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls
 // to update nodes; otherwise, no API calls. Return error if any.
-func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
+func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
 	if len(taints) == 0 {
 		return nil
 	}
@@ -1040,10 +1040,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -1064,7 +1064,7 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
 		if !updated {
 			return nil
 		}
-		return PatchNodeTaints(c, nodeName, oldNode, newNode)
+		return PatchNodeTaints(ctx, c, nodeName, oldNode, newNode)
 	})
 }
 
@@ -1072,7 +1072,7 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
 // won't fail if target taint doesn't exist or has been removed.
 // If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
 // any API calls.
-func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
+func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
 	if len(taints) == 0 {
 		return nil
 	}
@@ -1097,10 +1097,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
 		// First we try getting node from the API server cache, as it's cheaper. If it fails
 		// we get it from etcd to be sure to have fresh data.
 		if firstTry {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
 			firstTry = false
 		} else {
-			oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+			oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 		}
 		if err != nil {
 			return err
@@ -1121,12 +1121,12 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
 		if !updated {
 			return nil
 		}
-		return PatchNodeTaints(c, nodeName, oldNode, newNode)
+		return PatchNodeTaints(ctx, c, nodeName, oldNode, newNode)
 	})
 }
 
 // PatchNodeTaints patches node's taints.
-func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
+func PatchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
 	oldData, err := json.Marshal(oldNode)
 	if err != nil {
 		return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
@@ -1145,7 +1145,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
 		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
 	}
 
-	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	return err
 }

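Note: because the taint helpers above now accept a context instead of calling context.TODO() internally, callers can bound or cancel the underlying node Get/Patch calls. A sketch of a caller, assuming these helpers plus a client-go fake clientset (the node name and taint key are made up):

package controller

import (
	"context"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestTaintHelpersWithContext(t *testing.T) {
	client := fake.NewSimpleClientset(&v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
	})

	// The timeout now applies to every Get/Patch the helpers issue.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	taint := &v1.Taint{Key: "example.com/maintenance", Effect: v1.TaintEffectNoSchedule}
	if err := AddOrUpdateTaintOnNode(ctx, client, "node-1", taint); err != nil {
		t.Fatalf("add taint: %v", err)
	}
	// Passing a nil node forces a fresh read before removal.
	if err := RemoveTaintOffNode(ctx, client, "node-1", nil, taint); err != nil {
		t.Fatalf("remove taint: %v", err)
	}
}
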
@@ -815,7 +815,7 @@ func TestRemoveTaintOffNode(t *testing.T) {
 	}
 	for _, test := range tests {
 		node, _ := test.nodeHandler.Get(context.TODO(), test.nodeName, metav1.GetOptions{})
-		err := RemoveTaintOffNode(test.nodeHandler, test.nodeName, node, test.taintsToRemove...)
+		err := RemoveTaintOffNode(context.TODO(), test.nodeHandler, test.nodeName, node, test.taintsToRemove...)
 		assert.NoError(t, err, "%s: RemoveTaintOffNode() error = %v", test.name, err)
 
 		node, _ = test.nodeHandler.Get(context.TODO(), test.nodeName, metav1.GetOptions{})
@@ -990,7 +990,7 @@ func TestAddOrUpdateTaintOnNode(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		err := AddOrUpdateTaintOnNode(test.nodeHandler, test.nodeName, test.taintsToAdd...)
+		err := AddOrUpdateTaintOnNode(context.TODO(), test.nodeHandler, test.nodeName, test.taintsToAdd...)
 		assert.NoError(t, err, "%s: AddOrUpdateTaintOnNode() error = %v", test.name, err)
 
 		node, _ := test.nodeHandler.Get(context.TODO(), test.nodeName, metav1.GetOptions{})

@@ -186,19 +186,19 @@ type Controller struct {
 
 // Run will not return until stopCh is closed. workers determines how many
 // endpoints will be handled in parallel.
-func (e *Controller) Run(workers int, stopCh <-chan struct{}) {
+func (e *Controller) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer e.queue.ShutDown()
 
 	klog.Infof("Starting endpoint controller")
 	defer klog.Infof("Shutting down endpoint controller")
 
-	if !cache.WaitForNamedCacheSync("endpoint", stopCh, e.podsSynced, e.servicesSynced, e.endpointsSynced) {
+	if !cache.WaitForNamedCacheSync("endpoint", ctx.Done(), e.podsSynced, e.servicesSynced, e.endpointsSynced) {
 		return
 	}
 
 	for i := 0; i < workers; i++ {
-		go wait.Until(e.worker, e.workerLoopPeriod, stopCh)
+		go wait.UntilWithContext(ctx, e.worker, e.workerLoopPeriod)
 	}
 
 	go func() {
@@ -206,7 +206,7 @@ func (e *Controller) Run(workers int, stopCh <-chan struct{}) {
 		e.checkLeftoverEndpoints()
 	}()
 
-	<-stopCh
+	<-ctx.Done()
 }
 
 // When a pod is added, figure out what services it will be a member of and
@@ -335,19 +335,19 @@ func (e *Controller) onEndpointsDelete(obj interface{}) {
 // marks them done. You may run as many of these in parallel as you wish; the
 // workqueue guarantees that they will not end up processing the same service
 // at the same time.
-func (e *Controller) worker() {
-	for e.processNextWorkItem() {
+func (e *Controller) worker(ctx context.Context) {
+	for e.processNextWorkItem(ctx) {
 	}
 }
 
-func (e *Controller) processNextWorkItem() bool {
+func (e *Controller) processNextWorkItem(ctx context.Context) bool {
 	eKey, quit := e.queue.Get()
 	if quit {
 		return false
 	}
 	defer e.queue.Done(eKey)
 
-	err := e.syncService(eKey.(string))
+	err := e.syncService(ctx, eKey.(string))
 	e.handleErr(err, eKey)
 
 	return true
@@ -375,7 +375,7 @@ func (e *Controller) handleErr(err error, key interface{}) {
 	utilruntime.HandleError(err)
 }
 
-func (e *Controller) syncService(key string) error {
+func (e *Controller) syncService(ctx context.Context, key string) error {
 	startTime := time.Now()
 	defer func() {
 		klog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
@@ -396,7 +396,7 @@ func (e *Controller) syncService(key string) error {
 		// service is deleted. However, if we're down at the time when
 		// the service is deleted, we will miss that deletion, so this
 		// doesn't completely solve the problem. See #6877.
-		err = e.client.CoreV1().Endpoints(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
+		err = e.client.CoreV1().Endpoints(namespace).Delete(ctx, name, metav1.DeleteOptions{})
 		if err != nil && !errors.IsNotFound(err) {
 			return err
 		}
@@ -553,10 +553,10 @@ func (e *Controller) syncService(key string) error {
 	klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
 	if createEndpoints {
 		// No previous endpoints, create them
-		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(context.TODO(), newEndpoints, metav1.CreateOptions{})
+		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(ctx, newEndpoints, metav1.CreateOptions{})
 	} else {
 		// Pre-existing
-		_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(context.TODO(), newEndpoints, metav1.UpdateOptions{})
+		_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(ctx, newEndpoints, metav1.UpdateOptions{})
 	}
 	if err != nil {
 		if createEndpoints && errors.IsForbidden(err) {

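Note: the worker wiring above is the standard workqueue pattern, with wait.UntilWithContext replacing wait.Until so each worker inherits the controller's context. A compact, generic sketch of that loop (queue handling reduced to a stub):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// worker stands in for Controller.worker: in the real code it drains a
// workqueue via processNextWorkItem(ctx), which passes ctx into syncService.
func worker(ctx context.Context) {
	fmt.Println("processed one item")
}

func run(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		// Restart the worker every period until ctx is cancelled, mirroring:
		// go wait.UntilWithContext(ctx, e.worker, e.workerLoopPeriod)
		go wait.UntilWithContext(ctx, worker, 100*time.Millisecond)
	}
	<-ctx.Done() // replaces <-stopCh
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	run(ctx, 2)
}
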
@@ -267,7 +267,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
 		Spec:       v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -291,7 +291,7 @@ func TestSyncEndpointsExistingNilSubsets(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -315,7 +315,7 @@ func TestSyncEndpointsExistingEmptySubsets(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -331,7 +331,7 @@ func TestSyncEndpointsNewNoSubsets(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 1)
 }
 
@@ -385,7 +385,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -428,7 +428,7 @@ func TestSyncEndpointsHeadlessServiceLabel(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -456,7 +456,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -500,7 +500,7 @@ func TestSyncEndpointsProtocolSCTP(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "SCTP"}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -541,7 +541,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -581,7 +581,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -621,7 +621,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -665,7 +665,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -708,7 +708,7 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 0)
 }
 
@@ -730,7 +730,7 @@ func TestSyncEndpointsItems(t *testing.T) {
 			},
 		},
 	})
-	endpoints.syncService("other/foo")
+	endpoints.syncService(context.TODO(), "other/foo")
 
 	expectedSubsets := []v1.EndpointSubset{{
 		Addresses: []v1.EndpointAddress{
@@ -778,7 +778,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
 			},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	expectedSubsets := []v1.EndpointSubset{{
 		Addresses: []v1.EndpointAddress{
@@ -837,7 +837,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	serviceLabels[v1.IsHeadlessService] = ""
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -891,7 +891,7 @@ func TestWaitsForAllInformersToBeSynced2(t *testing.T) {
 			endpoints.workerLoopPeriod = 10 * time.Millisecond
 			stopCh := make(chan struct{})
 			defer close(stopCh)
-			go endpoints.Run(1, stopCh)
+			go endpoints.Run(context.TODO(), 1)
 
 			// cache.WaitForNamedCacheSync has a 100ms poll period, and the endpoints worker has a 10ms period.
 			// To ensure we get all updates, including unexpected ones, we need to wait at least as long as
@@ -937,7 +937,7 @@ func TestSyncEndpointsHeadlessService(t *testing.T) {
 	}
 	originalService := service.DeepCopy()
 	endpoints.serviceStore.Add(service)
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            "foo",
@@ -984,7 +984,7 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseFail
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            "foo",
@@ -1023,7 +1023,7 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyNeverAndPhaseSucc
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            "foo",
@@ -1062,7 +1062,7 @@ func TestSyncEndpointsItemsExcludeNotReadyPodsWithRestartPolicyOnFailureAndPhase
 			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            "foo",
@@ -1091,7 +1091,7 @@ func TestSyncEndpointsHeadlessWithoutPort(t *testing.T) {
 		},
 	})
 	addPods(endpoints.podStore, ns, 1, 1, 0, ipv4only)
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
@@ -1424,7 +1424,7 @@ func TestLastTriggerChangeTimeAnnotation(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -1474,7 +1474,7 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationOverridden(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -1525,7 +1525,7 @@ func TestLastTriggerChangeTimeAnnotation_AnnotationCleared(t *testing.T) {
 			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 
 	endpointsHandler.ValidateRequestCount(t, 1)
 	data := runtime.EncodeOrDie(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &v1.Endpoints{
@@ -1654,7 +1654,7 @@ func TestPodUpdatesBatching(t *testing.T) {
 			endpoints.endpointsSynced = alwaysReady
 			endpoints.workerLoopPeriod = 10 * time.Millisecond
 
-			go endpoints.Run(1, stopCh)
+			go endpoints.Run(context.TODO(), 1)
 
 			addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only)
 
@@ -1777,7 +1777,7 @@ func TestPodAddsBatching(t *testing.T) {
 			endpoints.endpointsSynced = alwaysReady
 			endpoints.workerLoopPeriod = 10 * time.Millisecond
 
-			go endpoints.Run(1, stopCh)
+			go endpoints.Run(context.TODO(), 1)
 
 			endpoints.serviceStore.Add(&v1.Service{
 				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
@@ -1899,7 +1899,7 @@ func TestPodDeleteBatching(t *testing.T) {
 			endpoints.endpointsSynced = alwaysReady
 			endpoints.workerLoopPeriod = 10 * time.Millisecond
 
-			go endpoints.Run(1, stopCh)
+			go endpoints.Run(context.TODO(), 1)
 
 			addPods(endpoints.podStore, ns, tc.podsCount, 1, 0, ipv4only)
 
@@ -1943,7 +1943,7 @@ func TestSyncEndpointsServiceNotFound(t *testing.T) {
 			ResourceVersion: "1",
 		},
 	})
-	endpoints.syncService(ns + "/foo")
+	endpoints.syncService(context.TODO(), ns+"/foo")
 	endpointsHandler.ValidateRequestCount(t, 1)
 	endpointsHandler.ValidateRequest(t, "/api/v1/namespaces/"+ns+"/endpoints/foo", "DELETE", nil)
 }
@@ -2069,7 +2069,7 @@ func TestSyncServiceOverCapacity(t *testing.T) {
 			c.endpointsStore.Add(endpoints)
 			client.CoreV1().Endpoints(ns).Create(context.TODO(), endpoints, metav1.CreateOptions{})
 
-			c.syncService(fmt.Sprintf("%s/%s", ns, svc.Name))
+			c.syncService(context.TODO(), fmt.Sprintf("%s/%s", ns, svc.Name))
 
 			actualEndpoints, err := client.CoreV1().Endpoints(ns).Get(context.TODO(), endpoints.Name, metav1.GetOptions{})
 			if err != nil {
@@ -2228,7 +2228,7 @@ func TestMultipleServiceChanges(t *testing.T) {
 	*controller = *newController(testServer.URL, 0*time.Second)
 	addPods(controller.podStore, ns, 1, 1, 0, ipv4only)
 
-	go func() { controller.Run(1, stopChan) }()
+	go func() { controller.Run(context.TODO(), 1) }()
 
 	svc := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},

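Note on the test changes above: the diff substitutes context.TODO() rather than a cancellable context. Both context.TODO() and context.Background() are empty, never-cancelled contexts; TODO() conventionally marks call sites where proper context plumbing is still pending, which fits these mechanically converted tests.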
@@ -137,7 +137,7 @@ func (gc *GarbageCollector) resyncMonitors(deletableResources map[schema.GroupVe
 }
 
 // Run starts garbage collector workers.
-func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
+func (gc *GarbageCollector) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer gc.attemptToDelete.ShutDown()
 	defer gc.attemptToOrphan.ShutDown()
@@ -146,9 +146,9 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
 	klog.Infof("Starting garbage collector controller")
 	defer klog.Infof("Shutting down garbage collector controller")
 
-	go gc.dependencyGraphBuilder.Run(stopCh)
+	go gc.dependencyGraphBuilder.Run(ctx.Done())
 
-	if !cache.WaitForNamedCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
+	if !cache.WaitForNamedCacheSync("garbage collector", ctx.Done(), gc.dependencyGraphBuilder.IsSynced) {
 		return
 	}
 
@@ -156,11 +156,11 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
 
 	// gc workers
 	for i := 0; i < workers; i++ {
-		go wait.Until(gc.runAttemptToDeleteWorker, 1*time.Second, stopCh)
-		go wait.Until(gc.runAttemptToOrphanWorker, 1*time.Second, stopCh)
+		go wait.UntilWithContext(ctx, gc.runAttemptToDeleteWorker, 1*time.Second)
+		go wait.Until(gc.runAttemptToOrphanWorker, 1*time.Second, ctx.Done())
 	}
 
-	<-stopCh
+	<-ctx.Done()
 }
 
 // resettableRESTMapper is a RESTMapper which is capable of resetting itself
@@ -294,8 +294,8 @@ func (gc *GarbageCollector) IsSynced() bool {
 	return gc.dependencyGraphBuilder.IsSynced()
 }
 
-func (gc *GarbageCollector) runAttemptToDeleteWorker() {
-	for gc.attemptToDeleteWorker() {
+func (gc *GarbageCollector) runAttemptToDeleteWorker(ctx context.Context) {
+	for gc.attemptToDeleteWorker(ctx) {
 	}
 }
 
@@ -303,7 +303,7 @@ var enqueuedVirtualDeleteEventErr = goerrors.New("enqueued virtual delete event"
 
 var namespacedOwnerOfClusterScopedObjectErr = goerrors.New("cluster-scoped objects cannot refer to namespaced owners")
 
-func (gc *GarbageCollector) attemptToDeleteWorker() bool {
+func (gc *GarbageCollector) attemptToDeleteWorker(ctx context.Context) bool {
 	item, quit := gc.attemptToDelete.Get()
 	gc.workerLock.RLock()
 	defer gc.workerLock.RUnlock()
@@ -333,7 +333,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
 		}
 	}
 
-	err := gc.attemptToDeleteItem(n)
+	err := gc.attemptToDeleteItem(ctx, n)
 	if err == enqueuedVirtualDeleteEventErr {
 		// a virtual event was produced and will be handled by processGraphChanges, no need to requeue this node
 		return true
@@ -368,7 +368,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
 // isDangling check if a reference is pointing to an object that doesn't exist.
 // If isDangling looks up the referenced object at the API server, it also
 // returns its latest state.
-func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
+func (gc *GarbageCollector) isDangling(ctx context.Context, reference metav1.OwnerReference, item *node) (
 	dangling bool, owner *metav1.PartialObjectMetadata, err error) {
 
 	// check for recorded absent cluster-scoped parent
@@ -408,7 +408,7 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
 	// TODO: It's only necessary to talk to the API server if the owner node
 	// is a "virtual" node. The local graph could lag behind the real
 	// status, but in practice, the difference is small.
-	owner, err = gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(context.TODO(), reference.Name, metav1.GetOptions{})
+	owner, err = gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(ctx, reference.Name, metav1.GetOptions{})
 	switch {
 	case errors.IsNotFound(err):
 		gc.absentOwnerCache.Add(absentOwnerCacheKey)
@@ -432,10 +432,10 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
 // waitingForDependentsDeletion: the owner exists, its deletionTimestamp is non-nil, and it has
 // FinalizerDeletingDependents
 // This function communicates with the server.
-func (gc *GarbageCollector) classifyReferences(item *node, latestReferences []metav1.OwnerReference) (
+func (gc *GarbageCollector) classifyReferences(ctx context.Context, item *node, latestReferences []metav1.OwnerReference) (
 	solid, dangling, waitingForDependentsDeletion []metav1.OwnerReference, err error) {
 	for _, reference := range latestReferences {
-		isDangling, owner, err := gc.isDangling(reference, item)
+		isDangling, owner, err := gc.isDangling(ctx, reference, item)
 		if err != nil {
 			return nil, nil, nil, err
 		}
@@ -471,7 +471,7 @@ func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
 //
 // if the API get request returns a NotFound error, or the retrieved item's uid does not match,
 // a virtual delete event for the node is enqueued and enqueuedVirtualDeleteEventErr is returned.
-func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
+func (gc *GarbageCollector) attemptToDeleteItem(ctx context.Context, item *node) error {
 	klog.V(2).InfoS("Processing object", "object", klog.KRef(item.identity.Namespace, item.identity.Name),
 		"objectUID", item.identity.UID, "kind", item.identity.Kind, "virtual", !item.isObserved())
 
@@ -515,7 +515,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 		return nil
 	}
 
-	solid, dangling, waitingForDependentsDeletion, err := gc.classifyReferences(item, ownerReferences)
+	solid, dangling, waitingForDependentsDeletion, err := gc.classifyReferences(ctx, item, ownerReferences)
 	if err != nil {
 		return err
 	}

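Note: the garbage collector shows a partial migration — dependencyGraphBuilder.Run and runAttemptToOrphanWorker still take a stop channel, so the new context is adapted with ctx.Done(). Bridging in the other direction, for a legacy caller that only holds a stop channel, takes a small adapter; a hypothetical helper:

package main

import (
	"context"
	"fmt"
)

// contextFromStopCh is a hypothetical adapter for partially migrated code:
// the returned context is cancelled once the legacy stop channel closes.
func contextFromStopCh(stopCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()
	return ctx, cancel
}

func main() {
	stopCh := make(chan struct{})
	ctx, cancel := contextFromStopCh(stopCh)
	defer cancel()

	close(stopCh) // legacy shutdown signal...
	<-ctx.Done()  // ...observed through the adapted context
	fmt.Println("cancelled:", ctx.Err())
}
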
@@ -114,9 +114,9 @@ func TestGarbageCollectorConstruction(t *testing.T) {
 	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
 
 	// Make sure the syncing mechanism also works after Run() has been called
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go gc.Run(1, stopCh)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go gc.Run(ctx, 1)
 
 	err = gc.resyncMonitors(twoResources)
 	if err != nil {
@@ -287,7 +287,7 @@ func TestAttemptToDeleteItem(t *testing.T) {
 		owners:  nil,
 		virtual: true,
 	}
-	err := gc.attemptToDeleteItem(item)
+	err := gc.attemptToDeleteItem(context.TODO(), item)
 	if err != nil {
 		t.Errorf("Unexpected Error: %v", err)
 	}
@@ -546,12 +546,12 @@ func TestAbsentOwnerCache(t *testing.T) {
 	gc := setupGC(t, clientConfig)
 	defer close(gc.stop)
 	gc.absentOwnerCache = NewReferenceCache(2)
-	gc.attemptToDeleteItem(podToGCNode(rc1Pod1))
-	gc.attemptToDeleteItem(podToGCNode(rc2Pod1))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod1))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc2Pod1))
 	// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the UIDCache
-	gc.attemptToDeleteItem(podToGCNode(rc1Pod2))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc1Pod2))
 	// after this call, rc2 should be evicted from the UIDCache
-	gc.attemptToDeleteItem(podToGCNode(rc3Pod1))
+	gc.attemptToDeleteItem(context.TODO(), podToGCNode(rc3Pod1))
 	// check cache
 	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc1", UID: "1", APIVersion: "v1"}}) {
 		t.Errorf("expected rc1 to be in the cache")
@@ -851,9 +851,9 @@ func TestGarbageCollectorSync(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	stopCh := make(chan struct{})
-	defer close(stopCh)
-	go gc.Run(1, stopCh)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go gc.Run(ctx, 1)
 	// The pseudo-code of GarbageCollector.Sync():
 	// GarbageCollector.Sync(client, period, stopCh):
 	//    wait.Until() loops with `period` until the `stopCh` is closed :
@@ -868,7 +868,7 @@ func TestGarbageCollectorSync(t *testing.T) {
 	// The 1s sleep in the test allows GetDeletableResources and
 	// gc.resyncMonitors to run ~5 times to ensure the changes to the
 	// fakeDiscoveryClient are picked up.
-	go gc.Sync(fakeDiscoveryClient, 200*time.Millisecond, stopCh)
+	go gc.Sync(fakeDiscoveryClient, 200*time.Millisecond, ctx.Done())
 
 	// Wait until the sync discovers the initial resources
 	time.Sleep(1 * time.Second)
@@ -2434,7 +2434,7 @@ func processAttemptToDelete(count int) step {
 			if count <= 0 {
 				// process all
				for ctx.gc.dependencyGraphBuilder.attemptToDelete.Len() != 0 {
-					ctx.gc.attemptToDeleteWorker()
+					ctx.gc.attemptToDeleteWorker(context.TODO())
 				}
 			} else {
 				for i := 0; i < count; i++ {
@@ -2442,7 +2442,7 @@ func processAttemptToDelete(count int) step {
 						ctx.t.Errorf("expected at least %d pending changes, got %d", count, i+1)
 						return
 					}
-					ctx.gc.attemptToDeleteWorker()
+					ctx.gc.attemptToDeleteWorker(context.TODO())
 				}
 			}
 		},

@@ -349,6 +349,7 @@ type Controller struct {
 | 
			
		||||
 | 
			
		||||
// NewNodeLifecycleController returns a new taint controller.
 | 
			
		||||
func NewNodeLifecycleController(
 | 
			
		||||
	ctx context.Context,
 | 
			
		||||
	leaseInformer coordinformers.LeaseInformer,
 | 
			
		||||
	podInformer coreinformers.PodInformer,
 | 
			
		||||
	nodeInformer coreinformers.NodeInformer,
 | 
			
		||||
@@ -484,7 +485,7 @@ func NewNodeLifecycleController(
 | 
			
		||||
		podGetter := func(name, namespace string) (*v1.Pod, error) { return nc.podLister.Pods(namespace).Get(name) }
 | 
			
		||||
		nodeLister := nodeInformer.Lister()
 | 
			
		||||
		nodeGetter := func(name string) (*v1.Node, error) { return nodeLister.Get(name) }
 | 
			
		||||
		nc.taintManager = scheduler.NewNoExecuteTaintManager(kubeClient, podGetter, nodeGetter, nc.getPodsAssignedToNode)
 | 
			
		||||
		nc.taintManager = scheduler.NewNoExecuteTaintManager(ctx, kubeClient, podGetter, nodeGetter, nc.getPodsAssignedToNode)
 | 
			
		||||
		nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 | 
			
		||||
			AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error {
 | 
			
		||||
				nc.taintManager.NodeUpdated(nil, node)
 | 
			
		||||
@@ -532,18 +533,18 @@ func NewNodeLifecycleController(
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Run starts an asynchronous loop that monitors the status of cluster nodes.
 | 
			
		||||
func (nc *Controller) Run(stopCh <-chan struct{}) {
 | 
			
		||||
func (nc *Controller) Run(ctx context.Context) {
 | 
			
		||||
	defer utilruntime.HandleCrash()
 | 
			
		||||
 | 
			
		||||
	klog.Infof("Starting node controller")
 | 
			
		||||
	defer klog.Infof("Shutting down node controller")
 | 
			
		||||
 | 
			
		||||
	if !cache.WaitForNamedCacheSync("taint", stopCh, nc.leaseInformerSynced, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) {
 | 
			
		||||
	if !cache.WaitForNamedCacheSync("taint", ctx.Done(), nc.leaseInformerSynced, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if nc.runTaintManager {
 | 
			
		||||
		go nc.taintManager.Run(stopCh)
 | 
			
		||||
		go nc.taintManager.Run(ctx)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Close node update queue to cleanup go routine.
 | 
			
		||||
@@ -556,35 +557,35 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
 | 
			
		||||
		// the item is flagged when got from queue: if new event come, the new item will
 | 
			
		||||
		// be re-queued until "Done", so no more than one worker handle the same item and
 | 
			
		||||
		// no event missed.
 | 
			
		||||
		go wait.Until(nc.doNodeProcessingPassWorker, time.Second, stopCh)
 | 
			
		||||
		go wait.UntilWithContext(ctx, nc.doNodeProcessingPassWorker, time.Second)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < podUpdateWorkerSize; i++ {
 | 
			
		||||
		go wait.Until(nc.doPodProcessingWorker, time.Second, stopCh)
 | 
			
		||||
		go wait.UntilWithContext(ctx, nc.doPodProcessingWorker, time.Second)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if nc.runTaintManager {
 | 
			
		||||
		// Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated
 | 
			
		||||
		// taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints.
 | 
			
		||||
		go wait.Until(nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod, stopCh)
 | 
			
		||||
		go wait.UntilWithContext(ctx, nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod)
 | 
			
		||||
	} else {
 | 
			
		||||
		// Managing eviction of nodes:
 | 
			
		||||
		// When we delete pods off a node, if the node was not empty at the time we then
 | 
			
		||||
		// queue an eviction watcher. If we hit an error, retry deletion.
 | 
			
		||||
		go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, stopCh)
 | 
			
		||||
		go wait.UntilWithContext(ctx, nc.doEvictionPass, scheduler.NodeEvictionPeriod)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Incorporate the results of node health signal pushed from kubelet to master.
 | 
			
		||||
	go wait.Until(func() {
 | 
			
		||||
		if err := nc.monitorNodeHealth(); err != nil {
 | 
			
		||||
	go wait.UntilWithContext(ctx, func(ctx context.Context) {
 | 
			
		||||
		if err := nc.monitorNodeHealth(ctx); err != nil {
 | 
			
		||||
			klog.Errorf("Error monitoring node health: %v", err)
 | 
			
		||||
		}
 | 
			
		||||
	}, nc.nodeMonitorPeriod, stopCh)
 | 
			
		||||
	}, nc.nodeMonitorPeriod)
 | 
			
		||||
 | 
			
		||||
	<-stopCh
 | 
			
		||||
	<-ctx.Done()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (nc *Controller) doNodeProcessingPassWorker() {
 | 
			
		||||
func (nc *Controller) doNodeProcessingPassWorker(ctx context.Context) {
 | 
			
		||||
	for {
 | 
			
		||||
		obj, shutdown := nc.nodeUpdateQueue.Get()
 | 
			
		||||
		// "nodeUpdateQueue" will be shutdown when "stopCh" closed;
 | 
			
		||||
@@ -593,7 +594,7 @@ func (nc *Controller) doNodeProcessingPassWorker() {
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
		nodeName := obj.(string)
 | 
			
		||||
		if err := nc.doNoScheduleTaintingPass(nodeName); err != nil {
 | 
			
		||||
		if err := nc.doNoScheduleTaintingPass(ctx, nodeName); err != nil {
 | 
			
		||||
			klog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err)
 | 
			
		||||
			// TODO(k82cn): Add nodeName back to the queue
 | 
			
		||||
		}
 | 
			
		||||
@@ -607,7 +608,7 @@ func (nc *Controller) doNodeProcessingPassWorker() {
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
 | 
			
		||||
func (nc *Controller) doNoScheduleTaintingPass(ctx context.Context, nodeName string) error {
 | 
			
		||||
	node, err := nc.nodeLister.Get(nodeName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		// If node not found, just ignore it.
 | 
			
		||||
@@ -656,13 +657,13 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
 | 
			
		||||
	if len(taintsToAdd) == 0 && len(taintsToDel) == 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) {
 | 
			
		||||
	if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, taintsToAdd, taintsToDel, node) {
 | 
			
		||||
		return fmt.Errorf("failed to swap taints of node %+v", node)
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (nc *Controller) doNoExecuteTaintingPass() {
func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	for k := range nc.zoneNoExecuteTainter {
@@ -694,7 +695,7 @@ func (nc *Controller) doNoExecuteTaintingPass() {
				return true, 0
			}

			result := nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node)
			result := nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{&oppositeTaint}, node)
			if result {
				//count the evictionsNumber
				zone := utilnode.GetZoneKey(node)
@@ -706,7 +707,7 @@ func (nc *Controller) doNoExecuteTaintingPass() {
	}
}

func (nc *Controller) doEvictionPass() {
func (nc *Controller) doEvictionPass(ctx context.Context) {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	for k := range nc.zonePodEvictor {
@@ -724,7 +725,7 @@ func (nc *Controller) doEvictionPass() {
				utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %v", value.Value, err))
				return false, 0
			}
			remaining, err := nodeutil.DeletePods(nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
			remaining, err := nodeutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
			if err != nil {
				// We are not setting eviction status here.
				// New pods will be handled by zonePodEvictor retry
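Both passes hand a closure to the per-zone rate-limited queue's `Try`; after this change the closure closes over `ctx`, so the API calls it makes inherit cancellation. A much-simplified sketch of that shape (the retrying queue is reduced to a plain loop here, not the real `RateLimitedTimedQueue`, and `deletePods` is a stand-in):

```go
package main

import (
	"context"
	"fmt"
)

// tryEach stands in for zonePodEvictor[k].Try: it invokes fn per node and
// would re-enqueue the node when fn returns false (retry elided here).
func tryEach(nodes []string, fn func(node string) (bool, error)) {
	for _, n := range nodes {
		if ok, err := fn(n); !ok {
			fmt.Println("will retry", n, "after error:", err)
		}
	}
}

func evictionPass(ctx context.Context, deletePods func(ctx context.Context, node string) error) {
	// The closure closes over ctx, so every deletePods call is cancellable.
	tryEach([]string{"node0", "node1"}, func(node string) (bool, error) {
		if err := deletePods(ctx, node); err != nil {
			return false, err
		}
		return true, nil
	})
}

func main() {
	evictionPass(context.Background(), func(ctx context.Context, node string) error {
		fmt.Println("deleting pods on", node)
		return nil
	})
}
```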
@@ -752,7 +753,7 @@ func (nc *Controller) doEvictionPass() {
// monitorNodeHealth verifies node health are constantly updated by kubelet, and
// if not, post "NodeReady==ConditionUnknown".
// This function will taint nodes who are not ready or not reachable for a long period of time.
func (nc *Controller) monitorNodeHealth() error {
func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
	// We are listing nodes from local cache as we can tolerate some small delays
	// comparing to state from etcd and there is eventual consistency anyway.
	nodes, err := nc.nodeLister.List(labels.Everything())
@@ -771,7 +772,7 @@ func (nc *Controller) monitorNodeHealth() error {
		nc.knownNodeSet[added[i].Name] = added[i]
		nc.addPodEvictorForNewZone(added[i])
		if nc.runTaintManager {
			nc.markNodeAsReachable(added[i])
			nc.markNodeAsReachable(ctx, added[i])
		} else {
			nc.cancelPodEviction(added[i])
		}
@@ -790,12 +791,12 @@ func (nc *Controller) monitorNodeHealth() error {
		var currentReadyCondition *v1.NodeCondition
		node := nodes[i].DeepCopy()
		if err := wait.PollImmediate(retrySleepTime, retrySleepTime*scheduler.NodeHealthUpdateRetry, func() (bool, error) {
			gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeHealth(node)
			gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeHealth(ctx, node)
			if err == nil {
				return true, nil
			}
			name := node.Name
			node, err = nc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
			node, err = nc.kubeClient.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				klog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name)
				return false, err
@@ -825,9 +826,9 @@ func (nc *Controller) monitorNodeHealth() error {
				continue
			}
			if nc.runTaintManager {
				nc.processTaintBaseEviction(node, &observedReadyCondition)
				nc.processTaintBaseEviction(ctx, node, &observedReadyCondition)
			} else {
				if err := nc.processNoTaintBaseEviction(node, &observedReadyCondition, gracePeriod, pods); err != nil {
				if err := nc.processNoTaintBaseEviction(ctx, node, &observedReadyCondition, gracePeriod, pods); err != nil {
					utilruntime.HandleError(fmt.Errorf("unable to evict all pods from node %v: %v; queuing for retry", node.Name, err))
				}
			}
@@ -839,7 +840,7 @@ func (nc *Controller) monitorNodeHealth() error {
				nodeutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
				fallthrough
			case needsRetry && observedReadyCondition.Status != v1.ConditionTrue:
				if err = nodeutil.MarkPodsNotReady(nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
				if err = nodeutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
					utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err))
					nc.nodesToRetry.Store(node.Name, struct{}{})
					continue
@@ -848,12 +849,12 @@ func (nc *Controller) monitorNodeHealth() error {
		}
		nc.nodesToRetry.Delete(node.Name)
	}
	nc.handleDisruption(zoneToNodeConditions, nodes)
	nc.handleDisruption(ctx, zoneToNodeConditions, nodes)

	return nil
}

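Inside `monitorNodeHealth` the change is twofold: helpers gain a `ctx` argument, and direct client-go calls stop using `context.TODO()` in favor of the caller's context. A compact illustration of the retry-and-refetch idiom; the fake clientset and the `node0` name are only for a runnable demo:

```go
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	client := fake.NewSimpleClientset()

	// Before: Nodes().Get(context.TODO(), ...) could outlive the controller.
	// After: the caller's ctx bounds every retry of the poll.
	err := wait.PollImmediate(10*time.Millisecond, 50*time.Millisecond, func() (bool, error) {
		_, err := client.CoreV1().Nodes().Get(ctx, "node0", metav1.GetOptions{})
		if err != nil {
			return false, nil // not found yet; keep polling
		}
		return true, nil
	})
	fmt.Println("poll result:", err) // times out: node0 is never created
}
```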
func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondition *v1.NodeCondition) {
func (nc *Controller) processTaintBaseEviction(ctx context.Context, node *v1.Node, observedReadyCondition *v1.NodeCondition) {
	decisionTimestamp := nc.now()
	// Check eviction timeout against decisionTimestamp
	switch observedReadyCondition.Status {
@@ -861,7 +862,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi
		// We want to update the taint straight away if Node is already tainted with the UnreachableTaint
		if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) {
			taintToAdd := *NotReadyTaintTemplate
			if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
			if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) {
				klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.")
			}
		} else if nc.markNodeForTainting(node, v1.ConditionFalse) {
@@ -874,7 +875,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi
		// We want to update the taint straight away if Node is already tainted with the UnreachableTaint
		if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) {
			taintToAdd := *UnreachableTaintTemplate
			if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
			if !nodeutil.SwapNodeControllerTaint(ctx, nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) {
				klog.Errorf("Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle.")
			}
		} else if nc.markNodeForTainting(node, v1.ConditionUnknown) {
@@ -884,7 +885,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi
			)
		}
	case v1.ConditionTrue:
		removed, err := nc.markNodeAsReachable(node)
		removed, err := nc.markNodeAsReachable(ctx, node)
		if err != nil {
			klog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
		}
@@ -894,7 +895,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi
	}
}

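Both branches perform the same swap in opposite directions (Unreachable ↔ NotReady); with `ctx` threaded in, the swap helper can skip its API writes once shutdown begins. A hypothetical, reduced stand-in for the swap logic (the `taint` type and `swapTaint` function are invented for illustration; the real helper is `nodeutil.SwapNodeControllerTaint`):

```go
package main

import (
	"context"
	"fmt"
)

type taint struct{ Key, Effect string }

// swapTaint is a hypothetical stand-in: remove one taint, add another,
// bailing out early if ctx is already cancelled.
func swapTaint(ctx context.Context, taints []taint, remove, add taint) ([]taint, error) {
	if err := ctx.Err(); err != nil {
		return taints, err // controller is shutting down; skip the API update
	}
	out := make([]taint, 0, len(taints)+1)
	for _, t := range taints {
		if t != remove {
			out = append(out, t)
		}
	}
	return append(out, add), nil
}

func main() {
	unreachable := taint{"node.kubernetes.io/unreachable", "NoExecute"}
	notReady := taint{"node.kubernetes.io/not-ready", "NoExecute"}
	got, _ := swapTaint(context.Background(), []taint{unreachable}, unreachable, notReady)
	fmt.Println(got) // [{node.kubernetes.io/not-ready NoExecute}]
}
```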
func (nc *Controller) processNoTaintBaseEviction(node *v1.Node, observedReadyCondition *v1.NodeCondition, gracePeriod time.Duration, pods []*v1.Pod) error {
func (nc *Controller) processNoTaintBaseEviction(ctx context.Context, node *v1.Node, observedReadyCondition *v1.NodeCondition, gracePeriod time.Duration, pods []*v1.Pod) error {
	decisionTimestamp := nc.now()
	nodeHealthData := nc.nodeHealthMap.getDeepCopy(node.Name)
	if nodeHealthData == nil {
@@ -904,7 +905,7 @@ func (nc *Controller) processNoTaintBaseEviction(node *v1.Node, observedReadyCon
	switch observedReadyCondition.Status {
	case v1.ConditionFalse:
		if decisionTimestamp.After(nodeHealthData.readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
			enqueued, err := nc.evictPods(node, pods)
			enqueued, err := nc.evictPods(ctx, node, pods)
			if err != nil {
				return err
			}
@@ -919,7 +920,7 @@ func (nc *Controller) processNoTaintBaseEviction(node *v1.Node, observedReadyCon
		}
	case v1.ConditionUnknown:
		if decisionTimestamp.After(nodeHealthData.probeTimestamp.Add(nc.podEvictionTimeout)) {
			enqueued, err := nc.evictPods(node, pods)
			enqueued, err := nc.evictPods(ctx, node, pods)
			if err != nil {
				return err
			}
@@ -953,7 +954,7 @@ func isNodeExcludedFromDisruptionChecks(node *v1.Node) bool {

// tryUpdateNodeHealth checks a given node's conditions and tries to update it. Returns grace period to
// which given node is entitled, state of current and last observed Ready Condition, and an error if it occurred.
func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {
func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {
	nodeHealth := nc.nodeHealthMap.getDeepCopy(node.Name)
	defer func() {
		nc.nodeHealthMap.set(node.Name, nodeHealth)
@@ -1102,7 +1103,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
		_, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)

		if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) {
			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}); err != nil {
			if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}); err != nil {
				klog.Errorf("Error updating node %s: %v", node.Name, err)
				return gracePeriod, observedReadyCondition, currentReadyCondition, err
			}
@@ -1119,7 +1120,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
	return gracePeriod, observedReadyCondition, currentReadyCondition, nil
}

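`tryUpdateNodeHealth` is where the controller writes status back, so replacing `context.TODO()` with the threaded `ctx` lets a shutdown abort the `UpdateStatus` round trip. A short, runnable illustration against the fake clientset (the node and condition values are invented for the demo):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}}
	client := fake.NewSimpleClientset(node)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node.Status.Conditions = []v1.NodeCondition{{
		Type: v1.NodeReady, Status: v1.ConditionUnknown,
	}}

	// With the caller's ctx (instead of context.TODO()), this write is
	// bounded by the controller's lifetime.
	if _, err := client.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{}); err != nil {
		fmt.Println("update failed:", err)
	} else {
		fmt.Println("status updated")
	}
}
```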
func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) {
func (nc *Controller) handleDisruption(ctx context.Context, zoneToNodeConditions map[string][]*v1.NodeCondition, nodes []*v1.Node) {
	newZoneStates := map[string]ZoneState{}
	allAreFullyDisrupted := true
	for k, v := range zoneToNodeConditions {
@@ -1163,7 +1164,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod
			klog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.")
			for i := range nodes {
				if nc.runTaintManager {
					_, err := nc.markNodeAsReachable(nodes[i])
					_, err := nc.markNodeAsReachable(ctx, nodes[i])
					if err != nil {
						klog.Errorf("Failed to remove taints from Node %v", nodes[i].Name)
					}
@@ -1227,7 +1228,7 @@ func (nc *Controller) podUpdated(oldPod, newPod *v1.Pod) {
	}
}

func (nc *Controller) doPodProcessingWorker() {
func (nc *Controller) doPodProcessingWorker(ctx context.Context) {
	for {
		obj, shutdown := nc.podUpdateQueue.Get()
		// "podUpdateQueue" will be shutdown when "stopCh" closed;
@@ -1237,7 +1238,7 @@ func (nc *Controller) doPodProcessingWorker() {
		}

		podItem := obj.(podUpdateItem)
		nc.processPod(podItem)
		nc.processPod(ctx, podItem)
	}
}

@@ -1245,7 +1246,7 @@ func (nc *Controller) doPodProcessingWorker() {
// 1. for NodeReady=true node, taint eviction for this pod will be cancelled
// 2. for NodeReady=false or unknown node, taint eviction of pod will happen and pod will be marked as not ready
// 3. if node doesn't exist in cache, it will be skipped and handled later by doEvictionPass
func (nc *Controller) processPod(podItem podUpdateItem) {
func (nc *Controller) processPod(ctx context.Context, podItem podUpdateItem) {
	defer nc.podUpdateQueue.Done(podItem)
	pod, err := nc.podLister.Pods(podItem.namespace).Get(podItem.name)
	if err != nil {
@@ -1286,7 +1287,7 @@ func (nc *Controller) processPod(podItem podUpdateItem) {
	// In taint-based eviction mode, only node updates are processed by NodeLifecycleController.
	// Pods are processed by TaintManager.
	if !nc.runTaintManager {
		if err := nc.processNoTaintBaseEviction(node, currentReadyCondition, nc.nodeMonitorGracePeriod, pods); err != nil {
		if err := nc.processNoTaintBaseEviction(ctx, node, currentReadyCondition, nc.nodeMonitorGracePeriod, pods); err != nil {
			klog.Warningf("Unable to process pod %+v eviction from node %v: %v.", podItem, nodeName, err)
			nc.podUpdateQueue.AddRateLimited(podItem)
			return
@@ -1294,7 +1295,7 @@ func (nc *Controller) processPod(podItem podUpdateItem) {
	}

	if currentReadyCondition.Status != v1.ConditionTrue {
		if err := nodeutil.MarkPodsNotReady(nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
		if err := nodeutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
			klog.Warningf("Unable to mark pod %+v NotReady on node %v: %v.", podItem, nodeName, err)
			nc.podUpdateQueue.AddRateLimited(podItem)
		}
@@ -1421,14 +1422,14 @@ func (nc *Controller) cancelPodEviction(node *v1.Node) bool {
//   Returns false if the node name was already enqueued.
// - deletes pods immediately if node is already marked as evicted.
//   Returns false, because the node wasn't added to the queue.
func (nc *Controller) evictPods(node *v1.Node, pods []*v1.Pod) (bool, error) {
func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.Pod) (bool, error) {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	status, ok := nc.nodeEvictionMap.getStatus(node.Name)
	if ok && status == evicted {
		// Node eviction already happened for this node.
		// Handling immediate pod deletion.
		_, err := nodeutil.DeletePods(nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
		_, err := nodeutil.DeletePods(ctx, nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
		if err != nil {
			return false, fmt.Errorf("unable to delete pods from node %q: %v", node.Name, err)
		}
@@ -1458,15 +1459,15 @@ func (nc *Controller) markNodeForTainting(node *v1.Node, status v1.ConditionStat
	return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
}

func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) {
func (nc *Controller) markNodeAsReachable(ctx context.Context, node *v1.Node) (bool, error) {
	nc.evictorLock.Lock()
	defer nc.evictorLock.Unlock()
	err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, UnreachableTaintTemplate)
	err := controller.RemoveTaintOffNode(ctx, nc.kubeClient, node.Name, node, UnreachableTaintTemplate)
	if err != nil {
		klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
		return false, err
	}
	err = controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, NotReadyTaintTemplate)
	err = controller.RemoveTaintOffNode(ctx, nc.kubeClient, node.Name, node, NotReadyTaintTemplate)
	if err != nil {
		klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
		return false, err

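The remaining hunks in this file are the node lifecycle controller's tests, which adopt the new signatures by passing `context.TODO()` wherever the test has no lifecycle to model. A minimal sketch of that convention, assuming a freshly migrated method under test (`monitorNodeHealth` here is a trivial stand-in, not the real controller method):

```go
package nodelifecycle_test

import (
	"context"
	"testing"
)

// monitorNodeHealth stands in for any method that just gained a ctx parameter.
func monitorNodeHealth(ctx context.Context) error {
	return ctx.Err() // nil unless the context was already cancelled
}

// Tests that never cancel simply pass context.TODO(), matching the
// mechanical conversions in the hunks below.
func TestMonitorNodeHealthWithTODO(t *testing.T) {
	if err := monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
```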
@@ -95,7 +95,7 @@ func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNode
		nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
			uid, _ := value.UID.(string)
			pods, _ := nc.getPodsAssignedToNode(value.Value)
			nodeutil.DeletePods(fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
			nodeutil.DeletePods(context.TODO(), fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
			_ = nc.nodeEvictionMap.setStatus(value.Value, evicted)
			return true, 0
		})
@@ -144,6 +144,7 @@ func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeN
}

func newNodeLifecycleControllerFromClient(
	ctx context.Context,
	kubeClient clientset.Interface,
	podEvictionTimeout time.Duration,
	evictionLimiterQPS float32,
@@ -163,6 +164,7 @@ func newNodeLifecycleControllerFromClient(
	daemonSetInformer := factory.Apps().V1().DaemonSets()

	nc, err := NewNodeLifecycleController(
		ctx,
		leaseInformer,
		factory.Core().V1().Pods(),
		nodeInformer,
@@ -679,6 +681,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {

	for _, item := range table {
		nodeController, _ := newNodeLifecycleControllerFromClient(
			context.TODO(),
			item.fakeNodeHandler,
			evictionTimeout,
			testRateLimiterQPS,
@@ -698,7 +701,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.timeToPass > 0 {
@@ -713,7 +716,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		zones := testutil.GetZones(item.fakeNodeHandler)
@@ -726,7 +729,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
					t.Errorf("unexpected error: %v", err)
				}
				t.Logf("listed pods %d for node %v", len(pods), value.Value)
				nodeutil.DeletePods(item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
				nodeutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
				return true, 0
			})
		} else {
@@ -847,6 +850,7 @@ func TestPodStatusChange(t *testing.T) {

	for _, item := range table {
		nodeController, _ := newNodeLifecycleControllerFromClient(
			context.TODO(),
			item.fakeNodeHandler,
			evictionTimeout,
			testRateLimiterQPS,
@@ -863,7 +867,7 @@ func TestPodStatusChange(t *testing.T) {
		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.timeToPass > 0 {
@@ -874,7 +878,7 @@ func TestPodStatusChange(t *testing.T) {
		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		zones := testutil.GetZones(item.fakeNodeHandler)
@@ -885,7 +889,7 @@ func TestPodStatusChange(t *testing.T) {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				nodeutil.DeletePods(item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
				nodeutil.DeletePods(context.TODO(), item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
				return true, 0
			})
		}
@@ -1408,6 +1412,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
			Clientset: fake.NewSimpleClientset(&v1.PodList{Items: item.podList}),
		}
		nodeController, _ := newNodeLifecycleControllerFromClient(
			context.TODO(),
			fakeNodeHandler,
			evictionTimeout,
			testRateLimiterQPS,
@@ -1430,7 +1435,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("%v: unexpected error: %v", item.description, err)
		}

@@ -1448,7 +1453,7 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("%v: unexpected error: %v", item.description, err)
		}
		for zone, state := range item.expectedFollowingStates {
@@ -1694,6 +1699,7 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
	}
	for i, item := range table {
		nodeController, _ := newNodeLifecycleControllerFromClient(
			context.TODO(),
			item.fakeNodeHandler,
			5*time.Minute,
			testRateLimiterQPS,
@@ -1710,7 +1716,7 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.timeToPass > 0 {
@@ -1719,7 +1725,7 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if err := nodeController.monitorNodeHealth(); err != nil {
			if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
				t.Errorf("unexpected error: %v", err)
			}
		}
@@ -2237,6 +2243,7 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
	for _, item := range testcases {
		t.Run(item.description, func(t *testing.T) {
			nodeController, _ := newNodeLifecycleControllerFromClient(
				context.TODO(),
				item.fakeNodeHandler,
				5*time.Minute,
				testRateLimiterQPS,
@@ -2256,7 +2263,7 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
			if err := nodeController.syncLeaseStore(item.lease); err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if err := nodeController.monitorNodeHealth(); err != nil {
			if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if item.timeToPass > 0 {
@@ -2268,7 +2275,7 @@ func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) {
				if err := nodeController.syncLeaseStore(item.newLease); err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if err := nodeController.monitorNodeHealth(); err != nil {
				if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
			}
@@ -2401,6 +2408,7 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {

	for i, item := range table {
		nodeController, _ := newNodeLifecycleControllerFromClient(
			context.TODO(),
			item.fakeNodeHandler,
			5*time.Minute,
			testRateLimiterQPS,
@@ -2417,7 +2425,7 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
		if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if err := nodeController.monitorNodeHealth(); err != nil {
		if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
			t.Errorf("Case[%d] unexpected error: %v", i, err)
		}
		if item.timeToPass > 0 {
@@ -2426,7 +2434,7 @@ func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if err := nodeController.monitorNodeHealth(); err != nil {
			if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
				t.Errorf("Case[%d] unexpected error: %v", i, err)
			}
		}
@@ -2584,6 +2592,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
	for _, item := range table {
		t.Run(item.desc, func(t *testing.T) {
			nodeController, _ := newNodeLifecycleControllerFromClient(
				context.TODO(),
				item.fakeNodeHandler,
				5*time.Minute,
				testRateLimiterQPS,
@@ -2606,7 +2615,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
				if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if err := nodeController.monitorNodeHealth(); err != nil {
				if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
					t.Errorf("unexpected error: %v", err)
				}
			}
@@ -2718,6 +2727,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
	}
	originalTaint := UnreachableTaintTemplate
	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		evictionTimeout,
		testRateLimiterQPS,
@@ -2734,10 +2744,10 @@ func TestApplyNoExecuteTaints(t *testing.T) {
	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.doNoExecuteTaintingPass()
	nodeController.doNoExecuteTaintingPass(context.TODO())
	node0, err := fakeNodeHandler.Get(context.TODO(), "node0", metav1.GetOptions{})
	if err != nil {
		t.Errorf("Can't get current node0...")
@@ -2765,10 +2775,10 @@ func TestApplyNoExecuteTaints(t *testing.T) {
	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.doNoExecuteTaintingPass()
	nodeController.doNoExecuteTaintingPass(context.TODO())

	node2, err = fakeNodeHandler.Get(context.TODO(), "node2", metav1.GetOptions{})
	if err != nil {
@@ -2872,6 +2882,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
		},
	}
	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		evictionTimeout,
		testRateLimiterQPS,
@@ -2889,10 +2900,10 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
		t.Errorf("unexpected error: %v", err)
	}
	// 1. monitor node health twice, add untainted node once
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

@@ -2986,14 +2997,14 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
		t.Errorf("unexpected error: %v", err)
	}
	// 3. start monitor node health again, add untainted node twice, construct UniqueQueue with duplicated node cache
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// 4. do NoExecute taint pass
	// when processing with node0, condition.Status is NodeReady, and return true with default case
	// then remove the set value and queue value both, the taint job never stuck
	nodeController.doNoExecuteTaintingPass()
	nodeController.doNoExecuteTaintingPass(context.TODO())

	// 5. get node3 and node5, see if it has ready got NoExecute taint
	node3, err := fakeNodeHandler.Get(context.TODO(), "node3", metav1.GetOptions{})
@@ -3096,6 +3107,7 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
	updatedTaint := NotReadyTaintTemplate

	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		evictionTimeout,
		testRateLimiterQPS,
@@ -3112,10 +3124,10 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.doNoExecuteTaintingPass()
	nodeController.doNoExecuteTaintingPass(context.TODO())

	node0, err := fakeNodeHandler.Get(context.TODO(), "node0", metav1.GetOptions{})
	if err != nil {
@@ -3150,10 +3162,10 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.doNoExecuteTaintingPass()
	nodeController.doNoExecuteTaintingPass(context.TODO())

	node0, err = fakeNodeHandler.Get(context.TODO(), "node0", metav1.GetOptions{})
	if err != nil {
@@ -3200,6 +3212,7 @@ func TestTaintsNodeByCondition(t *testing.T) {
	}

	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		evictionTimeout,
		testRateLimiterQPS,
@@ -3355,7 +3368,7 @@ func TestTaintsNodeByCondition(t *testing.T) {
		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		nodeController.doNoScheduleTaintingPass(test.Node.Name)
		nodeController.doNoScheduleTaintingPass(context.TODO(), test.Node.Name)
		if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
@@ -3402,6 +3415,7 @@ func TestNodeEventGeneration(t *testing.T) {
	}

	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		5*time.Minute,
		testRateLimiterQPS,
@@ -3420,7 +3434,7 @@ func TestNodeEventGeneration(t *testing.T) {
	if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if err := nodeController.monitorNodeHealth(); err != nil {
	if err := nodeController.monitorNodeHealth(context.TODO()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(fakeRecorder.Events) != 1 {
@@ -3475,6 +3489,7 @@ func TestReconcileNodeLabels(t *testing.T) {
	}

	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		evictionTimeout,
		testRateLimiterQPS,
@@ -3618,6 +3633,7 @@ func TestTryUpdateNodeHealth(t *testing.T) {
	}

	nodeController, _ := newNodeLifecycleControllerFromClient(
		context.TODO(),
		fakeNodeHandler,
		evictionTimeout,
		testRateLimiterQPS,
@@ -3790,7 +3806,7 @@ func TestTryUpdateNodeHealth(t *testing.T) {
				probeTimestamp:           test.node.CreationTimestamp,
				readyTransitionTimestamp: test.node.CreationTimestamp,
			})
			_, _, currentReadyCondition, err := nodeController.tryUpdateNodeHealth(test.node)
			_, _, currentReadyCondition, err := nodeController.tryUpdateNodeHealth(context.TODO(), test.node)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

@@ -99,8 +99,8 @@ type NoExecuteTaintManager struct {
	podUpdateQueue  workqueue.Interface
}

func deletePodHandler(c clientset.Interface, emitEventFunc func(types.NamespacedName)) func(args *WorkArgs) error {
	return func(args *WorkArgs) error {
func deletePodHandler(c clientset.Interface, emitEventFunc func(types.NamespacedName)) func(ctx context.Context, args *WorkArgs) error {
	return func(ctx context.Context, args *WorkArgs) error {
		ns := args.NamespacedName.Namespace
		name := args.NamespacedName.Name
		klog.V(0).InfoS("NoExecuteTaintManager is deleting pod", "pod", args.NamespacedName.String())
@@ -109,7 +109,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced
		}
		var err error
		for i := 0; i < retries; i++ {
			err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
			err = c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
			if err == nil {
				break
			}
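`deletePodHandler` shows the closure variant of the migration: the returned work function now receives the context at call time instead of hard-coding `context.TODO()` inside the retry loop. A self-contained sketch of that shape (the retry count and the fake clientset are illustrative):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

const retries = 3

// deleteHandler mirrors deletePodHandler: ctx arrives with each invocation,
// so every retry of the Delete call is bounded by the caller's lifetime.
func deleteHandler(c kubernetes.Interface) func(ctx context.Context, ns, name string) error {
	return func(ctx context.Context, ns, name string) error {
		var err error
		for i := 0; i < retries; i++ {
			err = c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
			if err == nil {
				break
			}
		}
		return err
	}
}

func main() {
	handler := deleteHandler(fake.NewSimpleClientset())
	// Deleting a pod that doesn't exist: the fake client returns NotFound.
	fmt.Println(handler(context.Background(), "default", "missing-pod"))
}
```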
@@ -155,7 +155,7 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {

// NewNoExecuteTaintManager creates a new NoExecuteTaintManager that will use passed clientset to
// communicate with the API server.
func NewNoExecuteTaintManager(c clientset.Interface, getPod GetPodFunc, getNode GetNodeFunc, getPodsAssignedToNode GetPodsByNodeNameFunc) *NoExecuteTaintManager {
func NewNoExecuteTaintManager(ctx context.Context, c clientset.Interface, getPod GetPodFunc, getNode GetNodeFunc, getPodsAssignedToNode GetPodsByNodeNameFunc) *NoExecuteTaintManager {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "taint-controller"})
	eventBroadcaster.StartStructuredLogging(0)
@@ -183,7 +183,7 @@ func NewNoExecuteTaintManager(c clientset.Interface, getPod GetPodFunc, getNode
}

// Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed.
func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
func (tc *NoExecuteTaintManager) Run(ctx context.Context) {
	klog.V(0).InfoS("Starting NoExecuteTaintManager")

	for i := 0; i < UpdateWorkerSize; i++ {
@@ -209,7 +209,7 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
			// tc.nodeUpdateQueue.Done is called by the nodeUpdateChannels worker
		}
	}(stopCh)
	}(ctx.Done())

	go func(stopCh <-chan struct{}) {
		for {
@@ -231,17 +231,17 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
			// tc.podUpdateQueue.Done is called by the podUpdateChannels worker
		}
	}(stopCh)
	}(ctx.Done())

	wg := sync.WaitGroup{}
	wg.Add(UpdateWorkerSize)
	for i := 0; i < UpdateWorkerSize; i++ {
		go tc.worker(i, wg.Done, stopCh)
		go tc.worker(ctx, i, wg.Done, ctx.Done())
	}
	wg.Wait()
}

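`Run` is a bridging example: internal goroutines still want a `<-chan struct{}` stop signal, and `ctx.Done()` supplies exactly that, so the old channel-based plumbing keeps working while callers deal only in contexts. A condensed sketch of the bridge:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// dispatch is a legacy-style goroutine that only understands stop channels.
func dispatch(stopCh <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-stopCh:
			fmt.Println("dispatcher stopped")
			return
		default:
			time.Sleep(10 * time.Millisecond) // stand-in for queue work
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(1)

	// ctx.Done() is a <-chan struct{}, so it slots directly into
	// channel-based code during a gradual migration.
	go dispatch(ctx.Done(), &wg)

	cancel()
	wg.Wait()
}
```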
func (tc *NoExecuteTaintManager) worker(worker int, done func(), stopCh <-chan struct{}) {
func (tc *NoExecuteTaintManager) worker(ctx context.Context, worker int, done func(), stopCh <-chan struct{}) {
	defer done()

	// When processing events we want to prioritize Node updates over Pod updates,
@@ -253,7 +253,7 @@ func (tc *NoExecuteTaintManager) worker(worker int, done func(), stopCh <-chan s
		case <-stopCh:
			return
		case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
			tc.handleNodeUpdate(nodeUpdate)
			tc.handleNodeUpdate(ctx, nodeUpdate)
			tc.nodeUpdateQueue.Done(nodeUpdate)
		case podUpdate := <-tc.podUpdateChannels[worker]:
			// If we found a Pod update we need to empty Node queue first.
@@ -261,14 +261,14 @@ func (tc *NoExecuteTaintManager) worker(worker int, done func(), stopCh <-chan s
			for {
				select {
				case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
					tc.handleNodeUpdate(nodeUpdate)
					tc.handleNodeUpdate(ctx, nodeUpdate)
					tc.nodeUpdateQueue.Done(nodeUpdate)
				default:
					break priority
				}
			}
			// After Node queue is emptied we process podUpdate.
			tc.handlePodUpdate(podUpdate)
			tc.handlePodUpdate(ctx, podUpdate)
			tc.podUpdateQueue.Done(podUpdate)
		}
	}
@@ -338,6 +338,7 @@ func (tc *NoExecuteTaintManager) cancelWorkWithEvent(nsName types.NamespacedName
}

func (tc *NoExecuteTaintManager) processPodOnNode(
	ctx context.Context,
	podNamespacedName types.NamespacedName,
	nodeName string,
	tolerations []v1.Toleration,
@@ -352,7 +353,7 @@ func (tc *NoExecuteTaintManager) processPodOnNode(
		klog.V(2).InfoS("Not all taints are tolerated after update for pod on node", "pod", podNamespacedName.String(), "node", nodeName)
		// We're canceling scheduled work (if any), as we're going to delete the Pod right away.
		tc.cancelWorkWithEvent(podNamespacedName)
		tc.taintEvictionQueue.AddWork(NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), time.Now(), time.Now())
		tc.taintEvictionQueue.AddWork(ctx, NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), time.Now(), time.Now())
		return
	}
	minTolerationTime := getMinTolerationTime(usedTolerations)
@@ -373,10 +374,10 @@ func (tc *NoExecuteTaintManager) processPodOnNode(
		}
		tc.cancelWorkWithEvent(podNamespacedName)
	}
	tc.taintEvictionQueue.AddWork(NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), startTime, triggerTime)
	tc.taintEvictionQueue.AddWork(ctx, NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), startTime, triggerTime)
}

func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) {
func (tc *NoExecuteTaintManager) handlePodUpdate(ctx context.Context, podUpdate podUpdateItem) {
	pod, err := tc.getPod(podUpdate.podName, podUpdate.podNamespace)
	if err != nil {
		if apierrors.IsNotFound(err) {
@@ -413,10 +414,10 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) {
	if !ok {
		return
	}
	tc.processPodOnNode(podNamespacedName, nodeName, pod.Spec.Tolerations, taints, time.Now())
	tc.processPodOnNode(ctx, podNamespacedName, nodeName, pod.Spec.Tolerations, taints, time.Now())
}

func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
func (tc *NoExecuteTaintManager) handleNodeUpdate(ctx context.Context, nodeUpdate nodeUpdateItem) {
	node, err := tc.getNode(nodeUpdate.nodeName)
	if err != nil {
		if apierrors.IsNotFound(err) {
@@ -468,7 +469,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
	now := time.Now()
	for _, pod := range pods {
		podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
		tc.processPodOnNode(podNamespacedName, node.Name, pod.Spec.Tolerations, taints, now)
		tc.processPodOnNode(ctx, podNamespacedName, node.Name, pod.Spec.Tolerations, taints, now)
	}
}

@@ -217,11 +217,11 @@ func TestCreatePod(t *testing.T) {
	}

	for _, item := range testCases {
		stopCh := make(chan struct{})
		ctx, cancel := context.WithCancel(context.Background())
		fakeClientset := fake.NewSimpleClientset()
		controller := NewNoExecuteTaintManager(fakeClientset, (&podHolder{pod: item.pod}).getPod, getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
		controller := NewNoExecuteTaintManager(ctx, fakeClientset, (&podHolder{pod: item.pod}).getPod, getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
		controller.recorder = testutil.NewFakeRecorder()
		go controller.Run(stopCh)
		go controller.Run(ctx)
		controller.taintedNodes = item.taintedNodes
		controller.PodUpdated(nil, item.pod)
		// wait a bit
@@ -236,16 +236,16 @@ func TestCreatePod(t *testing.T) {
		if podDeleted != item.expectDelete {
			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
		}
		close(stopCh)
		cancel()
	}
}
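Where the taint manager tests do care about teardown, the hand-rolled `stopCh` gives way to `context.WithCancel`, so constructor, `Run`, and cleanup all share one cancellation signal. A reduced sketch of the test lifecycle (the `run` function is a stand-in for `NoExecuteTaintManager.Run`):

```go
package taintmanager_test

import (
	"context"
	"testing"
	"time"
)

// run stands in for NoExecuteTaintManager.Run: it blocks until cancelled.
func run(ctx context.Context, done chan<- struct{}) {
	<-ctx.Done()
	close(done)
}

func TestRunStopsOnCancel(t *testing.T) {
	// Before: stopCh := make(chan struct{}); go run(stopCh); close(stopCh).
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go run(ctx, done)

	cancel() // replaces close(stopCh) as the teardown signal
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("Run did not stop after cancel")
	}
}
```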

func TestDeletePod(t *testing.T) {
	stopCh := make(chan struct{})
	fakeClientset := fake.NewSimpleClientset()
	controller := NewNoExecuteTaintManager(fakeClientset, getPodFromClientset(fakeClientset), getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
	controller := NewNoExecuteTaintManager(context.TODO(), fakeClientset, getPodFromClientset(fakeClientset), getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
	controller.recorder = testutil.NewFakeRecorder()
	go controller.Run(stopCh)
	go controller.Run(context.TODO())
	controller.taintedNodes = map[string][]v1.Taint{
		"node1": {createNoExecuteTaint(1)},
	}
@@ -304,12 +304,12 @@ func TestUpdatePod(t *testing.T) {
	}

	for _, item := range testCases {
		stopCh := make(chan struct{})
		ctx, cancel := context.WithCancel(context.Background())
		fakeClientset := fake.NewSimpleClientset()
		holder := &podHolder{}
		controller := NewNoExecuteTaintManager(fakeClientset, holder.getPod, getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
		controller := NewNoExecuteTaintManager(ctx, fakeClientset, holder.getPod, getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
		controller.recorder = testutil.NewFakeRecorder()
		go controller.Run(stopCh)
		go controller.Run(ctx)
		controller.taintedNodes = item.taintedNodes

		holder.setPod(item.prevPod)
@@ -333,7 +333,7 @@ func TestUpdatePod(t *testing.T) {
		if podDeleted != item.expectDelete {
			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
		}
		close(stopCh)
		cancel()
	}
}

@@ -371,11 +371,11 @@ func TestCreateNode(t *testing.T) {
	}

	for _, item := range testCases {
		stopCh := make(chan struct{})
		ctx, cancel := context.WithCancel(context.Background())
		fakeClientset := fake.NewSimpleClientset(&v1.PodList{Items: item.pods})
		controller := NewNoExecuteTaintManager(fakeClientset, getPodFromClientset(fakeClientset), (&nodeHolder{node: item.node}).getNode, getPodsAssignedToNode(fakeClientset))
		controller := NewNoExecuteTaintManager(ctx, fakeClientset, getPodFromClientset(fakeClientset), (&nodeHolder{node: item.node}).getNode, getPodsAssignedToNode(fakeClientset))
		controller.recorder = testutil.NewFakeRecorder()
		go controller.Run(stopCh)
		go controller.Run(ctx)
		controller.NodeUpdated(nil, item.node)
		// wait a bit
		time.Sleep(timeForControllerToProgress)
@@ -389,19 +389,19 @@ func TestCreateNode(t *testing.T) {
		if podDeleted != item.expectDelete {
			t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted)
		}
		close(stopCh)
		cancel()
	}
}

func TestDeleteNode(t *testing.T) {
	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	fakeClientset := fake.NewSimpleClientset()
	controller := NewNoExecuteTaintManager(fakeClientset, getPodFromClientset(fakeClientset), getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
	controller := NewNoExecuteTaintManager(ctx, fakeClientset, getPodFromClientset(fakeClientset), getNodeFromClientset(fakeClientset), getPodsAssignedToNode(fakeClientset))
	controller.recorder = testutil.NewFakeRecorder()
	controller.taintedNodes = map[string][]v1.Taint{
		"node1": {createNoExecuteTaint(1)},
	}
	go controller.Run(stopCh)
	go controller.Run(ctx)
	controller.NodeUpdated(testutil.NewNode("node1"), nil)
	// wait a bit to see if nothing will panic
	time.Sleep(timeForControllerToProgress)
@@ -410,7 +410,7 @@ func TestDeleteNode(t *testing.T) {
		t.Error("Node should have been deleted from taintedNodes list")
	}
	controller.taintedNodesLock.Unlock()
	close(stopCh)
	cancel()
}

func TestUpdateNode(t *testing.T) {
@@ -494,9 +494,9 @@ func TestUpdateNode(t *testing.T) {
	for _, item := range testCases {
		stopCh := make(chan struct{})
		fakeClientset := fake.NewSimpleClientset(&v1.PodList{Items: item.pods})
		controller := NewNoExecuteTaintManager(fakeClientset, getPodFromClientset(fakeClientset), (&nodeHolder{node: item.newNode}).getNode, getPodsAssignedToNode(fakeClientset))
		controller := NewNoExecuteTaintManager(context.TODO(), fakeClientset, getPodFromClientset(fakeClientset), (&nodeHolder{node: item.newNode}).getNode, getPodsAssignedToNode(fakeClientset))
		controller.recorder = testutil.NewFakeRecorder()
		go controller.Run(stopCh)
		go controller.Run(context.TODO())
		controller.NodeUpdated(item.oldNode, item.newNode)
		// wait a bit
		time.Sleep(timeForControllerToProgress)
@@ -537,16 +537,16 @@ func TestUpdateNodeWithMultipleTaints(t *testing.T) {
	singleTaintedNode := testutil.NewNode("node1")
	singleTaintedNode.Spec.Taints = []v1.Taint{taint1}

	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.TODO())
	fakeClientset := fake.NewSimpleClientset(pod)
	holder := &nodeHolder{node: untaintedNode}
	controller := NewNoExecuteTaintManager(fakeClientset, getPodFromClientset(fakeClientset), (holder).getNode, getPodsAssignedToNode(fakeClientset))
	controller := NewNoExecuteTaintManager(context.TODO(), fakeClientset, getPodFromClientset(fakeClientset), (holder).getNode, getPodsAssignedToNode(fakeClientset))
	controller.recorder = testutil.NewFakeRecorder()
	go controller.Run(stopCh)
	go controller.Run(context.TODO())

	// no taint
	holder.setNode(untaintedNode)
	controller.handleNodeUpdate(nodeUpdateItem{"node1"})
	controller.handleNodeUpdate(ctx, nodeUpdateItem{"node1"})
	// verify pod is not queued for deletion
	if controller.taintEvictionQueue.GetWorkerUnsafe(podNamespacedName.String()) != nil {
		t.Fatalf("pod queued for deletion with no taints")
@@ -554,7 +554,7 @@ func TestUpdateNodeWithMultipleTaints(t *testing.T) {

	// no taint -> infinitely tolerated taint
	holder.setNode(singleTaintedNode)
	controller.handleNodeUpdate(nodeUpdateItem{"node1"})
	controller.handleNodeUpdate(ctx, nodeUpdateItem{"node1"})
	// verify pod is not queued for deletion
	if controller.taintEvictionQueue.GetWorkerUnsafe(podNamespacedName.String()) != nil {
		t.Fatalf("pod queued for deletion with permanently tolerated taint")
@@ -562,7 +562,7 @@ func TestUpdateNodeWithMultipleTaints(t *testing.T) {

	// infinitely tolerated taint -> temporarily tolerated taint
	holder.setNode(doubleTaintedNode)
	controller.handleNodeUpdate(nodeUpdateItem{"node1"})
	controller.handleNodeUpdate(ctx, nodeUpdateItem{"node1"})
	// verify pod is queued for deletion
	if controller.taintEvictionQueue.GetWorkerUnsafe(podNamespacedName.String()) == nil {
		t.Fatalf("pod not queued for deletion after addition of temporarily tolerated taint")
@@ -570,7 +570,7 @@ func TestUpdateNodeWithMultipleTaints(t *testing.T) {

	// temporarily tolerated taint -> infinitely tolerated taint
	holder.setNode(singleTaintedNode)
	controller.handleNodeUpdate(nodeUpdateItem{"node1"})
	controller.handleNodeUpdate(ctx, nodeUpdateItem{"node1"})
	// verify pod is not queued for deletion
	if controller.taintEvictionQueue.GetWorkerUnsafe(podNamespacedName.String()) != nil {
		t.Fatalf("pod queued for deletion after removal of temporarily tolerated taint")
@@ -582,7 +582,7 @@ func TestUpdateNodeWithMultipleTaints(t *testing.T) {
			t.Error("Unexpected deletion")
		}
	}
	close(stopCh)
	cancel()
}

func TestUpdateNodeWithMultiplePods(t *testing.T) {
@@ -628,9 +628,9 @@ func TestUpdateNodeWithMultiplePods(t *testing.T) {
		stopCh := make(chan struct{})
		fakeClientset := fake.NewSimpleClientset(&v1.PodList{Items: item.pods})
		sort.Sort(item.expectedDeleteTimes)
		controller := NewNoExecuteTaintManager(fakeClientset, getPodFromClientset(fakeClientset), (&nodeHolder{node: item.newNode}).getNode, getPodsAssignedToNode(fakeClientset))
		controller := NewNoExecuteTaintManager(context.TODO(), fakeClientset, getPodFromClientset(fakeClientset), (&nodeHolder{node: item.newNode}).getNode, getPodsAssignedToNode(fakeClientset))
		controller.recorder = testutil.NewFakeRecorder()
		go controller.Run(stopCh)
 | 
			
		||||
		go controller.Run(context.TODO())
 | 
			
		||||
		controller.NodeUpdated(item.oldNode, item.newNode)
 | 
			
		||||
 | 
			
		||||
		startedAt := time.Now()
 | 
			
		||||
@@ -828,9 +828,9 @@ func TestEventualConsistency(t *testing.T) {
 | 
			
		||||
		stopCh := make(chan struct{})
 | 
			
		||||
		fakeClientset := fake.NewSimpleClientset(&v1.PodList{Items: item.pods})
 | 
			
		||||
		holder := &podHolder{}
 | 
			
		||||
		controller := NewNoExecuteTaintManager(fakeClientset, holder.getPod, (&nodeHolder{node: item.newNode}).getNode, getPodsAssignedToNode(fakeClientset))
 | 
			
		||||
		controller := NewNoExecuteTaintManager(context.TODO(), fakeClientset, holder.getPod, (&nodeHolder{node: item.newNode}).getNode, getPodsAssignedToNode(fakeClientset))
 | 
			
		||||
		controller.recorder = testutil.NewFakeRecorder()
 | 
			
		||||
		go controller.Run(stopCh)
 | 
			
		||||
		go controller.Run(context.TODO())
 | 
			
		||||
 | 
			
		||||
		if item.prevPod != nil {
 | 
			
		||||
			holder.setPod(item.prevPod)
 | 
			
		||||
 
 | 
			
		||||
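The tests above replace stop channels with cancellable contexts. A minimal, self-contained sketch of that migration pattern (not part of the commit; runLoop is a hypothetical stand-in for a controller's Run):

package main

import (
	"context"
	"fmt"
	"time"
)

// runLoop mimics the Run(ctx) shape the taint manager now exposes:
// it processes work until the context is cancelled.
func runLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("stopped:", ctx.Err())
			return
		case <-time.After(10 * time.Millisecond):
			// handle one node/pod update
		}
	}
}

func main() {
	// Before: stopCh := make(chan struct{}); go controller.Run(stopCh); close(stopCh)
	// After: a context carries the same stop signal and can also be plumbed into API calls.
	ctx, cancel := context.WithCancel(context.Background())
	go runLoop(ctx)
	time.Sleep(50 * time.Millisecond)
	cancel() // replaces close(stopCh)
	time.Sleep(20 * time.Millisecond)
}
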
@@ -17,6 +17,7 @@ limitations under the License.
package scheduler

import (
	"context"
	"sync"
	"time"

@@ -49,13 +50,13 @@ type TimedWorker struct {
}

// createWorker creates a TimedWorker that will execute `f` not earlier than `fireAt`.
func createWorker(args *WorkArgs, createdAt time.Time, fireAt time.Time, f func(args *WorkArgs) error, clock clock.WithDelayedExecution) *TimedWorker {
func createWorker(ctx context.Context, args *WorkArgs, createdAt time.Time, fireAt time.Time, f func(ctx context.Context, args *WorkArgs) error, clock clock.WithDelayedExecution) *TimedWorker {
	delay := fireAt.Sub(createdAt)
	if delay <= 0 {
		go f(args)
		go f(ctx, args)
		return nil
	}
	timer := clock.AfterFunc(delay, func() { f(args) })
	timer := clock.AfterFunc(delay, func() { f(ctx, args) })
	return &TimedWorker{
		WorkItem:  args,
		CreatedAt: createdAt,
@@ -76,13 +77,13 @@ type TimedWorkerQueue struct {
	sync.Mutex
	// map of workers keyed by string returned by 'KeyFromWorkArgs' from the given worker.
	workers  map[string]*TimedWorker
	workFunc func(args *WorkArgs) error
	workFunc func(ctx context.Context, args *WorkArgs) error
	clock    clock.WithDelayedExecution
}

// CreateWorkerQueue creates a new TimedWorkerQueue for workers that will execute
// given function `f`.
func CreateWorkerQueue(f func(args *WorkArgs) error) *TimedWorkerQueue {
func CreateWorkerQueue(f func(ctx context.Context, args *WorkArgs) error) *TimedWorkerQueue {
	return &TimedWorkerQueue{
		workers:  make(map[string]*TimedWorker),
		workFunc: f,
@@ -90,9 +91,9 @@ func CreateWorkerQueue(f func(args *WorkArgs) error) *TimedWorkerQueue {
	}
}

func (q *TimedWorkerQueue) getWrappedWorkerFunc(key string) func(args *WorkArgs) error {
	return func(args *WorkArgs) error {
		err := q.workFunc(args)
func (q *TimedWorkerQueue) getWrappedWorkerFunc(key string) func(ctx context.Context, args *WorkArgs) error {
	return func(ctx context.Context, args *WorkArgs) error {
		err := q.workFunc(ctx, args)
		q.Lock()
		defer q.Unlock()
		if err == nil {
@@ -107,7 +108,7 @@ func (q *TimedWorkerQueue) getWrappedWorkerFunc(key string) func(args *WorkArgs)
}

// AddWork adds a work to the WorkerQueue which will be executed not earlier than `fireAt`.
func (q *TimedWorkerQueue) AddWork(args *WorkArgs, createdAt time.Time, fireAt time.Time) {
func (q *TimedWorkerQueue) AddWork(ctx context.Context, args *WorkArgs, createdAt time.Time, fireAt time.Time) {
	key := args.KeyFromWorkArgs()
	klog.V(4).Infof("Adding TimedWorkerQueue item %v at %v to be fired at %v", key, createdAt, fireAt)

@@ -117,7 +118,7 @@ func (q *TimedWorkerQueue) AddWork(args *WorkArgs, createdAt time.Time, fireAt t
		klog.Warningf("Trying to add already existing work for %+v. Skipping.", args)
		return
	}
	worker := createWorker(args, createdAt, fireAt, q.getWrappedWorkerFunc(key), q.clock)
	worker := createWorker(ctx, args, createdAt, fireAt, q.getWrappedWorkerFunc(key), q.clock)
	q.workers[key] = worker
}

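The reworked queue threads the caller's context through to the deferred work function. A sketch (not from the commit) of how a caller inside this package might use it; NewWorkArgs and KeyFromWorkArgs are the package helpers exercised by the tests below, and the imports assumed are "context", "fmt", and "time":

// exampleAddWork is illustrative only: it shows the new context-aware
// CreateWorkerQueue/AddWork shape introduced by this change.
func exampleAddWork(ctx context.Context) {
	queue := CreateWorkerQueue(func(ctx context.Context, args *WorkArgs) error {
		// The work function now sees the caller's context, so any API call
		// made here can be aborted when the controller shuts down.
		fmt.Println("firing", args.KeyFromWorkArgs())
		return nil
	})
	now := time.Now()
	// Schedule the item five seconds out; the timer closure captures ctx.
	queue.AddWork(ctx, NewWorkArgs("1", "1"), now, now.Add(5*time.Second))
	// Adding the same args a second time is a no-op, as the tests verify.
	queue.AddWork(ctx, NewWorkArgs("1", "1"), now, now.Add(5*time.Second))
}
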
@@ -17,6 +17,7 @@ limitations under the License.
package scheduler

import (
	"context"
	"sync"
	"sync/atomic"
	"testing"
@@ -29,23 +30,23 @@ func TestExecute(t *testing.T) {
	testVal := int32(0)
	wg := sync.WaitGroup{}
	wg.Add(5)
	queue := CreateWorkerQueue(func(args *WorkArgs) error {
	queue := CreateWorkerQueue(func(ctx context.Context, args *WorkArgs) error {
		atomic.AddInt32(&testVal, 1)
		wg.Done()
		return nil
	})
	now := time.Now()
	queue.AddWork(NewWorkArgs("1", "1"), now, now)
	queue.AddWork(NewWorkArgs("2", "2"), now, now)
	queue.AddWork(NewWorkArgs("3", "3"), now, now)
	queue.AddWork(NewWorkArgs("4", "4"), now, now)
	queue.AddWork(NewWorkArgs("5", "5"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, now)
	// Adding the same thing second time should be no-op
	queue.AddWork(NewWorkArgs("1", "1"), now, now)
	queue.AddWork(NewWorkArgs("2", "2"), now, now)
	queue.AddWork(NewWorkArgs("3", "3"), now, now)
	queue.AddWork(NewWorkArgs("4", "4"), now, now)
	queue.AddWork(NewWorkArgs("5", "5"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, now)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, now)
	wg.Wait()
	lastVal := atomic.LoadInt32(&testVal)
	if lastVal != 5 {
@@ -57,7 +58,7 @@ func TestExecuteDelayed(t *testing.T) {
	testVal := int32(0)
	wg := sync.WaitGroup{}
	wg.Add(5)
	queue := CreateWorkerQueue(func(args *WorkArgs) error {
	queue := CreateWorkerQueue(func(ctx context.Context, args *WorkArgs) error {
		atomic.AddInt32(&testVal, 1)
		wg.Done()
		return nil
@@ -66,16 +67,16 @@ func TestExecuteDelayed(t *testing.T) {
	then := now.Add(10 * time.Second)
	fakeClock := testingclock.NewFakeClock(now)
	queue.clock = fakeClock
	queue.AddWork(NewWorkArgs("1", "1"), now, then)
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(NewWorkArgs("3", "3"), now, then)
	queue.AddWork(NewWorkArgs("4", "4"), now, then)
	queue.AddWork(NewWorkArgs("5", "5"), now, then)
	queue.AddWork(NewWorkArgs("1", "1"), now, then)
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(NewWorkArgs("3", "3"), now, then)
	queue.AddWork(NewWorkArgs("4", "4"), now, then)
	queue.AddWork(NewWorkArgs("5", "5"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, then)
	fakeClock.Step(11 * time.Second)
	wg.Wait()
	lastVal := atomic.LoadInt32(&testVal)
@@ -88,7 +89,7 @@ func TestCancel(t *testing.T) {
	testVal := int32(0)
	wg := sync.WaitGroup{}
	wg.Add(3)
	queue := CreateWorkerQueue(func(args *WorkArgs) error {
	queue := CreateWorkerQueue(func(ctx context.Context, args *WorkArgs) error {
		atomic.AddInt32(&testVal, 1)
		wg.Done()
		return nil
@@ -97,16 +98,16 @@ func TestCancel(t *testing.T) {
	then := now.Add(10 * time.Second)
	fakeClock := testingclock.NewFakeClock(now)
	queue.clock = fakeClock
	queue.AddWork(NewWorkArgs("1", "1"), now, then)
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(NewWorkArgs("3", "3"), now, then)
	queue.AddWork(NewWorkArgs("4", "4"), now, then)
	queue.AddWork(NewWorkArgs("5", "5"), now, then)
	queue.AddWork(NewWorkArgs("1", "1"), now, then)
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(NewWorkArgs("3", "3"), now, then)
	queue.AddWork(NewWorkArgs("4", "4"), now, then)
	queue.AddWork(NewWorkArgs("5", "5"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, then)
	queue.CancelWork(NewWorkArgs("2", "2").KeyFromWorkArgs())
	queue.CancelWork(NewWorkArgs("4", "4").KeyFromWorkArgs())
	fakeClock.Step(11 * time.Second)
@@ -121,7 +122,7 @@ func TestCancelAndReadd(t *testing.T) {
	testVal := int32(0)
	wg := sync.WaitGroup{}
	wg.Add(4)
	queue := CreateWorkerQueue(func(args *WorkArgs) error {
	queue := CreateWorkerQueue(func(ctx context.Context, args *WorkArgs) error {
		atomic.AddInt32(&testVal, 1)
		wg.Done()
		return nil
@@ -130,19 +131,19 @@ func TestCancelAndReadd(t *testing.T) {
	then := now.Add(10 * time.Second)
	fakeClock := testingclock.NewFakeClock(now)
	queue.clock = fakeClock
	queue.AddWork(NewWorkArgs("1", "1"), now, then)
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(NewWorkArgs("3", "3"), now, then)
	queue.AddWork(NewWorkArgs("4", "4"), now, then)
	queue.AddWork(NewWorkArgs("5", "5"), now, then)
	queue.AddWork(NewWorkArgs("1", "1"), now, then)
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(NewWorkArgs("3", "3"), now, then)
	queue.AddWork(NewWorkArgs("4", "4"), now, then)
	queue.AddWork(NewWorkArgs("5", "5"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("3", "3"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("4", "4"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("5", "5"), now, then)
	queue.CancelWork(NewWorkArgs("2", "2").KeyFromWorkArgs())
	queue.CancelWork(NewWorkArgs("4", "4").KeyFromWorkArgs())
	queue.AddWork(NewWorkArgs("2", "2"), now, then)
	queue.AddWork(context.TODO(), NewWorkArgs("2", "2"), now, then)
	fakeClock.Step(11 * time.Second)
	wg.Wait()
	lastVal := atomic.LoadInt32(&testVal)

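The delayed-execution tests above drive timers with a fake clock instead of sleeping. A sketch of that pattern (not from the commit; it reuses the same testingclock package the tests import, plus "context", "fmt", and "time"):

// exampleFakeClock shows deterministic firing of delayed work: nothing
// runs while the fake clock stands still, and stepping it past the
// deadline fires the timer immediately.
func exampleFakeClock() {
	fired := make(chan string, 1)
	queue := CreateWorkerQueue(func(ctx context.Context, args *WorkArgs) error {
		fired <- args.KeyFromWorkArgs()
		return nil
	})
	now := time.Now()
	fakeClock := testingclock.NewFakeClock(now)
	queue.clock = fakeClock // pin the queue to the fake clock, as the tests do

	// Scheduled ten seconds out; no real waiting is involved.
	queue.AddWork(context.TODO(), NewWorkArgs("1", "1"), now, now.Add(10*time.Second))
	fakeClock.Step(11 * time.Second) // advance past the deadline
	fmt.Println("fired:", <-fired)
}
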
@@ -61,7 +61,7 @@ type PodGCController struct {
	terminatedPodThreshold int
}

func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer,
func NewPodGC(ctx context.Context, kubeClient clientset.Interface, podInformer coreinformers.PodInformer,
	nodeInformer coreinformers.NodeInformer, terminatedPodThreshold int) *PodGCController {
	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -76,30 +76,30 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
		nodeQueue:              workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
		deletePod: func(namespace, name string) error {
			klog.InfoS("PodGC is force deleting Pod", "pod", klog.KRef(namespace, name))
			return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
			return kubeClient.CoreV1().Pods(namespace).Delete(ctx, name, *metav1.NewDeleteOptions(0))
		},
	}

	return gcc
}

func (gcc *PodGCController) Run(stop <-chan struct{}) {
func (gcc *PodGCController) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()

	klog.Infof("Starting GC controller")
	defer gcc.nodeQueue.ShutDown()
	defer klog.Infof("Shutting down GC controller")

	if !cache.WaitForNamedCacheSync("GC", stop, gcc.podListerSynced, gcc.nodeListerSynced) {
	if !cache.WaitForNamedCacheSync("GC", ctx.Done(), gcc.podListerSynced, gcc.nodeListerSynced) {
		return
	}

	go wait.Until(gcc.gc, gcCheckPeriod, stop)
	go wait.UntilWithContext(ctx, gcc.gc, gcCheckPeriod)

	<-stop
	<-ctx.Done()
}

func (gcc *PodGCController) gc() {
func (gcc *PodGCController) gc(ctx context.Context) {
	pods, err := gcc.podLister.List(labels.Everything())
	if err != nil {
		klog.Errorf("Error while listing all pods: %v", err)
@@ -113,7 +113,7 @@ func (gcc *PodGCController) gc() {
	if gcc.terminatedPodThreshold > 0 {
		gcc.gcTerminated(pods)
	}
	gcc.gcOrphaned(pods, nodes)
	gcc.gcOrphaned(ctx, pods, nodes)
	gcc.gcUnscheduledTerminating(pods)
}

@@ -157,7 +157,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
}

// gcOrphaned deletes pods that are bound to nodes that don't exist.
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
func (gcc *PodGCController) gcOrphaned(ctx context.Context, pods []*v1.Pod, nodes []*v1.Node) {
	klog.V(4).Infof("GC'ing orphaned")
	existingNodeNames := sets.NewString()
	for _, node := range nodes {
@@ -170,7 +170,7 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
		}
	}
	// Check if nodes are still missing after quarantine period
	deletedNodesNames, quit := gcc.discoverDeletedNodes(existingNodeNames)
	deletedNodesNames, quit := gcc.discoverDeletedNodes(ctx, existingNodeNames)
	if quit {
		return
	}
@@ -188,7 +188,7 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
	}
}

func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String) (sets.String, bool) {
func (gcc *PodGCController) discoverDeletedNodes(ctx context.Context, existingNodeNames sets.String) (sets.String, bool) {
	deletedNodesNames := sets.NewString()
	for gcc.nodeQueue.Len() > 0 {
		item, quit := gcc.nodeQueue.Get()
@@ -197,7 +197,7 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String)
		}
		nodeName := item.(string)
		if !existingNodeNames.Has(nodeName) {
			exists, err := gcc.checkIfNodeExists(nodeName)
			exists, err := gcc.checkIfNodeExists(ctx, nodeName)
			switch {
			case err != nil:
				klog.ErrorS(err, "Error while getting node", "node", nodeName)
@@ -211,8 +211,8 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String)
	return deletedNodesNames, false
}

func (gcc *PodGCController) checkIfNodeExists(name string) (bool, error) {
	_, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
func (gcc *PodGCController) checkIfNodeExists(ctx context.Context, name string) (bool, error) {
	_, fetchErr := gcc.kubeClient.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
	if errors.IsNotFound(fetchErr) {
		return false, nil
	}

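Run above swaps wait.Until for wait.UntilWithContext. A standalone sketch of the difference (not from the commit; the loop body and periods are invented):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Before: go wait.Until(func() { ... }, period, stopCh)
	// After: the loop body receives ctx, so requests it issues
	// (e.g. pod deletions) are cancelled together with the loop.
	go wait.UntilWithContext(ctx, func(ctx context.Context) {
		fmt.Println("gc pass")
	}, 20*time.Millisecond)

	time.Sleep(70 * time.Millisecond)
	cancel()     // replaces close(stopCh)
	<-ctx.Done() // mirrors the controller blocking until shutdown
}
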
@@ -43,7 +43,7 @@ func NewFromClient(kubeClient clientset.Interface, terminatedPodThreshold int) (
	informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
	podInformer := informerFactory.Core().V1().Pods()
	nodeInformer := informerFactory.Core().V1().Nodes()
	controller := NewPodGC(kubeClient, podInformer, nodeInformer, terminatedPodThreshold)
	controller := NewPodGC(context.TODO(), kubeClient, podInformer, nodeInformer, terminatedPodThreshold)
	controller.podListerSynced = alwaysReady
	return controller, podInformer, nodeInformer
}
@@ -145,7 +145,7 @@ func TestGCTerminated(t *testing.T) {
				})
			}

			gcc.gc()
			gcc.gc(context.TODO())

			if pass := compareStringSetToList(test.deletedPodNames, deletedPodNames); !pass {
				t.Errorf("[%v]pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v",
@@ -336,7 +336,7 @@ func TestGCOrphaned(t *testing.T) {
			}

			// First GC of orphaned pods
			gcc.gc()
			gcc.gc(context.TODO())
			if len(deletedPodNames) > 0 {
				t.Errorf("no pods should be deleted at this point.\n\tactual: %v", deletedPodNames)
			}
@@ -367,7 +367,7 @@ func TestGCOrphaned(t *testing.T) {
			}

			// Actual pod deletion
			gcc.gc()
			gcc.gc(context.TODO())

			if pass := compareStringSetToList(test.deletedPodNames, deletedPodNames); !pass {
				t.Errorf("pod's deleted expected and actual did not match.\n\texpected: %v\n\tactual: %v",

@@ -88,7 +88,7 @@ type Controller struct {
	// missingUsageQueue holds objects that are missing the initial usage information
	missingUsageQueue workqueue.RateLimitingInterface
	// To allow injection of syncUsage for testing.
	syncHandler func(key string) error
	syncHandler func(ctx context.Context, key string) error
	// function that controls full recalculation of quota usage
	resyncPeriod controller.ResyncPeriodFunc
	// knows how to calculate usage
@@ -236,8 +236,8 @@ func (rq *Controller) addQuota(obj interface{}) {
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func() {
	workFunc := func() bool {
func (rq *Controller) worker(ctx context.Context, queue workqueue.RateLimitingInterface) func(context.Context) {
	workFunc := func(ctx context.Context) bool {
		key, quit := queue.Get()
		if quit {
			return true
@@ -245,7 +245,7 @@ func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func() {
		defer queue.Done(key)
		rq.workerLock.RLock()
		defer rq.workerLock.RUnlock()
		err := rq.syncHandler(key.(string))
		err := rq.syncHandler(ctx, key.(string))
		if err == nil {
			queue.Forget(key)
			return false
@@ -255,9 +255,9 @@ func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func() {
		return false
	}

	return func() {
	return func(ctx context.Context) {
		for {
			if quit := workFunc(); quit {
			if quit := workFunc(ctx); quit {
				klog.Infof("resource quota controller worker shutting down")
				return
			}
@@ -266,7 +266,7 @@ func (rq *Controller) worker(queue workqueue.RateLimitingInterface) func() {
}

// Run begins quota controller using the specified number of workers
func (rq *Controller) Run(workers int, stopCh <-chan struct{}) {
func (rq *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer rq.queue.ShutDown()

@@ -274,29 +274,29 @@ func (rq *Controller) Run(workers int, stopCh <-chan struct{}) {
	defer klog.Infof("Shutting down resource quota controller")

	if rq.quotaMonitor != nil {
		go rq.quotaMonitor.Run(stopCh)
		go rq.quotaMonitor.Run(ctx.Done())
	}

	if !cache.WaitForNamedCacheSync("resource quota", stopCh, rq.informerSyncedFuncs...) {
	if !cache.WaitForNamedCacheSync("resource quota", ctx.Done(), rq.informerSyncedFuncs...) {
		return
	}

	// the workers that chug through the quota calculation backlog
	for i := 0; i < workers; i++ {
		go wait.Until(rq.worker(rq.queue), time.Second, stopCh)
		go wait.Until(rq.worker(rq.missingUsageQueue), time.Second, stopCh)
		go wait.UntilWithContext(ctx, rq.worker(ctx, rq.queue), time.Second)
		go wait.UntilWithContext(ctx, rq.worker(ctx, rq.missingUsageQueue), time.Second)
	}
	// the timer for how often we do a full recalculation across all quotas
	if rq.resyncPeriod() > 0 {
		go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
		go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), ctx.Done())
	} else {
		klog.Warningf("periodic quota controller resync disabled")
	}
	<-stopCh
	<-ctx.Done()
}

// syncResourceQuotaFromKey syncs a quota key
func (rq *Controller) syncResourceQuotaFromKey(key string) (err error) {
func (rq *Controller) syncResourceQuotaFromKey(ctx context.Context, key string) (err error) {
	startTime := time.Now()
	defer func() {
		klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
@@ -315,11 +315,11 @@ func (rq *Controller) syncResourceQuotaFromKey(key string) (err error) {
		klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
		return err
	}
	return rq.syncResourceQuota(resourceQuota)
	return rq.syncResourceQuota(ctx, resourceQuota)
}

// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *Controller) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
func (rq *Controller) syncResourceQuota(ctx context.Context, resourceQuota *v1.ResourceQuota) (err error) {
	// quota is dirty if any part of spec hard limits differs from the status hard limits
	statusLimitsDirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)

@@ -361,7 +361,7 @@ func (rq *Controller) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err er

	// there was a change observed by this controller that requires we update quota
	if dirty {
		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(context.TODO(), usage, metav1.UpdateOptions{})
		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(ctx, usage, metav1.UpdateOptions{})
		if err != nil {
			errs = append(errs, err)
		}

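The point of threading ctx into syncResourceQuota is that the final status write inherits cancellation instead of running under context.TODO(). A minimal sketch of that effect (not from the commit; the client and quota values are illustrative):

package quotaexample

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// updateQuotaStatus mirrors the UpdateStatus call above: if ctx is cancelled
// while the request is in flight, client-go aborts the HTTP call and the
// controller unwinds instead of blocking on a doomed write at shutdown.
func updateQuotaStatus(ctx context.Context, c corev1client.ResourceQuotasGetter, usage *v1.ResourceQuota) error {
	_, err := c.ResourceQuotas(usage.Namespace).UpdateStatus(ctx, usage, metav1.UpdateOptions{})
	return err
}
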
@@ -17,6 +17,7 @@ limitations under the License.
package resourcequota

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
@@ -782,7 +783,7 @@ func TestSyncResourceQuota(t *testing.T) {
		qc := setupQuotaController(t, kubeClient, mockListerForResourceFunc(listersForResourceConfig), mockDiscoveryFunc)
		defer close(qc.stop)

		if err := qc.syncResourceQuota(&testCase.quota); err != nil {
		if err := qc.syncResourceQuota(context.TODO(), &testCase.quota); err != nil {
			if len(testCase.expectedError) == 0 || !strings.Contains(err.Error(), testCase.expectedError) {
				t.Fatalf("test: %s, unexpected error: %v", testName, err)
			}

@@ -98,7 +98,7 @@ type ServiceAccountsController struct {
	serviceAccountsToEnsure []v1.ServiceAccount

	// To allow injection for testing.
	syncHandler func(key string) error
	syncHandler func(ctx context.Context, key string) error

	saLister       corelisters.ServiceAccountLister
	saListerSynced cache.InformerSynced
@@ -110,22 +110,22 @@ type ServiceAccountsController struct {
}

// Run runs the ServiceAccountsController blocks until receiving signal from stopCh.
func (c *ServiceAccountsController) Run(workers int, stopCh <-chan struct{}) {
func (c *ServiceAccountsController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	klog.Infof("Starting service account controller")
	defer klog.Infof("Shutting down service account controller")

	if !cache.WaitForNamedCacheSync("service account", stopCh, c.saListerSynced, c.nsListerSynced) {
	if !cache.WaitForNamedCacheSync("service account", ctx.Done(), c.saListerSynced, c.nsListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

// serviceAccountDeleted reacts to a ServiceAccount deletion by recreating a default ServiceAccount in the namespace if needed
@@ -158,20 +158,20 @@ func (c *ServiceAccountsController) namespaceUpdated(oldObj interface{}, newObj
	c.queue.Add(newNamespace.Name)
}

func (c *ServiceAccountsController) runWorker() {
	for c.processNextWorkItem() {
func (c *ServiceAccountsController) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

// processNextWorkItem deals with one key off the queue.  It returns false when it's time to quit.
func (c *ServiceAccountsController) processNextWorkItem() bool {
func (c *ServiceAccountsController) processNextWorkItem(ctx context.Context) bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)

	err := c.syncHandler(key.(string))
	err := c.syncHandler(ctx, key.(string))
	if err == nil {
		c.queue.Forget(key)
		return true
@@ -182,7 +182,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {

	return true
}
func (c *ServiceAccountsController) syncNamespace(key string) error {
func (c *ServiceAccountsController) syncNamespace(ctx context.Context, key string) error {
	startTime := time.Now()
	defer func() {
		klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
@@ -213,7 +213,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
		// TODO eliminate this once the fake client can handle creation without NS
		sa.Namespace = ns.Name

		if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(context.TODO(), &sa, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
		if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(ctx, &sa, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
			// we can safely ignore terminating namespace errors
			if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
				createFailures = append(createFailures, err)

@@ -17,6 +17,7 @@ limitations under the License.
package serviceaccount

import (
	"context"
	"testing"
	"time"

@@ -176,8 +177,8 @@ func TestServiceAccountCreation(t *testing.T) {
		nsStore := nsInformer.Informer().GetStore()

		syncCalls := make(chan struct{})
		controller.syncHandler = func(key string) error {
			err := controller.syncNamespace(key)
		controller.syncHandler = func(ctx context.Context, key string) error {
			err := controller.syncNamespace(ctx, key)
			if err != nil {
				t.Logf("%s: %v", k, err)
			}
@@ -187,7 +188,7 @@ func TestServiceAccountCreation(t *testing.T) {
		}
		stopCh := make(chan struct{})
		defer close(stopCh)
		go controller.Run(1, stopCh)
		go controller.Run(context.TODO(), 1)

		if tc.ExistingNamespace != nil {
			nsStore.Add(tc.ExistingNamespace)

@@ -78,7 +78,7 @@ func NewStorageVersionGC(clientset kubernetes.Interface, leaseInformer coordinfo
}

// Run starts one worker.
func (c *Controller) Run(stopCh <-chan struct{}) {
func (c *Controller) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()
	defer c.leaseQueue.ShutDown()
	defer c.storageVersionQueue.ShutDown()
@@ -86,7 +86,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) {

	klog.Infof("Starting storage version garbage collector")

	if !cache.WaitForCacheSync(stopCh, c.leasesSynced, c.storageVersionSynced) {
	if !cache.WaitForCacheSync(ctx.Done(), c.leasesSynced, c.storageVersionSynced) {
		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}
@@ -96,25 +96,25 @@ func (c *Controller) Run(stopCh <-chan struct{}) {
	// runLeaseWorker handles legit identity lease deletion, while runStorageVersionWorker
	// handles storageversion creation/update with non-existing id. The latter should rarely
	// happen. It's okay for the two workers to conflict on update.
	go wait.Until(c.runLeaseWorker, time.Second, stopCh)
	go wait.Until(c.runStorageVersionWorker, time.Second, stopCh)
	go wait.UntilWithContext(ctx, c.runLeaseWorker, time.Second)
	go wait.UntilWithContext(ctx, c.runStorageVersionWorker, time.Second)

	<-stopCh
	<-ctx.Done()
}

func (c *Controller) runLeaseWorker() {
	for c.processNextLease() {
func (c *Controller) runLeaseWorker(ctx context.Context) {
	for c.processNextLease(ctx) {
	}
}

func (c *Controller) processNextLease() bool {
func (c *Controller) processNextLease(ctx context.Context) bool {
	key, quit := c.leaseQueue.Get()
	if quit {
		return false
	}
	defer c.leaseQueue.Done(key)

	err := c.processDeletedLease(key.(string))
	err := c.processDeletedLease(ctx, key.(string))
	if err == nil {
		c.leaseQueue.Forget(key)
		return true
@@ -125,19 +125,19 @@ func (c *Controller) processNextLease() bool {
	return true
}

func (c *Controller) runStorageVersionWorker() {
	for c.processNextStorageVersion() {
func (c *Controller) runStorageVersionWorker(ctx context.Context) {
	for c.processNextStorageVersion(ctx) {
	}
}

func (c *Controller) processNextStorageVersion() bool {
func (c *Controller) processNextStorageVersion(ctx context.Context) bool {
	key, quit := c.storageVersionQueue.Get()
	if quit {
		return false
	}
	defer c.storageVersionQueue.Done(key)

	err := c.syncStorageVersion(key.(string))
	err := c.syncStorageVersion(ctx, key.(string))
	if err == nil {
		c.storageVersionQueue.Forget(key)
		return true
@@ -148,8 +148,8 @@ func (c *Controller) processNextStorageVersion() bool {
	return true
}

func (c *Controller) processDeletedLease(name string) error {
	_, err := c.kubeclientset.CoordinationV1().Leases(metav1.NamespaceSystem).Get(context.TODO(), name, metav1.GetOptions{})
func (c *Controller) processDeletedLease(ctx context.Context, name string) error {
	_, err := c.kubeclientset.CoordinationV1().Leases(metav1.NamespaceSystem).Get(ctx, name, metav1.GetOptions{})
	// the lease isn't deleted, nothing we need to do here
	if err == nil {
		return nil
@@ -158,7 +158,7 @@ func (c *Controller) processDeletedLease(name string) error {
		return err
	}
	// the frequency of this call won't be too high because we only trigger on identity lease deletions
	storageVersionList, err := c.kubeclientset.InternalV1alpha1().StorageVersions().List(context.TODO(), metav1.ListOptions{})
	storageVersionList, err := c.kubeclientset.InternalV1alpha1().StorageVersions().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
@@ -177,7 +177,7 @@ func (c *Controller) processDeletedLease(name string) error {
		if !hasStaleRecord {
			continue
		}
		if err := c.updateOrDeleteStorageVersion(&sv, serverStorageVersions); err != nil {
		if err := c.updateOrDeleteStorageVersion(ctx, &sv, serverStorageVersions); err != nil {
			errors = append(errors, err)
		}
	}
@@ -185,8 +185,8 @@ func (c *Controller) processDeletedLease(name string) error {
	return utilerrors.NewAggregate(errors)
}

func (c *Controller) syncStorageVersion(name string) error {
	sv, err := c.kubeclientset.InternalV1alpha1().StorageVersions().Get(context.TODO(), name, metav1.GetOptions{})
func (c *Controller) syncStorageVersion(ctx context.Context, name string) error {
	sv, err := c.kubeclientset.InternalV1alpha1().StorageVersions().Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// The problematic storage version that was added/updated recently is gone.
		// Nothing we need to do here.
@@ -199,7 +199,7 @@ func (c *Controller) syncStorageVersion(name string) error {
	hasInvalidID := false
	var serverStorageVersions []apiserverinternalv1alpha1.ServerStorageVersion
	for _, v := range sv.Status.StorageVersions {
		lease, err := c.kubeclientset.CoordinationV1().Leases(metav1.NamespaceSystem).Get(context.TODO(), v.APIServerID, metav1.GetOptions{})
		lease, err := c.kubeclientset.CoordinationV1().Leases(metav1.NamespaceSystem).Get(ctx, v.APIServerID, metav1.GetOptions{})
		if err != nil || lease == nil || lease.Labels == nil ||
			lease.Labels[controlplane.IdentityLeaseComponentLabelKey] != controlplane.KubeAPIServer {
			// We cannot find a corresponding identity lease from apiserver as well.
@@ -212,7 +212,7 @@ func (c *Controller) syncStorageVersion(name string) error {
	if !hasInvalidID {
		return nil
	}
	return c.updateOrDeleteStorageVersion(sv, serverStorageVersions)
	return c.updateOrDeleteStorageVersion(ctx, sv, serverStorageVersions)
}

func (c *Controller) onAddStorageVersion(obj interface{}) {
@@ -266,14 +266,14 @@ func (c *Controller) enqueueLease(obj *coordinationv1.Lease) {
	c.leaseQueue.Add(obj.Name)
}

func (c *Controller) updateOrDeleteStorageVersion(sv *apiserverinternalv1alpha1.StorageVersion, serverStorageVersions []apiserverinternalv1alpha1.ServerStorageVersion) error {
func (c *Controller) updateOrDeleteStorageVersion(ctx context.Context, sv *apiserverinternalv1alpha1.StorageVersion, serverStorageVersions []apiserverinternalv1alpha1.ServerStorageVersion) error {
	if len(serverStorageVersions) == 0 {
		return c.kubeclientset.InternalV1alpha1().StorageVersions().Delete(
			context.TODO(), sv.Name, metav1.DeleteOptions{})
			ctx, sv.Name, metav1.DeleteOptions{})
	}
	sv.Status.StorageVersions = serverStorageVersions
	storageversion.SetCommonEncodingVersion(sv)
	_, err := c.kubeclientset.InternalV1alpha1().StorageVersions().UpdateStatus(
		context.TODO(), sv, metav1.UpdateOptions{})
		ctx, sv, metav1.UpdateOptions{})
	return err
}

@@ -114,22 +114,22 @@ var (
)

// Run begins watching and syncing.
func (ttlc *Controller) Run(workers int, stopCh <-chan struct{}) {
func (ttlc *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer ttlc.queue.ShutDown()

	klog.Infof("Starting TTL controller")
	defer klog.Infof("Shutting down TTL controller")

	if !cache.WaitForNamedCacheSync("TTL", stopCh, ttlc.hasSynced) {
	if !cache.WaitForNamedCacheSync("TTL", ctx.Done(), ttlc.hasSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(ttlc.worker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, ttlc.worker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

func (ttlc *Controller) addNode(obj interface{}) {
@@ -201,19 +201,19 @@ func (ttlc *Controller) enqueueNode(node *v1.Node) {
	ttlc.queue.Add(key)
}

func (ttlc *Controller) worker() {
	for ttlc.processItem() {
func (ttlc *Controller) worker(ctx context.Context) {
	for ttlc.processItem(ctx) {
	}
}

func (ttlc *Controller) processItem() bool {
func (ttlc *Controller) processItem(ctx context.Context) bool {
	key, quit := ttlc.queue.Get()
	if quit {
		return false
	}
	defer ttlc.queue.Done(key)

	err := ttlc.updateNodeIfNeeded(key.(string))
	err := ttlc.updateNodeIfNeeded(ctx, key.(string))
	if err == nil {
		ttlc.queue.Forget(key)
		return true
@@ -254,7 +254,7 @@ func setIntAnnotation(node *v1.Node, annotationKey string, value int) {
	node.Annotations[annotationKey] = strconv.Itoa(value)
}

func (ttlc *Controller) patchNodeWithAnnotation(node *v1.Node, annotationKey string, value int) error {
func (ttlc *Controller) patchNodeWithAnnotation(ctx context.Context, node *v1.Node, annotationKey string, value int) error {
	oldData, err := json.Marshal(node)
	if err != nil {
		return err
@@ -268,7 +268,7 @@ func (ttlc *Controller) patchNodeWithAnnotation(node *v1.Node, annotationKey str
	if err != nil {
		return err
	}
	_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	if err != nil {
		klog.V(2).InfoS("Failed to change ttl annotation for node", "node", klog.KObj(node), "err", err)
		return err
@@ -277,7 +277,7 @@ func (ttlc *Controller) patchNodeWithAnnotation(node *v1.Node, annotationKey str
	return nil
}

func (ttlc *Controller) updateNodeIfNeeded(key string) error {
func (ttlc *Controller) updateNodeIfNeeded(ctx context.Context, key string) error {
	node, err := ttlc.nodeStore.Get(key)
	if err != nil {
		if apierrors.IsNotFound(err) {
@@ -292,5 +292,5 @@ func (ttlc *Controller) updateNodeIfNeeded(key string) error {
		return nil
	}

	return ttlc.patchNodeWithAnnotation(node.DeepCopy(), v1.ObjectTTLAnnotationKey, desiredTTL)
	return ttlc.patchNodeWithAnnotation(ctx, node.DeepCopy(), v1.ObjectTTLAnnotationKey, desiredTTL)
}

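patchNodeWithAnnotation above marshals the node before and after the change and PATCHes the difference with the caller's context. A sketch of the full shape, under the assumption that the elided middle uses the usual two-way strategic-merge-patch helper from apimachinery (the real method takes an int value; this one is simplified to a string):

package ttlexample

import (
	"context"
	"encoding/json"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	clientset "k8s.io/client-go/kubernetes"
)

func patchNodeAnnotation(ctx context.Context, kc clientset.Interface, node *v1.Node, key, value string) error {
	oldData, err := json.Marshal(node) // snapshot before the change
	if err != nil {
		return err
	}
	if node.Annotations == nil {
		node.Annotations = map[string]string{}
	}
	node.Annotations[key] = value
	newData, err := json.Marshal(node) // snapshot after the change
	if err != nil {
		return err
	}
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
	if err != nil {
		return err
	}
	// The context now reaches the PATCH, so shutdown aborts it cleanly.
	_, err = kc.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	return err
}
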
@@ -17,6 +17,7 @@ limitations under the License.
package ttl

import (
	"context"
	"testing"

	"k8s.io/api/core/v1"
@@ -78,7 +79,7 @@ func TestPatchNode(t *testing.T) {
		ttlController := &Controller{
			kubeClient: fakeClient,
		}
		err := ttlController.patchNodeWithAnnotation(testCase.node, v1.ObjectTTLAnnotationKey, testCase.ttlSeconds)
		err := ttlController.patchNodeWithAnnotation(context.TODO(), testCase.node, v1.ObjectTTLAnnotationKey, testCase.ttlSeconds)
		if err != nil {
			t.Errorf("%d: unexpected error: %v", i, err)
			continue
@@ -137,7 +138,7 @@ func TestUpdateNodeIfNeeded(t *testing.T) {
			nodeStore:         listers.NewNodeLister(nodeStore),
			desiredTTLSeconds: testCase.desiredTTL,
		}
		if err := ttlController.updateNodeIfNeeded(testCase.node.Name); err != nil {
		if err := ttlController.updateNodeIfNeeded(context.TODO(), testCase.node.Name); err != nil {
			t.Errorf("%d: unexpected error: %v", i, err)
			continue
		}

@@ -102,22 +102,22 @@ func New(jobInformer batchinformers.JobInformer, client clientset.Interface) *Co
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Run starts the workers to clean up Jobs.
 | 
			
		||||
func (tc *Controller) Run(workers int, stopCh <-chan struct{}) {
 | 
			
		||||
func (tc *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer tc.queue.ShutDown()

	klog.Infof("Starting TTL after finished controller")
	defer klog.Infof("Shutting down TTL after finished controller")

	if !cache.WaitForNamedCacheSync("TTL after finished", stopCh, tc.jListerSynced) {
	if !cache.WaitForNamedCacheSync("TTL after finished", ctx.Done(), tc.jListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(tc.worker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, tc.worker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

func (tc *Controller) addJob(obj interface{}) {
@@ -159,19 +159,19 @@ func (tc *Controller) enqueueAfter(job *batch.Job, after time.Duration) {
	tc.queue.AddAfter(key, after)
}

func (tc *Controller) worker() {
	for tc.processNextWorkItem() {
func (tc *Controller) worker(ctx context.Context) {
	for tc.processNextWorkItem(ctx) {
	}
}

func (tc *Controller) processNextWorkItem() bool {
func (tc *Controller) processNextWorkItem(ctx context.Context) bool {
	key, quit := tc.queue.Get()
	if quit {
		return false
	}
	defer tc.queue.Done(key)

	err := tc.processJob(key.(string))
	err := tc.processJob(ctx, key.(string))
	tc.handleErr(err, key)

	return true
@@ -192,7 +192,7 @@ func (tc *Controller) handleErr(err error, key interface{}) {
// its TTL hasn't expired, it will be added to the queue after the TTL is expected
// to expire.
// This function is not meant to be invoked concurrently with the same key.
func (tc *Controller) processJob(key string) error {
func (tc *Controller) processJob(ctx context.Context, key string) error {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
@@ -218,7 +218,7 @@ func (tc *Controller) processJob(key string) error {
	// Before deleting the Job, do a final sanity check.
	// If TTL is modified before we do this check, we cannot be sure if the TTL truly expires.
	// The latest Job may have a different UID, but it's fine because the checks will be run again.
	fresh, err := tc.client.BatchV1().Jobs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	fresh, err := tc.client.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return nil
	}
@@ -239,7 +239,7 @@ func (tc *Controller) processJob(key string) error {
		Preconditions:     &metav1.Preconditions{UID: &fresh.UID},
	}
	klog.V(4).Infof("Cleaning up Job %s/%s", namespace, name)
	if err := tc.client.BatchV1().Jobs(fresh.Namespace).Delete(context.TODO(), fresh.Name, options); err != nil {
	if err := tc.client.BatchV1().Jobs(fresh.Namespace).Delete(ctx, fresh.Name, options); err != nil {
		return err
	}
	metrics.JobDeletionDurationSeconds.Observe(time.Since(*expiredAt).Seconds())

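The mechanical pattern in the hunks above recurs throughout this commit: Run takes a context.Context instead of a stop channel, workers gain a func(ctx context.Context) shape driven by wait.UntilWithContext, and <-ctx.Done() replaces <-stopCh. A minimal, self-contained sketch of that loop shape (illustrative only; the worker body is a placeholder, not the TTL controller's real queue processing):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// worker matches the signature wait.UntilWithContext expects.
func worker(ctx context.Context) {
	// One pass of queue processing would go here.
	fmt.Println("worker tick")
}

func run(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		// UntilWithContext re-invokes worker every second until ctx is
		// cancelled, replacing wait.Until(worker, time.Second, stopCh).
		go wait.UntilWithContext(ctx, worker, time.Second)
	}
	<-ctx.Done() // block until shutdown, as Run previously did with <-stopCh
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	run(ctx, 2)
}

The advantage over a bare stop channel is that the same ctx value can also carry cancellation into the API calls made further down the stack.
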
@@ -43,7 +43,7 @@ import (
// DeletePods will delete all pods from master running on given node,
// and return true if any pods were deleted, or were found pending
// deletion.
func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
func DeletePods(ctx context.Context, kubeClient clientset.Interface, pods []*v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
	remaining := false
	var updateErrList []error

@@ -60,7 +60,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
		// Pod will be modified, so making copy is required.
		pod := pods[i].DeepCopy()
		// Set reason and message in the pod object.
		if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
		if _, err := SetPodTerminationReason(ctx, kubeClient, pod, nodeName); err != nil {
			if apierrors.IsConflict(err) {
				updateErrList = append(updateErrList,
					fmt.Errorf("update status failed for pod %q: %v", format.Pod(pod), err))
@@ -80,7 +80,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.

		klog.V(2).InfoS("Starting deletion of pod", "pod", klog.KObj(pod))
		recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
		if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil {
		if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
			if apierrors.IsNotFound(err) {
				// NotFound error means that pod was already deleted.
				// There is nothing left to do with this pod.
@@ -100,7 +100,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
// SetPodTerminationReason attempts to set a reason and message in the
// pod status, updates it in the apiserver, and returns an error if it
// encounters one.
func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
func SetPodTerminationReason(ctx context.Context, kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
	if pod.Status.Reason == nodepkg.NodeUnreachablePodReason {
		return pod, nil
	}
@@ -110,7 +110,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa

	var updatedPod *v1.Pod
	var err error
	if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
	if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
		return nil, err
	}
	return updatedPod, nil
@@ -118,7 +118,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa

// MarkPodsNotReady updates the ready status of the given pods running on
// the given node from master, returning an error if any update fails.
func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
func MarkPodsNotReady(ctx context.Context, kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
	klog.V(2).InfoS("Update ready status of pods on node", "node", nodeName)

	errMsg := []string{}
@@ -138,7 +138,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecor
				}

				klog.V(2).InfoS("Updating ready status of pod to false", "pod", pod.Name)
				_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
				_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{})
				if err != nil {
					if apierrors.IsNotFound(err) {
						// NotFound error means that pod was already deleted.
@@ -190,13 +190,13 @@ func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newSta

// SwapNodeControllerTaint returns true in case of success and false
// otherwise.
func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taintsToRemove []*v1.Taint, node *v1.Node) bool {
func SwapNodeControllerTaint(ctx context.Context, kubeClient clientset.Interface, taintsToAdd, taintsToRemove []*v1.Taint, node *v1.Node) bool {
	for _, taintToAdd := range taintsToAdd {
		now := metav1.Now()
		taintToAdd.TimeAdded = &now
	}

	err := controller.AddOrUpdateTaintOnNode(kubeClient, node.Name, taintsToAdd...)
	err := controller.AddOrUpdateTaintOnNode(ctx, kubeClient, node.Name, taintsToAdd...)
	if err != nil {
		utilruntime.HandleError(
			fmt.Errorf(
@@ -208,7 +208,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints
	}
	klog.V(4).InfoS("Added taint to node", "taint", taintsToAdd, "node", node.Name)

	err = controller.RemoveTaintOffNode(kubeClient, node.Name, node, taintsToRemove...)
	err = controller.RemoveTaintOffNode(ctx, kubeClient, node.Name, node, taintsToRemove...)
	if err != nil {
		utilruntime.HandleError(
			fmt.Errorf(

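The node util hunks show the second half of the wiring: once a function accepts ctx, every context.TODO() in its body is replaced so cancellation actually reaches the client-go call. A sketch of the end state, using the fake clientset so it runs standalone (the helper name is illustrative, not one of the functions above):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// deletePod mirrors the plumbing change: the caller's ctx, not context.TODO(),
// reaches the client call, so cancelling ctx aborts the in-flight request.
func deletePod(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	return client.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}

func main() {
	client := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"},
	})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fmt.Println(deletePod(ctx, client, "default", "demo")) // <nil>
}
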
@@ -45,7 +45,7 @@ import (

// Controller creates PVCs for ephemeral inline volumes in a pod spec.
type Controller interface {
	Run(workers int, stopCh <-chan struct{})
	Run(ctx context.Context, workers int)
}

type ephemeralController struct {
@@ -163,37 +163,37 @@ func (ec *ephemeralController) onPVCDelete(obj interface{}) {
	}
}

func (ec *ephemeralController) Run(workers int, stopCh <-chan struct{}) {
func (ec *ephemeralController) Run(ctx context.Context, workers int) {
	defer runtime.HandleCrash()
	defer ec.queue.ShutDown()

	klog.Infof("Starting ephemeral volume controller")
	defer klog.Infof("Shutting down ephemeral volume controller")

	if !cache.WaitForNamedCacheSync("ephemeral", stopCh, ec.podSynced, ec.pvcsSynced) {
	if !cache.WaitForNamedCacheSync("ephemeral", ctx.Done(), ec.podSynced, ec.pvcsSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(ec.runWorker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, ec.runWorker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

func (ec *ephemeralController) runWorker() {
	for ec.processNextWorkItem() {
func (ec *ephemeralController) runWorker(ctx context.Context) {
	for ec.processNextWorkItem(ctx) {
	}
}

func (ec *ephemeralController) processNextWorkItem() bool {
func (ec *ephemeralController) processNextWorkItem(ctx context.Context) bool {
	key, shutdown := ec.queue.Get()
	if shutdown {
		return false
	}
	defer ec.queue.Done(key)

	err := ec.syncHandler(key.(string))
	err := ec.syncHandler(ctx, key.(string))
	if err == nil {
		ec.queue.Forget(key)
		return true
@@ -207,7 +207,7 @@ func (ec *ephemeralController) processNextWorkItem() bool {

// syncHandler is invoked for each pod which might need to be processed.
// If an error is returned from this function, the pod will be requeued.
func (ec *ephemeralController) syncHandler(key string) error {
func (ec *ephemeralController) syncHandler(ctx context.Context, key string) error {
	namespace, name, err := kcache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
@@ -229,7 +229,7 @@ func (ec *ephemeralController) syncHandler(key string) error {
	}

	for _, vol := range pod.Spec.Volumes {
		if err := ec.handleVolume(pod, vol); err != nil {
		if err := ec.handleVolume(ctx, pod, vol); err != nil {
			ec.recorder.Event(pod, v1.EventTypeWarning, events.FailedBinding, fmt.Sprintf("ephemeral volume %s: %v", vol.Name, err))
			return fmt.Errorf("pod %s, ephemeral volume %s: %v", key, vol.Name, err)
		}
@@ -239,7 +239,7 @@ func (ec *ephemeralController) syncHandler(key string) error {
}

// handleVolume is invoked for each volume of a pod.
func (ec *ephemeralController) handleVolume(pod *v1.Pod, vol v1.Volume) error {
func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vol v1.Volume) error {
	klog.V(5).Infof("ephemeral: checking volume %s", vol.Name)
	if vol.Ephemeral == nil {
		return nil
@@ -280,7 +280,7 @@ func (ec *ephemeralController) handleVolume(pod *v1.Pod, vol v1.Volume) error {
		Spec: vol.Ephemeral.VolumeClaimTemplate.Spec,
	}
	ephemeralvolumemetrics.EphemeralVolumeCreateAttempts.Inc()
	_, err = ec.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
	_, err = ec.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
	if err != nil {
		ephemeralvolumemetrics.EphemeralVolumeCreateFailures.Inc()
		return fmt.Errorf("create PVC %s: %v", pvcName, err)

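With the ephemeral Controller interface now exposing Run(ctx, workers), any caller still holding only a stop channel needs a bridge. A small adapter along these lines would work (hypothetical helper, not part of this commit):

package main

import (
	"context"
	"fmt"
)

// contextForStopCh bridges legacy stop-channel plumbing to the new
// Run(ctx, workers) interface shape (hypothetical helper for illustration).
func contextForStopCh(stopCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		defer cancel()
		select {
		case <-stopCh: // legacy shutdown signal
		case <-ctx.Done():
		}
	}()
	return ctx, cancel
}

func main() {
	stopCh := make(chan struct{})
	ctx, cancel := contextForStopCh(stopCh)
	defer cancel()
	close(stopCh)          // legacy caller shuts down...
	<-ctx.Done()           // ...and the derived context observes it
	fmt.Println(ctx.Err()) // context canceled
}
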
@@ -160,7 +160,7 @@ func TestSyncHandler(t *testing.T) {
			informerFactory.WaitForCacheSync(ctx.Done())
			cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced, pvcInformer.Informer().HasSynced)

			err = ec.syncHandler(tc.podKey)
			err = ec.syncHandler(context.TODO(), tc.podKey)
			if err != nil && !tc.expectedError {
				t.Fatalf("unexpected error while running handler: %v", err)
			}

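Test call sites take the cheap route: where no cancellation behavior is under test, context.TODO() is passed straight through to the new parameter. Sketched shape (the handler is a stand-in, not the ephemeral controller's):

package demo

import (
	"context"
	"testing"
)

// syncHandler stands in for any handler that now takes a context first
// (hypothetical; the real handlers live in the controller packages).
func syncHandler(ctx context.Context, key string) error { return nil }

func TestSyncHandler(t *testing.T) {
	// Tests that don't exercise cancellation can pass context.TODO();
	// tests that do should use context.WithCancel and call cancel().
	if err := syncHandler(context.TODO(), "default/pod"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
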
@@ -60,7 +60,7 @@ const (

// ExpandController expands the pvs
type ExpandController interface {
	Run(stopCh <-chan struct{})
	Run(ctx context.Context)
}

// CSINameTranslator can get the CSI Driver name based on the in-tree plugin name
@@ -188,14 +188,14 @@ func (expc *expandController) enqueuePVC(obj interface{}) {
	}
}

func (expc *expandController) processNextWorkItem() bool {
func (expc *expandController) processNextWorkItem(ctx context.Context) bool {
	key, shutdown := expc.queue.Get()
	if shutdown {
		return false
	}
	defer expc.queue.Done(key)

	err := expc.syncHandler(key.(string))
	err := expc.syncHandler(ctx, key.(string))
	if err == nil {
		expc.queue.Forget(key)
		return true
@@ -209,7 +209,7 @@ func (expc *expandController) processNextWorkItem() bool {

// syncHandler performs actual expansion of volume. If an error is returned
// from this function - PVC will be requeued for resizing.
func (expc *expandController) syncHandler(key string) error {
func (expc *expandController) syncHandler(ctx context.Context, key string) error {
	namespace, name, err := kcache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
@@ -223,7 +223,7 @@ func (expc *expandController) syncHandler(key string) error {
		return err
	}

	pv, err := expc.getPersistentVolume(pvc)
	pv, err := expc.getPersistentVolume(ctx, pvc)
	if err != nil {
		klog.V(5).Infof("Error getting Persistent Volume for PVC %q (uid: %q) from informer : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), pvc.UID, err)
		return err
@@ -320,32 +320,32 @@ func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.Persi
}

// TODO make concurrency configurable (workers argument). previously, nestedpendingoperations spawned unlimited goroutines
func (expc *expandController) Run(stopCh <-chan struct{}) {
func (expc *expandController) Run(ctx context.Context) {
	defer runtime.HandleCrash()
	defer expc.queue.ShutDown()

	klog.Infof("Starting expand controller")
	defer klog.Infof("Shutting down expand controller")

	if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced) {
	if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced, expc.pvSynced) {
		return
	}

	for i := 0; i < defaultWorkerCount; i++ {
		go wait.Until(expc.runWorker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, expc.runWorker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

func (expc *expandController) runWorker() {
	for expc.processNextWorkItem() {
func (expc *expandController) runWorker(ctx context.Context) {
	for expc.processNextWorkItem(ctx) {
	}
}

func (expc *expandController) getPersistentVolume(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
func (expc *expandController) getPersistentVolume(ctx context.Context, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	volumeName := pvc.Spec.VolumeName
	pv, err := expc.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volumeName, metav1.GetOptions{})
	pv, err := expc.kubeClient.CoreV1().PersistentVolumes().Get(ctx, volumeName, metav1.GetOptions{})

	if err != nil {
		return nil, fmt.Errorf("failed to get PV %q: %v", volumeName, err)

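The expand controller follows the same queue-drain shape as the others; what's worth noticing is how far ctx travels: Run → runWorker → processNextWorkItem → syncHandler → client calls. A runnable reduction of that chain against a real client-go workqueue (the syncHandler here is a stub, not the expand controller's):

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// controller is a minimal stand-in with the same ctx-threading shape.
type controller struct {
	queue       workqueue.RateLimitingInterface
	syncHandler func(ctx context.Context, key string) error
}

func (c *controller) processNextWorkItem(ctx context.Context) bool {
	key, shutdown := c.queue.Get()
	if shutdown {
		return false
	}
	defer c.queue.Done(key)

	// ctx flows from Run through the worker loop into the handler,
	// and from there into any API requests the handler makes.
	if err := c.syncHandler(ctx, key.(string)); err != nil {
		c.queue.AddRateLimited(key) // requeue on failure
		return true
	}
	c.queue.Forget(key)
	return true
}

func main() {
	c := &controller{
		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		syncHandler: func(ctx context.Context, key string) error {
			fmt.Println("synced", key)
			return nil
		},
	}
	c.queue.Add("default/pvc-1")
	c.processNextWorkItem(context.Background())
}
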
@@ -17,6 +17,7 @@ limitations under the License.
package expand

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
@@ -157,7 +158,7 @@ func TestSyncHandler(t *testing.T) {
			return true, pvc, nil
		})

		err = expController.syncHandler(test.pvcKey)
		err = expController.syncHandler(context.TODO(), test.pvcKey)
		if err != nil && !test.hasError {
			t.Fatalf("for: %s; unexpected error while running handler : %v", test.name, err)
		}

@@ -17,6 +17,7 @@ limitations under the License.
package persistentvolume

import (
	"context"
	"fmt"
	"reflect"
	"strings"
@@ -490,11 +491,11 @@ func claimWithAccessMode(modes []v1.PersistentVolumeAccessMode, claims []*v1.Per
}

func testSyncClaim(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
	return ctrl.syncClaim(test.initialClaims[0])
	return ctrl.syncClaim(context.TODO(), test.initialClaims[0])
}

func testSyncClaimError(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
	err := ctrl.syncClaim(test.initialClaims[0])
	err := ctrl.syncClaim(context.TODO(), test.initialClaims[0])

	if err != nil {
		return nil
@@ -503,7 +504,7 @@ func testSyncClaimError(ctrl *PersistentVolumeController, reactor *pvtesting.Vol
}

func testSyncVolume(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
	return ctrl.syncVolume(test.initialVolumes[0])
	return ctrl.syncVolume(context.TODO(), test.initialVolumes[0])
}

type operationType string
@@ -797,7 +798,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
				claim := obj.(*v1.PersistentVolumeClaim)
				// Simulate "claim updated" event
				ctrl.claims.Update(claim)
				err = ctrl.syncClaim(claim)
				err = ctrl.syncClaim(context.TODO(), claim)
				if err != nil {
					if err == pvtesting.ErrVersionConflict {
						// Ignore version errors
@@ -814,7 +815,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
				volume := obj.(*v1.PersistentVolume)
				// Simulate "volume updated" event
				ctrl.volumes.store.Update(volume)
				err = ctrl.syncVolume(volume)
				err = ctrl.syncVolume(context.TODO(), volume)
				if err != nil {
					if err == pvtesting.ErrVersionConflict {
						// Ignore version errors

@@ -17,6 +17,7 @@ limitations under the License.
package persistentvolume

import (
	"context"
	"errors"
	"testing"

@@ -652,7 +653,7 @@ func TestDisablingDynamicProvisioner(t *testing.T) {
	if err != nil {
		t.Fatalf("Construct PersistentVolume controller failed: %v", err)
	}
	retVal := ctrl.provisionClaim(nil)
	retVal := ctrl.provisionClaim(context.TODO(), nil)
	if retVal != nil {
		t.Errorf("Expected nil return but got %v", retVal)
	}

@@ -249,12 +249,12 @@ type PersistentVolumeController struct {
// these events.
// For easier readability, it was split into syncUnboundClaim and syncBoundClaim
// methods.
func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClaim) error {
func (ctrl *PersistentVolumeController) syncClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) error {
	klog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim))

	// Set correct "migrated-to" annotations on PVC and update in API server if
	// necessary
	newClaim, err := ctrl.updateClaimMigrationAnnotations(claim)
	newClaim, err := ctrl.updateClaimMigrationAnnotations(ctx, claim)
	if err != nil {
		// Nothing was saved; we will fall back into the same
		// condition in the next call to this method
@@ -263,7 +263,7 @@ func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClai
	claim = newClaim

	if !metav1.HasAnnotation(claim.ObjectMeta, pvutil.AnnBindCompleted) {
		return ctrl.syncUnboundClaim(claim)
		return ctrl.syncUnboundClaim(ctx, claim)
	} else {
		return ctrl.syncBoundClaim(claim)
	}
@@ -330,7 +330,7 @@ func (ctrl *PersistentVolumeController) emitEventForUnboundDelayBindingClaim(cla

// syncUnboundClaim is the main controller method to decide what to do with an
// unbound claim.
func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVolumeClaim) error {
func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) error {
	// This is a new PVC that has not completed binding
	// OBSERVATION: pvc is "Pending"
	if claim.Spec.VolumeName == "" {
@@ -356,7 +356,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
				return err
			}
		case storagehelpers.GetPersistentVolumeClaimClass(claim) != "":
			if err = ctrl.provisionClaim(claim); err != nil {
			if err = ctrl.provisionClaim(ctx, claim); err != nil {
				return err
			}
			return nil
@@ -539,12 +539,12 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum
// It's invoked by appropriate cache.Controller callbacks when a volume is
// created, updated or periodically synced. We do not differentiate between
// these events.
func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) error {
func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume *v1.PersistentVolume) error {
	klog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume))

	// Set correct "migrated-to" annotations on PV and update in API server if
	// necessary
	newVolume, err := ctrl.updateVolumeMigrationAnnotations(volume)
	newVolume, err := ctrl.updateVolumeMigrationAnnotations(ctx, volume)
	if err != nil {
		// Nothing was saved; we will fall back into the same
		// condition in the next call to this method
@@ -1451,7 +1451,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu

// provisionClaim starts new asynchronous operation to provision a claim if
// provisioning is enabled.
func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolumeClaim) error {
func (ctrl *PersistentVolumeController) provisionClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) error {
	if !ctrl.enableDynamicProvisioning {
		return nil
	}
@@ -1474,9 +1474,9 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum
		ctrl.operationTimestamps.AddIfNotExist(claimKey, ctrl.getProvisionerName(plugin, storageClass), "provision")
		var err error
		if plugin == nil {
			_, err = ctrl.provisionClaimOperationExternal(claim, storageClass)
			_, err = ctrl.provisionClaimOperationExternal(ctx, claim, storageClass)
		} else {
			_, err = ctrl.provisionClaimOperation(claim, plugin, storageClass)
			_, err = ctrl.provisionClaimOperation(ctx, claim, plugin, storageClass)
		}
		// if error happened, record an error count metric
		// timestamp entry will remain in cache until a success binding has happened
@@ -1491,6 +1491,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum
// provisionClaimOperation provisions a volume. This method is running in
// standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) provisionClaimOperation(
	ctx context.Context,
	claim *v1.PersistentVolumeClaim,
	plugin vol.ProvisionableVolumePlugin,
	storageClass *storage.StorageClass) (string, error) {
@@ -1513,7 +1514,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
	klog.V(4).Infof("provisionClaimOperation [%s]: plugin name: %s, provisioner name: %s", claimToClaimKey(claim), pluginName, provisionerName)

	// Add provisioner annotation to be consistent with external provisioner workflow
	newClaim, err := ctrl.setClaimProvisioner(claim, provisionerName)
	newClaim, err := ctrl.setClaimProvisioner(ctx, claim, provisionerName)
	if err != nil {
		// Save failed, the controller will retry in the next sync
		klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err)
@@ -1696,6 +1697,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
// provisionClaimOperationExternal provisions a volume using an external provisioner asynchronously.
// This method will be running in a standalone go-routine scheduled in "provisionClaim"
func (ctrl *PersistentVolumeController) provisionClaimOperationExternal(
	ctx context.Context,
	claim *v1.PersistentVolumeClaim,
	storageClass *storage.StorageClass) (string, error) {
	claimClass := storagehelpers.GetPersistentVolumeClaimClass(claim)
@@ -1714,7 +1716,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperationExternal(
		}
	}
	// Add provisioner annotation so external provisioners know when to start
	newClaim, err := ctrl.setClaimProvisioner(claim, provisionerName)
	newClaim, err := ctrl.setClaimProvisioner(ctx, claim, provisionerName)
	if err != nil {
		// Save failed, the controller will retry in the next sync
		klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err)

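These pv_controller.go hunks push ctx through the whole claim/volume sync chain, down to setClaimProvisioner and the provision operations. The payoff is that one cancellation at the top aborts the API round-trip at the bottom; a stdlib-only reduction of that behavior (function names are illustrative, not the controller's real methods):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// apiUpdate simulates a client call that respects ctx cancellation.
func apiUpdate(ctx context.Context) error {
	select {
	case <-time.After(time.Second): // pretend network round-trip
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func provisionClaim(ctx context.Context) error { return apiUpdate(ctx) }
func syncClaim(ctx context.Context) error      { return provisionClaim(ctx) }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	err := syncClaim(ctx)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
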
@@ -204,7 +204,7 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (boo

// updateVolume runs in worker thread and handles "volume added",
// "volume updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume) {
func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) {
	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	new, err := ctrl.storeVolumeUpdate(volume)
@@ -215,7 +215,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
		return
	}

	err = ctrl.syncVolume(volume)
	err = ctrl.syncVolume(ctx, volume)
	if err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
@@ -252,7 +252,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume

// updateClaim runs in worker thread and handles "claim added",
// "claim updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeClaim) {
func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
	// Store the new claim version in the cache and do not process it if this is
	// an old version.
	new, err := ctrl.storeClaimUpdate(claim)
@@ -262,7 +262,7 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
	if !new {
		return
	}
	err = ctrl.syncClaim(claim)
	err = ctrl.syncClaim(ctx, claim)
	if err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
@@ -300,7 +300,7 @@ func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeCl
}

// Run starts all of this controller's control loops
func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
func (ctrl *PersistentVolumeController) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()
	defer ctrl.claimQueue.ShutDown()
	defer ctrl.volumeQueue.ShutDown()
@@ -308,22 +308,22 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
	klog.Infof("Starting persistent volume controller")
	defer klog.Infof("Shutting down persistent volume controller")

	if !cache.WaitForNamedCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
	if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
		return
	}

	ctrl.initializeCaches(ctrl.volumeLister, ctrl.claimLister)

	go wait.Until(ctrl.resync, ctrl.resyncPeriod, stopCh)
	go wait.Until(ctrl.volumeWorker, time.Second, stopCh)
	go wait.Until(ctrl.claimWorker, time.Second, stopCh)
	go wait.Until(ctrl.resync, ctrl.resyncPeriod, ctx.Done())
	go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second)
	go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second)

	metrics.Register(ctrl.volumes.store, ctrl.claims, &ctrl.volumePluginMgr)

	<-stopCh
	<-ctx.Done()
}

func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx context.Context, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
	// TODO: update[Claim|Volume]MigrationAnnotations can be optimized to not
	// copy the claim/volume if no modifications are required. Though this
	// requires some refactoring as well as an interesting change in the
@@ -335,7 +335,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v
	if !modified {
		return claimClone, nil
	}
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
	}
@@ -346,13 +346,13 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(claim *v
	return newClaim, nil
}

func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(volume *v1.PersistentVolume) (*v1.PersistentVolume, error) {
func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotations(ctx context.Context, volume *v1.PersistentVolume) (*v1.PersistentVolume, error) {
	volumeClone := volume.DeepCopy()
	modified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false)
	if !modified {
		return volumeClone, nil
	}
	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{})
	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(ctx, volumeClone, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
	}
@@ -424,8 +424,8 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam

// volumeWorker processes items from volumeQueue. It must run only once,
// syncVolume is not assured to be reentrant.
func (ctrl *PersistentVolumeController) volumeWorker() {
	workFunc := func() bool {
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
	workFunc := func(ctx context.Context) bool {
		keyObj, quit := ctrl.volumeQueue.Get()
		if quit {
			return true
@@ -443,7 +443,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
		if err == nil {
			// The volume still exists in informer cache, the event must have
			// been add/update/sync
			ctrl.updateVolume(volume)
			ctrl.updateVolume(ctx, volume)
			return false
		}
		if !errors.IsNotFound(err) {
@@ -473,7 +473,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
		return false
	}
	for {
		if quit := workFunc(); quit {
		if quit := workFunc(ctx); quit {
			klog.Infof("volume worker queue shutting down")
			return
		}
@@ -482,7 +482,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {

// claimWorker processes items from claimQueue. It must run only once,
// syncClaim is not reentrant.
func (ctrl *PersistentVolumeController) claimWorker() {
func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
	workFunc := func() bool {
		keyObj, quit := ctrl.claimQueue.Get()
		if quit {
@@ -501,7 +501,7 @@ func (ctrl *PersistentVolumeController) claimWorker() {
		if err == nil {
			// The claim still exists in informer cache, the event must have
			// been add/update/sync
			ctrl.updateClaim(claim)
			ctrl.updateClaim(ctx, claim)
			return false
		}
		if !errors.IsNotFound(err) {
@@ -564,7 +564,7 @@ func (ctrl *PersistentVolumeController) resync() {

// setClaimProvisioner saves
// claim.Annotations["volume.kubernetes.io/storage-provisioner"] = class.Provisioner
func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.PersistentVolumeClaim, provisionerName string) (*v1.PersistentVolumeClaim, error) {
func (ctrl *PersistentVolumeController) setClaimProvisioner(ctx context.Context, claim *v1.PersistentVolumeClaim, provisionerName string) (*v1.PersistentVolumeClaim, error) {
	if val, ok := claim.Annotations[pvutil.AnnStorageProvisioner]; ok && val == provisionerName {
		// annotation is already set, nothing to do
		return claim, nil
@@ -577,7 +577,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnBetaStorageProvisioner, provisionerName)
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnStorageProvisioner, provisionerName)
	updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		return newClaim, err
	}

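Run ends up in a deliberately mixed style during the migration: resync has no context parameter yet, so it stays on wait.Until fed by ctx.Done(), while the reworked workers move to wait.UntilWithContext. A compact sketch of that coexistence (the two loop bodies are placeholders):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// resync has no ctx parameter yet, so it keeps the channel-based helper.
func resync() { fmt.Println("resync") }

// claimWorker is already context-aware.
func claimWorker(ctx context.Context) { fmt.Println("claim worker") }

func run(ctx context.Context) {
	// Both loops stop at the same moment: ctx.Done() is the stop channel.
	go wait.Until(resync, 500*time.Millisecond, ctx.Done())
	go wait.UntilWithContext(ctx, claimWorker, 500*time.Millisecond)
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	run(ctx)
}
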
@@ -17,6 +17,7 @@ limitations under the License.
package persistentvolume

import (
	"context"
	"errors"
	"reflect"
	"testing"
@@ -341,10 +342,10 @@ func TestControllerSync(t *testing.T) {
		}

		// Start the controller
		stopCh := make(chan struct{})
		informers.Start(stopCh)
		informers.WaitForCacheSync(stopCh)
		go ctrl.Run(stopCh)
		ctx, cancel := context.WithCancel(context.TODO())
		informers.Start(ctx.Done())
		informers.WaitForCacheSync(ctx.Done())
		go ctrl.Run(ctx)

		// Wait for the controller to pass initial sync and fill its caches.
		err = wait.Poll(10*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
@@ -369,7 +370,7 @@ func TestControllerSync(t *testing.T) {
		if err != nil {
			t.Errorf("Failed to run test %s: %v", test.name, err)
		}
		close(stopCh)
		cancel()

		evaluateTestResults(ctrl, reactor.VolumeReactor, test, t)
	}

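TestControllerSync shows the standard test conversion: context.WithCancel replaces the stop channel, cancel() replaces close(stopCh), and ctx.Done() still feeds channel-based APIs such as informers.Start. Reduced to its skeleton (the run function is a stand-in for a controller's Run loop):

package demo

import (
	"context"
	"testing"
)

// run stands in for a controller Run(ctx) loop (hypothetical).
func run(ctx context.Context) { <-ctx.Done() }

func TestRunShutsDownOnCancel(t *testing.T) {
	// context.WithCancel replaces stopCh := make(chan struct{});
	// cancel() replaces close(stopCh), and ctx.Done() feeds any API
	// that still wants a channel.
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() { run(ctx); close(done) }()
	cancel()
	<-done // returns promptly once ctx is cancelled
}
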
@@ -100,31 +100,31 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
}

// Run runs the controller goroutines.
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
func (c *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	klog.InfoS("Starting PVC protection controller")
	defer klog.InfoS("Shutting down PVC protection controller")

	if !cache.WaitForNamedCacheSync("PVC protection", stopCh, c.pvcListerSynced, c.podListerSynced) {
	if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
func (c *Controller) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

// processNextWorkItem deals with one pvcKey off the queue.  It returns false when it's time to quit.
func (c *Controller) processNextWorkItem() bool {
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
	pvcKey, quit := c.queue.Get()
	if quit {
		return false
@@ -137,7 +137,7 @@ func (c *Controller) processNextWorkItem() bool {
		return true
	}

	err = c.processPVC(pvcNamespace, pvcName)
	err = c.processPVC(ctx, pvcNamespace, pvcName)
	if err == nil {
		c.queue.Forget(pvcKey)
		return true
@@ -149,7 +149,7 @@ func (c *Controller) processNextWorkItem() bool {
	return true
}

func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName string) error {
	klog.V(4).InfoS("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName))
	startTime := time.Now()
	defer func() {
@@ -168,12 +168,12 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
	if protectionutil.IsDeletionCandidate(pvc, volumeutil.PVCProtectionFinalizer) {
		// PVC should be deleted. Check if it's used and remove finalizer if
		// it's not.
		isUsed, err := c.isBeingUsed(pvc)
		isUsed, err := c.isBeingUsed(ctx, pvc)
		if err != nil {
			return err
		}
		if !isUsed {
			return c.removeFinalizer(pvc)
			return c.removeFinalizer(ctx, pvc)
		}
		klog.V(2).InfoS("Keeping PVC because it is being used", "PVC", klog.KObj(pvc))
	}
@@ -183,19 +183,19 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
		// finalizer should be added by admission plugin, this is just to add
		// the finalizer to old PVCs that were created before the admission
		// plugin was enabled.
		return c.addFinalizer(pvc)
		return c.addFinalizer(ctx, pvc)
	}
	return nil
}

func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeClaim) error {
	// Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
	if !c.storageObjectInUseProtectionEnabled {
		return nil
	}
	claimClone := pvc.DeepCopy()
	claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		klog.ErrorS(err, "Error adding protection finalizer to PVC", "PVC", klog.KObj(pvc))
		return err
@@ -204,10 +204,10 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
	return nil
}

func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
func (c *Controller) removeFinalizer(ctx context.Context, pvc *v1.PersistentVolumeClaim) error {
	claimClone := pvc.DeepCopy()
	claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
	_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		klog.ErrorS(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc))
		return err
@@ -216,7 +216,7 @@ func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
	return nil
}

func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeClaim) (bool, error) {
	// Look for a Pod using pvc in the Informer's cache. If one is found the
	// correct decision to keep pvc is taken without doing an expensive live
	// list.
@@ -231,7 +231,7 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
	// mean such a Pod doesn't exist: it might just not be in the cache yet. To
	// be 100% confident that it is safe to delete pvc make sure no Pod is using
	// it among those returned by a live list.
	return c.askAPIServer(pvc)
	return c.askAPIServer(ctx, pvc)
}

func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
@@ -260,10 +260,10 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
	return false, nil
}

func (c *Controller) askAPIServer(pvc *v1.PersistentVolumeClaim) (bool, error) {
func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeClaim) (bool, error) {
	klog.V(4).InfoS("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc))

	podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(context.TODO(), metav1.ListOptions{})
	podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return false, fmt.Errorf("live list of pods failed: %s", err.Error())
	}

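addFinalizer and removeFinalizer keep the usual deep-copy-then-Update shape, only now the caller's ctx rides along to the apiserver. A standalone sketch against the fake clientset (the finalizer value and helper name are illustrative, not the controller's):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// addFinalizer mirrors the deep-copy-then-Update(ctx, ...) pattern used by
// the protection controllers.
func addFinalizer(ctx context.Context, client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) error {
	clone := pvc.DeepCopy() // never mutate the shared informer copy
	clone.ObjectMeta.Finalizers = append(clone.ObjectMeta.Finalizers, "example.io/pvc-protection")
	_, err := client.CoreV1().PersistentVolumeClaims(clone.Namespace).Update(ctx, clone, metav1.UpdateOptions{})
	return err
}

func main() {
	pvc := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "data"}}
	client := fake.NewSimpleClientset(pvc)
	fmt.Println(addFinalizer(context.Background(), client, pvc)) // <nil>
}
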
@@ -17,6 +17,7 @@ limitations under the License.
package pvcprotection

import (
	"context"
	"errors"
	"reflect"
	"testing"
@@ -476,7 +477,7 @@ func TestPVCProtectionController(t *testing.T) {
			}
			if ctrl.queue.Len() > 0 {
				klog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
				ctrl.processNextWorkItem()
				ctrl.processNextWorkItem(context.TODO())
			}
			if ctrl.queue.Len() > 0 {
				// There is still some work in the queue, process it now

@@ -76,31 +76,31 @@ func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer
}

// Run runs the controller goroutines.
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
func (c *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	klog.Infof("Starting PV protection controller")
	defer klog.Infof("Shutting down PV protection controller")

	if !cache.WaitForNamedCacheSync("PV protection", stopCh, c.pvListerSynced) {
	if !cache.WaitForNamedCacheSync("PV protection", ctx.Done(), c.pvListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
	}

	<-stopCh
	<-ctx.Done()
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
func (c *Controller) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

// processNextWorkItem deals with one pvKey off the queue.  It returns false when it's time to quit.
func (c *Controller) processNextWorkItem() bool {
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
	pvKey, quit := c.queue.Get()
	if quit {
		return false
@@ -109,7 +109,7 @@ func (c *Controller) processNextWorkItem() bool {

	pvName := pvKey.(string)

	err := c.processPV(pvName)
	err := c.processPV(ctx, pvName)
	if err == nil {
		c.queue.Forget(pvKey)
		return true
@@ -121,7 +121,7 @@ func (c *Controller) processNextWorkItem() bool {
	return true
}

func (c *Controller) processPV(pvName string) error {
func (c *Controller) processPV(ctx context.Context, pvName string) error {
	klog.V(4).Infof("Processing PV %s", pvName)
	startTime := time.Now()
	defer func() {
@@ -142,7 +142,7 @@ func (c *Controller) processPV(pvName string) error {
		// it's not.
		isUsed := c.isBeingUsed(pv)
		if !isUsed {
			return c.removeFinalizer(pv)
			return c.removeFinalizer(ctx, pv)
		}
		klog.V(4).Infof("Keeping PV %s because it is being used", pvName)
	}
@@ -152,19 +152,19 @@ func (c *Controller) processPV(pvName string) error {
		// finalizer should be added by admission plugin, this is just to add
		// the finalizer to old PVs that were created before the admission
		// plugin was enabled.
		return c.addFinalizer(pv)
		return c.addFinalizer(ctx, pv)
	}
	return nil
}

func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume) error {
	// Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
	if !c.storageObjectInUseProtectionEnabled {
		return nil
	}
	pvClone := pv.DeepCopy()
	pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
	_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{})
	_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
	if err != nil {
		klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
		return err
@@ -173,10 +173,10 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
	return nil
}

func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error {
func (c *Controller) removeFinalizer(ctx context.Context, pv *v1.PersistentVolume) error {
	pvClone := pv.DeepCopy()
	pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
	_, err := c.client.CoreV1().PersistentVolumes().Update(context.TODO(), pvClone, metav1.UpdateOptions{})
	_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
	if err != nil {
		klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
		return err

@@ -17,6 +17,7 @@ limitations under the License.
package pvprotection

import (
	"context"
	"errors"
	"reflect"
	"testing"
@@ -247,7 +248,7 @@ func TestPVProtectionController(t *testing.T) {
			}
			if ctrl.queue.Len() > 0 {
				klog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
				ctrl.processNextWorkItem()
				ctrl.processNextWorkItem(context.TODO())
			}
			if ctrl.queue.Len() > 0 {
				// There is still some work in the queue, process it now

@@ -185,7 +185,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface
		if err != nil {
			klog.Fatalf("error building controller context: %v", err)
		}
		if err := startControllers(cloud, controllerContext, c, ctx.Done(), controllerInitializers, healthzHandler); err != nil {
		if err := startControllers(ctx, cloud, controllerContext, c, ctx.Done(), controllerInitializers, healthzHandler); err != nil {
			klog.Fatalf("error running controllers: %v", err)
		}
	}
@@ -262,7 +262,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface
}

// startControllers starts the cloud specific controller loops.
func startControllers(cloud cloudprovider.Interface, ctx genericcontrollermanager.ControllerContext, c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}, controllers map[string]InitFunc, healthzHandler *controllerhealthz.MutableHealthzHandler) error {
func startControllers(ctx context.Context, cloud cloudprovider.Interface, controllerContext genericcontrollermanager.ControllerContext, c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}, controllers map[string]InitFunc, healthzHandler *controllerhealthz.MutableHealthzHandler) error {
	// Initialize the cloud provider with a reference to the clientBuilder
	cloud.Initialize(c.ClientBuilder, stopCh)
	// Set the informer on the user cloud object
@@ -277,7 +277,7 @@ func startControllers(cloud cloudprovider.Interface, ctx genericcontrollermanage
		}

		klog.V(1).Infof("Starting %q", controllerName)
		ctrl, started, err := initFn(ctx)
		ctrl, started, err := initFn(ctx, controllerContext)
		if err != nil {
			klog.Errorf("Error starting %q", controllerName)
			return err
@@ -309,7 +309,7 @@ func startControllers(cloud cloudprovider.Interface, ctx genericcontrollermanage
	}

	c.SharedInformers.Start(stopCh)
	ctx.InformerFactory.Start(ctx.Stop)
	controllerContext.InformerFactory.Start(controllerContext.Stop)

	select {}
}
@@ -324,7 +324,7 @@ type InitCloudFunc func(config *cloudcontrollerconfig.CompletedConfig) cloudprov
// that requests no additional features from the controller manager.
// Any error returned will cause the controller process to `Fatal`
// The bool indicates whether the controller was enabled.
type InitFunc func(ctx genericcontrollermanager.ControllerContext) (controller controller.Interface, enabled bool, err error)
type InitFunc func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller controller.Interface, enabled bool, err error)

// InitFuncConstructor is used to construct InitFunc
type InitFuncConstructor func(initcontext ControllerInitContext, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface) InitFunc
@@ -359,29 +359,29 @@ type ControllerInitContext struct {

// StartCloudNodeControllerWrapper is used to take cloud config as input and start cloud node controller
func StartCloudNodeControllerWrapper(initContext ControllerInitContext, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface) InitFunc {
	return func(ctx genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startCloudNodeController(initContext, completedConfig, cloud, ctx.Stop)
	return func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startCloudNodeController(ctx, initContext, completedConfig, cloud)
	}
}

// StartCloudNodeLifecycleControllerWrapper is used to take cloud config as input and start cloud node lifecycle controller
func StartCloudNodeLifecycleControllerWrapper(initContext ControllerInitContext, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface) InitFunc {
	return func(ctx genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startCloudNodeLifecycleController(initContext, completedConfig, cloud, ctx.Stop)
	return func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startCloudNodeLifecycleController(ctx, initContext, completedConfig, cloud)
	}
}

// StartServiceControllerWrapper is used to take cloud config as input and start service controller
func StartServiceControllerWrapper(initContext ControllerInitContext, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface) InitFunc {
	return func(ctx genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startServiceController(initContext, completedConfig, cloud, ctx.Stop)
	return func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startServiceController(ctx, initContext, completedConfig, cloud)
	}
}

// StartRouteControllerWrapper is used to take cloud config as input and start route controller
func StartRouteControllerWrapper(initContext ControllerInitContext, completedConfig *cloudcontrollerconfig.CompletedConfig, cloud cloudprovider.Interface) InitFunc {
	return func(ctx genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startRouteController(initContext, completedConfig, cloud, ctx.Stop)
	return func(ctx context.Context, controllerContext genericcontrollermanager.ControllerContext) (controller.Interface, bool, error) {
		return startRouteController(ctx, initContext, completedConfig, cloud)
	}
}

 
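The wrappers above all make the same change: the InitFunc closure now receives the process-wide context.Context alongside the controller-manager context, which frees the ctx name and lets the start functions drop ctx.Stop. A hedged sketch of that shape, with stand-in types rather than the real cloud-provider packages:

package main

import (
	"context"
	"fmt"
)

// ControllerContext mimics genericcontrollermanager.ControllerContext just
// enough for the sketch.
type ControllerContext struct{ ClusterName string }

// InitFunc mirrors the new shape: a context.Context plus the (renamed)
// controller context, instead of the controller context alone.
type InitFunc func(ctx context.Context, controllerContext ControllerContext) (enabled bool, err error)

func startDemoController(ctx context.Context, cc ControllerContext) (bool, error) {
	go func() {
		<-ctx.Done() // run loops now watch the context, not a stop channel
		fmt.Println("demo controller stopped")
	}()
	return true, nil
}

// wrapper shows the closure pattern used by the Start*ControllerWrapper funcs.
func wrapper() InitFunc {
	return func(ctx context.Context, controllerContext ControllerContext) (bool, error) {
		return startDemoController(ctx, controllerContext)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if enabled, err := wrapper()(ctx, ControllerContext{ClusterName: "demo"}); err != nil || !enabled {
		fmt.Println("init failed:", err)
	}
}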
@@ -21,6 +21,7 @@ limitations under the License.
package app

import (
	"context"
	"fmt"
	"net"
	"strings"
@@ -39,52 +40,52 @@ import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func startCloudNodeController(initContext ControllerInitContext, ctx *config.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (controller.Interface, bool, error) {
func startCloudNodeController(ctx context.Context, initContext ControllerInitContext, completedConfig *config.CompletedConfig, cloud cloudprovider.Interface) (controller.Interface, bool, error) {
	// Start the CloudNodeController
	nodeController, err := cloudnodecontroller.NewCloudNodeController(
		ctx.SharedInformers.Core().V1().Nodes(),
		completedConfig.SharedInformers.Core().V1().Nodes(),
		// cloud node controller uses existing cluster role from node-controller
		ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
		completedConfig.ClientBuilder.ClientOrDie(initContext.ClientName),
		cloud,
		ctx.ComponentConfig.NodeStatusUpdateFrequency.Duration,
		completedConfig.ComponentConfig.NodeStatusUpdateFrequency.Duration,
	)
	if err != nil {
		klog.Warningf("failed to start cloud node controller: %s", err)
		return nil, false, nil
	}

	go nodeController.Run(stopCh)
	go nodeController.Run(ctx.Done())

	return nil, true, nil
}

func startCloudNodeLifecycleController(initContext ControllerInitContext, ctx *config.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (controller.Interface, bool, error) {
func startCloudNodeLifecycleController(ctx context.Context, initContext ControllerInitContext, completedConfig *config.CompletedConfig, cloud cloudprovider.Interface) (controller.Interface, bool, error) {
	// Start the cloudNodeLifecycleController
	cloudNodeLifecycleController, err := cloudnodelifecyclecontroller.NewCloudNodeLifecycleController(
		ctx.SharedInformers.Core().V1().Nodes(),
		completedConfig.SharedInformers.Core().V1().Nodes(),
		// cloud node lifecycle controller uses existing cluster role from node-controller
		ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
		completedConfig.ClientBuilder.ClientOrDie(initContext.ClientName),
		cloud,
		ctx.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
		completedConfig.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
	)
	if err != nil {
		klog.Warningf("failed to start cloud node lifecycle controller: %s", err)
		return nil, false, nil
	}

	go cloudNodeLifecycleController.Run(stopCh)
	go cloudNodeLifecycleController.Run(ctx)

	return nil, true, nil
}

func startServiceController(initContext ControllerInitContext, ctx *config.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (controller.Interface, bool, error) {
func startServiceController(ctx context.Context, initContext ControllerInitContext, completedConfig *config.CompletedConfig, cloud cloudprovider.Interface) (controller.Interface, bool, error) {
	// Start the service controller
	serviceController, err := servicecontroller.New(
		cloud,
		ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
		ctx.SharedInformers.Core().V1().Services(),
		ctx.SharedInformers.Core().V1().Nodes(),
		ctx.ComponentConfig.KubeCloudShared.ClusterName,
		completedConfig.ClientBuilder.ClientOrDie(initContext.ClientName),
		completedConfig.SharedInformers.Core().V1().Services(),
		completedConfig.SharedInformers.Core().V1().Nodes(),
		completedConfig.ComponentConfig.KubeCloudShared.ClusterName,
		utilfeature.DefaultFeatureGate,
	)
	if err != nil {
@@ -93,14 +94,14 @@ func startServiceController(initContext ControllerInitContext, ctx *config.Compl
		return nil, false, nil
	}

	go serviceController.Run(stopCh, int(ctx.ComponentConfig.ServiceController.ConcurrentServiceSyncs))
	go serviceController.Run(ctx, int(completedConfig.ComponentConfig.ServiceController.ConcurrentServiceSyncs))

	return nil, true, nil
}

func startRouteController(initContext ControllerInitContext, ctx *config.CompletedConfig, cloud cloudprovider.Interface, stopCh <-chan struct{}) (controller.Interface, bool, error) {
	if !ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
		klog.Infof("Will not configure cloud provider routes, --configure-cloud-routes: %v", ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
func startRouteController(ctx context.Context, initContext ControllerInitContext, completedConfig *config.CompletedConfig, cloud cloudprovider.Interface) (controller.Interface, bool, error) {
	if !completedConfig.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
		klog.Infof("Will not configure cloud provider routes, --configure-cloud-routes: %v", completedConfig.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)
		return nil, false, nil
	}

@@ -112,7 +113,7 @@ func startRouteController(initContext ControllerInitContext, ctx *config.Complet
	}

	// failure: bad cidrs in config
	clusterCIDRs, dualStack, err := processCIDRs(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)
	clusterCIDRs, dualStack, err := processCIDRs(completedConfig.ComponentConfig.KubeCloudShared.ClusterCIDR)
	if err != nil {
		return nil, false, err
	}
@@ -134,12 +135,12 @@ func startRouteController(initContext ControllerInitContext, ctx *config.Complet

	routeController := routecontroller.New(
		routes,
		ctx.ClientBuilder.ClientOrDie(initContext.ClientName),
		ctx.SharedInformers.Core().V1().Nodes(),
		ctx.ComponentConfig.KubeCloudShared.ClusterName,
		completedConfig.ClientBuilder.ClientOrDie(initContext.ClientName),
		completedConfig.SharedInformers.Core().V1().Nodes(),
		completedConfig.ComponentConfig.KubeCloudShared.ClusterName,
		clusterCIDRs,
	)
	go routeController.Run(stopCh, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)
	go routeController.Run(ctx, completedConfig.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration)

	return nil, true, nil
}
 
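Note the mixed state these hunks leave behind: cloudNodeLifecycleController.Run and serviceController.Run already take a context, while nodeController.Run still takes a stop channel and is fed ctx.Done(). A small sketch of why the two styles interoperate during the migration (names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// legacyRun is a controller that has not been converted yet.
func legacyRun(stopCh <-chan struct{}) {
	<-stopCh
	fmt.Println("legacy controller stopped")
}

// newRun is a converted controller.
func newRun(ctx context.Context) {
	<-ctx.Done()
	fmt.Println("new controller stopped")
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	go legacyRun(ctx.Done()) // ctx.Done() is a <-chan struct{}, so it slots in directly
	go newRun(ctx)

	time.Sleep(10 * time.Millisecond)
	cancel() // one cancellation stops both styles
	time.Sleep(10 * time.Millisecond)
}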
@@ -103,7 +103,7 @@ func NewCloudNodeLifecycleController(

// Run starts the main loop for this controller. Run is blocking so should
// be called via a goroutine
func (c *CloudNodeLifecycleController) Run(stopCh <-chan struct{}) {
func (c *CloudNodeLifecycleController) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()

	// The following loops communicate with the APIServer with a worst case complexity
@@ -112,13 +112,13 @@ func (c *CloudNodeLifecycleController) Run(stopCh <-chan struct{}) {

	// Start a loop to periodically check if any nodes have been
	// deleted or shutdown from the cloudprovider
	wait.Until(c.MonitorNodes, c.nodeMonitorPeriod, stopCh)
	wait.UntilWithContext(ctx, c.MonitorNodes, c.nodeMonitorPeriod)
}

// MonitorNodes checks to see if nodes in the cluster have been deleted
// or shutdown. If deleted, it deletes the node resource. If shutdown it
// applies a shutdown taint to the node
func (c *CloudNodeLifecycleController) MonitorNodes() {
func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) {
	nodes, err := c.nodeLister.List(labels.Everything())
	if err != nil {
		klog.Errorf("error listing nodes from cache: %s", err)
@@ -143,7 +143,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() {

		// At this point the node has NotReady status, we need to check if the node has been removed
		// from the cloud provider. If node cannot be found in cloudprovider, then delete the node
		exists, err := ensureNodeExistsByProviderID(context.TODO(), c.cloud, node)
		exists, err := ensureNodeExistsByProviderID(ctx, c.cloud, node)
		if err != nil {
			klog.Errorf("error checking if node %s exists: %v", node.Name, err)
			continue
@@ -164,14 +164,14 @@ func (c *CloudNodeLifecycleController) MonitorNodes() {
			c.recorder.Eventf(ref, v1.EventTypeNormal, deleteNodeEvent,
				"Deleting node %s because it does not exist in the cloud provider", node.Name)

			if err := c.kubeClient.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}); err != nil {
			if err := c.kubeClient.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{}); err != nil {
				klog.Errorf("unable to delete node %q: %v", node.Name, err)
			}
		} else {
			// Node exists. We need to check this so that the shutdown taint works the same way across
			// cloud providers; the current problem is that shutdown nodes are not handled uniformly, i.e.
			// not every cloud provider deletes the node from the kubernetes cluster when its instance is shut down, see issue #46442
			shutdown, err := shutdownInCloudProvider(context.TODO(), c.cloud, node)
			shutdown, err := shutdownInCloudProvider(ctx, c.cloud, node)
			if err != nil {
				klog.Errorf("error checking if node %s is shutdown: %v", node.Name, err)
			}
 
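The Run hunk above is the canonical wait.Until to wait.UntilWithContext swap: the context both stops the loop and is handed to each MonitorNodes tick, so the tick's API calls become cancellable. A minimal sketch using the real k8s.io/apimachinery helper; the worker body is a stand-in:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// monitorNodes stands in for CloudNodeLifecycleController.MonitorNodes; note
// that it now receives the loop's context.
func monitorNodes(ctx context.Context) {
	fmt.Println("tick; ctx alive:", ctx.Err() == nil)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
	defer cancel()

	// Before: wait.Until(c.MonitorNodes, period, stopCh)
	// After:  the context replaces the stop channel and flows into the worker.
	wait.UntilWithContext(ctx, monitorNodes, 50*time.Millisecond)
	fmt.Println("loop exited:", ctx.Err())
}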
@@ -504,6 +504,8 @@ func Test_NodesDeleted(t *testing.T) {

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			clientset := fake.NewSimpleClientset(testcase.existingNode)
			informer := informers.NewSharedInformerFactory(clientset, time.Second)
			nodeInformer := informer.Core().V1().Nodes()
@@ -523,9 +525,9 @@ func Test_NodesDeleted(t *testing.T) {

			w := eventBroadcaster.StartLogging(klog.Infof)
			defer w.Stop()
			cloudNodeLifecycleController.MonitorNodes()
			cloudNodeLifecycleController.MonitorNodes(ctx)

			updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), testcase.existingNode.Name, metav1.GetOptions{})
			updatedNode, err := clientset.CoreV1().Nodes().Get(ctx, testcase.existingNode.Name, metav1.GetOptions{})
			if testcase.expectedDeleted != apierrors.IsNotFound(err) {
				t.Fatalf("unexpected error happens when getting the node: %v", err)
			}
@@ -732,7 +734,7 @@ func Test_NodesShutdown(t *testing.T) {

			w := eventBroadcaster.StartLogging(klog.Infof)
			defer w.Stop()
			cloudNodeLifecycleController.MonitorNodes()
			cloudNodeLifecycleController.MonitorNodes(context.TODO())

			updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), testcase.existingNode.Name, metav1.GetOptions{})
			if testcase.expectedDeleted != apierrors.IsNotFound(err) {
 
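The test hunks here and below all follow one pattern: each case derives its own cancellable context and defers cancel, so anything the code under test starts is torn down when the case returns. A generic sketch of that shape — doWork is hypothetical:

package demo

import (
	"context"
	"testing"
)

// doWork is a placeholder for the function under test, e.g. MonitorNodes(ctx).
func doWork(ctx context.Context, name string) error { return ctx.Err() }

func TestDoWork(t *testing.T) {
	for _, name := range []string{"node-deleted", "node-shutdown"} {
		t.Run(name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel() // reclaims anything the case started, even on t.Fatal

			if err := doWork(ctx, name); err != nil {
				t.Fatalf("doWork(%q) = %v", name, err)
			}
		})
	}
}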
@@ -95,13 +95,13 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform
	return rc
}

func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) {
func (rc *RouteController) Run(ctx context.Context, syncPeriod time.Duration) {
	defer utilruntime.HandleCrash()

	klog.Info("Starting route controller")
	defer klog.Info("Shutting down route controller")

	if !cache.WaitForNamedCacheSync("route", stopCh, rc.nodeListerSynced) {
	if !cache.WaitForNamedCacheSync("route", ctx.Done(), rc.nodeListerSynced) {
		return
	}

@@ -115,16 +115,16 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration)
	// We should have a watch on node and if we observe a new node (with CIDR?)
	// trigger reconciliation for that node.
	go wait.NonSlidingUntil(func() {
		if err := rc.reconcileNodeRoutes(); err != nil {
		if err := rc.reconcileNodeRoutes(ctx); err != nil {
			klog.Errorf("Couldn't reconcile node routes: %v", err)
		}
	}, syncPeriod, stopCh)
	}, syncPeriod, ctx.Done())

	<-stopCh
	<-ctx.Done()
}

func (rc *RouteController) reconcileNodeRoutes() error {
	routeList, err := rc.routes.ListRoutes(context.TODO(), rc.clusterName)
func (rc *RouteController) reconcileNodeRoutes(ctx context.Context) error {
	routeList, err := rc.routes.ListRoutes(ctx, rc.clusterName)
	if err != nil {
		return fmt.Errorf("error listing routes: %v", err)
	}
@@ -132,10 +132,10 @@ func (rc *RouteController) reconcileNodeRoutes() error {
	if err != nil {
		return fmt.Errorf("error listing nodes: %v", err)
	}
	return rc.reconcile(nodes, routeList)
	return rc.reconcile(ctx, nodes, routeList)
}

func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.Route) error {
func (rc *RouteController) reconcile(ctx context.Context, nodes []*v1.Node, routes []*cloudprovider.Route) error {
	var l sync.Mutex
	// for each node a map of podCIDRs and their created status
	nodeRoutesStatuses := make(map[types.NodeName]map[string]bool)
@@ -192,7 +192,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
					// CreateRoute calls in flight.
					rateLimiter <- struct{}{}
					klog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
					err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
					err := rc.routes.CreateRoute(ctx, rc.clusterName, nameHint, route)
					<-rateLimiter
					if err != nil {
						msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
@@ -245,7 +245,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
					// respect the rate limiter
					rateLimiter <- struct{}{}
					klog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
					if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
					if err := rc.routes.DeleteRoute(ctx, rc.clusterName, route); err != nil {
						klog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
					} else {
						klog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
 
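In the route controller the context is threaded down a whole call chain, Run to reconcileNodeRoutes to reconcile, until it reaches the cloud API calls previously pinned to context.TODO(). A compressed sketch of that top-down flow, with a stand-in for the cloudprovider.Routes interface:

package main

import (
	"context"
	"fmt"
)

// routes stands in for the cloudprovider.Routes interface.
type routes interface {
	ListRoutes(ctx context.Context, clusterName string) ([]string, error)
}

type fakeRoutes struct{}

func (fakeRoutes) ListRoutes(ctx context.Context, clusterName string) ([]string, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // the cloud call observes cancellation
	}
	return []string{"10.0.0.0/24"}, nil
}

type routeController struct{ routes routes }

// reconcileNodeRoutes and reconcile now take ctx instead of reaching for
// context.TODO() internally.
func (rc *routeController) reconcileNodeRoutes(ctx context.Context) error {
	routeList, err := rc.routes.ListRoutes(ctx, "demo")
	if err != nil {
		return fmt.Errorf("error listing routes: %v", err)
	}
	return rc.reconcile(ctx, routeList)
}

func (rc *routeController) reconcile(ctx context.Context, routeList []string) error {
	fmt.Println("reconciling", routeList)
	return nil
}

func main() {
	rc := &routeController{routes: fakeRoutes{}}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := rc.reconcileNodeRoutes(ctx); err != nil {
		fmt.Println(err)
	}
}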
@@ -348,6 +348,8 @@ func TestReconcile(t *testing.T) {
		},
	}
	for i, testCase := range testCases {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		cloud := &fakecloud.Cloud{RouteMap: make(map[string]*fakecloud.Route)}
		for _, route := range testCase.initialRoutes {
			fakeRoute := &fakecloud.Route{}
@@ -370,7 +372,7 @@ func TestReconcile(t *testing.T) {
		informerFactory := informers.NewSharedInformerFactory(testCase.clientset, 0)
		rc := New(routes, testCase.clientset, informerFactory.Core().V1().Nodes(), cluster, cidrs)
		rc.nodeListerSynced = alwaysReady
		if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
		if err := rc.reconcile(ctx, testCase.nodes, testCase.initialRoutes); err != nil {
			t.Errorf("%d. Error from rc.reconcile(): %v", i, err)
		}
		for _, action := range testCase.clientset.Actions() {
@@ -409,7 +411,7 @@ func TestReconcile(t *testing.T) {
		for {
			select {
			case <-tick.C:
				if finalRoutes, err = routes.ListRoutes(context.TODO(), cluster); err == nil && routeListEqual(finalRoutes, testCase.expectedRoutes) {
				if finalRoutes, err = routes.ListRoutes(ctx, cluster); err == nil && routeListEqual(finalRoutes, testCase.expectedRoutes) {
					break poll
				}
			case <-timeoutChan:
 
@@ -223,25 +223,25 @@ func (s *Controller) enqueueService(obj interface{}) {
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *Controller) Run(stopCh <-chan struct{}, workers int) {
func (s *Controller) Run(ctx context.Context, workers int) {
	defer runtime.HandleCrash()
	defer s.queue.ShutDown()

	klog.Info("Starting service controller")
	defer klog.Info("Shutting down service controller")

	if !cache.WaitForNamedCacheSync("service", stopCh, s.serviceListerSynced, s.nodeListerSynced) {
	if !cache.WaitForNamedCacheSync("service", ctx.Done(), s.serviceListerSynced, s.nodeListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(s.worker, time.Second, stopCh)
		go wait.UntilWithContext(ctx, s.worker, time.Second)
	}

	go s.nodeSyncLoop(workers)
	go wait.Until(s.triggerNodeSync, nodeSyncPeriod, stopCh)
	go s.nodeSyncLoop(ctx, workers)
	go wait.Until(s.triggerNodeSync, nodeSyncPeriod, ctx.Done())

	<-stopCh
	<-ctx.Done()
}

// triggerNodeSync triggers a nodeSync asynchronously
@@ -276,29 +276,29 @@ func (s *Controller) triggerNodeSync() {

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (s *Controller) worker() {
	for s.processNextWorkItem() {
func (s *Controller) worker(ctx context.Context) {
	for s.processNextWorkItem(ctx) {
	}
}

// nodeSyncLoop takes nodeSync signal and triggers nodeSync
func (s *Controller) nodeSyncLoop(workers int) {
func (s *Controller) nodeSyncLoop(ctx context.Context, workers int) {
	klog.V(4).Info("nodeSyncLoop Started")
	for range s.nodeSyncCh {
		klog.V(4).Info("nodeSync has been triggered")
		s.nodeSyncInternal(workers)
		s.nodeSyncInternal(ctx, workers)
	}
	klog.V(2).Info("s.nodeSyncCh is closed. Exiting nodeSyncLoop")
}

func (s *Controller) processNextWorkItem() bool {
func (s *Controller) processNextWorkItem(ctx context.Context) bool {
	key, quit := s.queue.Get()
	if quit {
		return false
	}
	defer s.queue.Done(key)

	err := s.syncService(key.(string))
	err := s.syncService(ctx, key.(string))
	if err == nil {
		s.queue.Forget(key)
		return true
@@ -325,7 +325,7 @@ func (s *Controller) init() error {

// processServiceCreateOrUpdate operates loadbalancers for the incoming service accordingly.
// Returns an error if processing the service update failed.
func (s *Controller) processServiceCreateOrUpdate(service *v1.Service, key string) error {
func (s *Controller) processServiceCreateOrUpdate(ctx context.Context, service *v1.Service, key string) error {
	// TODO(@MrHohn): Remove the cache once we get rid of the non-finalizer deletion
	// path. Ref https://github.com/kubernetes/enhancements/issues/980.
	cachedService := s.cache.getOrCreate(key)
@@ -333,14 +333,14 @@ func (s *Controller) processServiceCreateOrUpdate(service *v1.Service, key strin
		// This happens only when a service is deleted and re-created
		// in a short period, which is only possible when it doesn't
		// contain finalizer.
		if err := s.processLoadBalancerDelete(cachedService.state, key); err != nil {
		if err := s.processLoadBalancerDelete(ctx, cachedService.state, key); err != nil {
			return err
		}
	}
	// Always cache the service, we need the info for service deletion in case
	// when load balancer cleanup is not handled via finalizer.
	cachedService.state = service
	op, err := s.syncLoadBalancerIfNeeded(service, key)
	op, err := s.syncLoadBalancerIfNeeded(ctx, service, key)
	if err != nil {
		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed", "Error syncing load balancer: %v", err)
		return err
@@ -363,7 +363,7 @@ const (
// syncLoadBalancerIfNeeded ensures that service's status is synced up with loadbalancer
// i.e. creates loadbalancer for service if requested and deletes loadbalancer if the service
// doesn't want a loadbalancer anymore. Returns whatever error occurred.
func (s *Controller) syncLoadBalancerIfNeeded(service *v1.Service, key string) (loadBalancerOperation, error) {
func (s *Controller) syncLoadBalancerIfNeeded(ctx context.Context, service *v1.Service, key string) (loadBalancerOperation, error) {
	// Note: It is safe to just call EnsureLoadBalancer.  But, on some clouds that requires a delete & create,
	// which may involve service interruption.  Also, we would like user-friendly events.

@@ -377,14 +377,14 @@ func (s *Controller) syncLoadBalancerIfNeeded(service *v1.Service, key string) (
		// Delete the load balancer if service no longer wants one, or if service needs cleanup.
		op = deleteLoadBalancer
		newStatus = &v1.LoadBalancerStatus{}
		_, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service)
		_, exists, err := s.balancer.GetLoadBalancer(ctx, s.clusterName, service)
		if err != nil {
			return op, fmt.Errorf("failed to check if load balancer exists before cleanup: %v", err)
		}
		if exists {
			klog.V(2).Infof("Deleting existing load balancer for service %s", key)
			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
			if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
			if err := s.balancer.EnsureLoadBalancerDeleted(ctx, s.clusterName, service); err != nil {
				return op, fmt.Errorf("failed to delete load balancer: %v", err)
			}
		}
@@ -404,7 +404,7 @@ func (s *Controller) syncLoadBalancerIfNeeded(service *v1.Service, key string) (
		if err := s.addFinalizer(service); err != nil {
			return op, fmt.Errorf("failed to add load balancer cleanup finalizer: %v", err)
		}
		newStatus, err = s.ensureLoadBalancer(service)
		newStatus, err = s.ensureLoadBalancer(ctx, service)
		if err != nil {
			if err == cloudprovider.ImplementedElsewhere {
				// ImplementedElsewhere indicates that the ensureLoadBalancer is a nop and the
@@ -435,7 +435,7 @@ func (s *Controller) syncLoadBalancerIfNeeded(service *v1.Service, key string) (
	return op, nil
}

func (s *Controller) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
func (s *Controller) ensureLoadBalancer(ctx context.Context, service *v1.Service) (*v1.LoadBalancerStatus, error) {
	nodes, err := listWithPredicate(s.nodeLister, s.getNodeConditionPredicate())
	if err != nil {
		return nil, err
@@ -449,7 +449,7 @@ func (s *Controller) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerSt
	// - Only one protocol supported per service
	// - Not all cloud providers support all protocols and the next step is expected to return
	//   an error for unsupported protocols
	return s.balancer.EnsureLoadBalancer(context.TODO(), s.clusterName, service, nodes)
	return s.balancer.EnsureLoadBalancer(ctx, s.clusterName, service, nodes)
}

// ListKeys implements the interface required by DeltaFIFO to list the keys we
@@ -713,7 +713,7 @@ func nodeReadyConditionStatus(node *v1.Node) v1.ConditionStatus {

// nodeSyncInternal handles updating the hosts pointed to by all load
// balancers whenever the set of nodes in the cluster changes.
func (s *Controller) nodeSyncInternal(workers int) {
func (s *Controller) nodeSyncInternal(ctx context.Context, workers int) {
	startTime := time.Now()
	defer func() {
		latency := time.Since(startTime).Seconds()
@@ -724,7 +724,7 @@ func (s *Controller) nodeSyncInternal(workers int) {
	if !s.needFullSyncAndUnmark() {
		// The set of nodes in the cluster hasn't changed, but we can retry
		// updating any services that we failed to update last time around.
		s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, workers)
		s.servicesToUpdate = s.updateLoadBalancerHosts(ctx, s.servicesToUpdate, workers)
		return
	}
	klog.V(2).Infof("Syncing backends for all LB services.")
@@ -733,7 +733,7 @@ func (s *Controller) nodeSyncInternal(workers int) {
	// round.
	s.servicesToUpdate = s.cache.allServices()
	numServices := len(s.servicesToUpdate)
	s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, workers)
	s.servicesToUpdate = s.updateLoadBalancerHosts(ctx, s.servicesToUpdate, workers)
	klog.V(2).Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
		numServices-len(s.servicesToUpdate), numServices)
}
@@ -761,7 +761,7 @@ func (s *Controller) nodeSyncService(svc *v1.Service) bool {
// updateLoadBalancerHosts updates all existing load balancers so that
// they will match the latest list of nodes with input number of workers.
// Returns the list of services that couldn't be updated.
func (s *Controller) updateLoadBalancerHosts(services []*v1.Service, workers int) (servicesToRetry []*v1.Service) {
func (s *Controller) updateLoadBalancerHosts(ctx context.Context, services []*v1.Service, workers int) (servicesToRetry []*v1.Service) {
	klog.V(4).Infof("Running updateLoadBalancerHosts(len(services)==%d, workers==%d)", len(services), workers)

	// lock for servicesToRetry
@@ -775,7 +775,7 @@ func (s *Controller) updateLoadBalancerHosts(services []*v1.Service, workers int
		servicesToRetry = append(servicesToRetry, services[piece])
	}

	workqueue.ParallelizeUntil(context.TODO(), workers, len(services), doWork)
	workqueue.ParallelizeUntil(ctx, workers, len(services), doWork)
	klog.V(4).Infof("Finished updateLoadBalancerHosts")
	return servicesToRetry
}
@@ -831,7 +831,7 @@ func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool {
// syncService will sync the Service with the given key if it has had its expectations fulfilled,
// meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be
// invoked concurrently with the same key.
func (s *Controller) syncService(key string) error {
func (s *Controller) syncService(ctx context.Context, key string) error {
	startTime := time.Now()
	defer func() {
		klog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime))
@@ -847,17 +847,17 @@ func (s *Controller) syncService(key string) error {
	switch {
	case errors.IsNotFound(err):
		// service absence in store means watcher caught the deletion, ensure LB info is cleaned
		err = s.processServiceDeletion(key)
		err = s.processServiceDeletion(ctx, key)
	case err != nil:
		runtime.HandleError(fmt.Errorf("Unable to retrieve service %v from store: %v", key, err))
	default:
		err = s.processServiceCreateOrUpdate(service, key)
		err = s.processServiceCreateOrUpdate(ctx, service, key)
	}

	return err
}

func (s *Controller) processServiceDeletion(key string) error {
func (s *Controller) processServiceDeletion(ctx context.Context, key string) error {
	cachedService, ok := s.cache.get(key)
	if !ok {
		// The cache not containing the key means:
@@ -867,20 +867,20 @@ func (s *Controller) processServiceDeletion(key string) error {
		return nil
	}
	klog.V(2).Infof("Service %v has been deleted. Attempting to cleanup load balancer resources", key)
	if err := s.processLoadBalancerDelete(cachedService.state, key); err != nil {
	if err := s.processLoadBalancerDelete(ctx, cachedService.state, key); err != nil {
		return err
	}
	s.cache.delete(key)
	return nil
}

func (s *Controller) processLoadBalancerDelete(service *v1.Service, key string) error {
func (s *Controller) processLoadBalancerDelete(ctx context.Context, service *v1.Service, key string) error {
	// delete load balancer info only if the service type is LoadBalancer
	if !wantsLoadBalancer(service) {
		return nil
	}
	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
	if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
	if err := s.balancer.EnsureLoadBalancerDeleted(ctx, s.clusterName, service); err != nil {
		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "DeleteLoadBalancerFailed", "Error deleting load balancer: %v", err)
		return err
	}
 
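In the service controller the context travels through the entire queue pipeline: Run spawns worker(ctx) via wait.UntilWithContext, the worker loops processNextWorkItem(ctx), and that calls syncService(ctx, key); workqueue.ParallelizeUntil likewise takes the threaded ctx instead of context.TODO(). A stripped-down sketch of the pipeline, with a buffered channel standing in for the real workqueue:

package main

import (
	"context"
	"fmt"
)

type controller struct{ queue chan string }

// syncService is where the context is finally consumed, e.g. by API calls.
func (c *controller) syncService(ctx context.Context, key string) error {
	fmt.Println("synced", key)
	return ctx.Err()
}

// processNextWorkItem forwards the context; returning false ends the worker.
func (c *controller) processNextWorkItem(ctx context.Context) bool {
	key, ok := <-c.queue
	if !ok {
		return false // queue shut down
	}
	return c.syncService(ctx, key) == nil
}

// worker matches the func(context.Context) shape wait.UntilWithContext expects.
func (c *controller) worker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

func main() {
	c := &controller{queue: make(chan string, 2)}
	c.queue <- "default/frontend"
	c.queue <- "default/backend"
	close(c.queue)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c.worker(ctx)
}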
@@ -368,15 +368,17 @@ func TestSyncLoadBalancerIfNeeded(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			controller, cloud, client := newController()
			cloud.Exists = tc.lbExists
			key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name)
			if _, err := client.CoreV1().Services(tc.service.Namespace).Create(context.TODO(), tc.service, metav1.CreateOptions{}); err != nil {
			if _, err := client.CoreV1().Services(tc.service.Namespace).Create(ctx, tc.service, metav1.CreateOptions{}); err != nil {
				t.Fatalf("Failed to prepare service %s for testing: %v", key, err)
			}
			client.ClearActions()

			op, err := controller.syncLoadBalancerIfNeeded(tc.service, key)
			op, err := controller.syncLoadBalancerIfNeeded(ctx, tc.service, key)
			if err != nil {
				t.Errorf("Got error: %v, want nil", err)
			}
@@ -548,10 +550,12 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
	}
	for _, item := range table {
		t.Run(item.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			controller, cloud, _ := newController()
			controller.nodeLister = newFakeNodeLister(nil, nodes...)

			if servicesToRetry := controller.updateLoadBalancerHosts(item.services, item.workers); servicesToRetry != nil {
			if servicesToRetry := controller.updateLoadBalancerHosts(ctx, item.services, item.workers); servicesToRetry != nil {
				t.Errorf("for case %q, unexpected servicesToRetry: %v", item.desc, servicesToRetry)
			}
			compareUpdateCalls(t, item.expectedUpdateCalls, cloud.UpdateCalls)
@@ -638,8 +642,10 @@ func TestNodeChangesInExternalLoadBalancer(t *testing.T) {
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			controller.nodeLister = newFakeNodeLister(tc.nodeListerErr, tc.nodes...)
			servicesToRetry := controller.updateLoadBalancerHosts(services, tc.worker)
			servicesToRetry := controller.updateLoadBalancerHosts(ctx, services, tc.worker)
			compareServiceList(t, tc.expectedRetryServices, servicesToRetry)
			compareUpdateCalls(t, tc.expectedUpdateCalls, cloud.UpdateCalls)
			cloud.UpdateCalls = []fakecloud.UpdateBalancerCall{}
@@ -772,11 +778,13 @@ func TestProcessServiceCreateOrUpdate(t *testing.T) {
	}

	for _, tc := range testCases {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		newSvc := tc.updateFn(tc.svc)
		if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
		if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(ctx, tc.svc, metav1.CreateOptions{}); err != nil {
			t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err)
		}
		obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key)
		obtErr := controller.processServiceCreateOrUpdate(ctx, newSvc, tc.key)
		if err := tc.expectedFn(newSvc, obtErr); err != nil {
			t.Errorf("%v processServiceCreateOrUpdate() %v", tc.testName, err)
		}
@@ -810,6 +818,8 @@ func TestProcessServiceCreateOrUpdateK8sError(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			svc := newService(svcName, types.UID("123"), v1.ServiceTypeLoadBalancer)
			// Preset finalizer so k8s error only happens when patching status.
			svc.Finalizers = []string{servicehelper.LoadBalancerCleanupFinalizer}
@@ -818,7 +828,7 @@ func TestProcessServiceCreateOrUpdateK8sError(t *testing.T) {
				return true, nil, tc.k8sErr
			})

			if err := controller.processServiceCreateOrUpdate(svc, svcName); !reflect.DeepEqual(err, tc.expectErr) {
			if err := controller.processServiceCreateOrUpdate(ctx, svc, svcName); !reflect.DeepEqual(err, tc.expectErr) {
				t.Fatalf("processServiceCreateOrUpdate() = %v, want %v", err, tc.expectErr)
			}
			if tc.expectErr == nil {
@@ -905,9 +915,11 @@ func TestSyncService(t *testing.T) {
	}

	for _, tc := range testCases {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		tc.updateFn()
		obtainedErr := controller.syncService(tc.key)
		obtainedErr := controller.syncService(ctx, tc.key)

		//expected matches obtained ??.
		if exp := tc.expectedFn(obtainedErr); exp != nil {
@@ -991,10 +1003,13 @@ func TestProcessServiceDeletion(t *testing.T) {
	}

	for _, tc := range testCases {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		//Create a new controller.
		controller, cloud, _ = newController()
		tc.updateFn(controller)
		obtainedErr := controller.processServiceDeletion(svcKey)
		obtainedErr := controller.processServiceDeletion(ctx, svcKey)
		if err := tc.expectedFn(obtainedErr); err != nil {
			t.Errorf("%v processServiceDeletion() %v", tc.testName, err)
		}
@@ -1388,11 +1403,13 @@ func TestAddFinalizer(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			c := fake.NewSimpleClientset()
			s := &Controller{
				kubeClient: c,
			}
			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(ctx, tc.svc, metav1.CreateOptions{}); err != nil {
				t.Fatalf("Failed to prepare service for testing: %v", err)
			}
			if err := s.addFinalizer(tc.svc); err != nil {
@@ -1442,11 +1459,13 @@ func TestRemoveFinalizer(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			c := fake.NewSimpleClientset()
			s := &Controller{
				kubeClient: c,
			}
			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(ctx, tc.svc, metav1.CreateOptions{}); err != nil {
				t.Fatalf("Failed to prepare service for testing: %v", err)
			}
			if err := s.removeFinalizer(tc.svc); err != nil {
@@ -1542,11 +1561,13 @@ func TestPatchStatus(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			c := fake.NewSimpleClientset()
			s := &Controller{
				kubeClient: c,
			}
			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(context.TODO(), tc.svc, metav1.CreateOptions{}); err != nil {
			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(ctx, tc.svc, metav1.CreateOptions{}); err != nil {
				t.Fatalf("Failed to prepare service for testing: %v", err)
			}
			if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil {
 
@@ -114,13 +114,13 @@ func TestDualStackEndpoints(t *testing.T) {
		client,
		1*time.Second)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	informers.Start(ctx.Done())
	// use only one worker to serialize the updates
	go epController.Run(1, stopCh)
	go epsController.Run(1, stopCh)
	go epController.Run(ctx, 1)
	go epsController.Run(1, ctx.Done())

	var testcases = []struct {
		name           string
 
@@ -56,10 +56,10 @@ func TestEndpointUpdates(t *testing.T) {
		0)

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go epController.Run(1, stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informers.Start(ctx.Done())
	go epController.Run(ctx, 1)

	// Create namespace
	ns := framework.CreateTestingNamespace("test-endpoints-updates", server, t)
@@ -83,7 +83,7 @@ func TestEndpointUpdates(t *testing.T) {
		},
	}

	createdPod, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
	}
@@ -93,14 +93,14 @@ func TestEndpointUpdates(t *testing.T) {
		Phase:  v1.PodRunning,
		PodIPs: []v1.PodIP{{IP: "1.1.1.1"}, {IP: "2001:db8::"}},
	}
	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), createdPod, metav1.UpdateOptions{})
	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
	}

	// Create a service associated to the pod
	svc := newService(ns.Name, "foo1")
	svc1, err := client.CoreV1().Services(ns.Name).Create(context.TODO(), svc, metav1.CreateOptions{})
	svc1, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create service %s: %v", svc.Name, err)
	}
@@ -108,7 +108,7 @@ func TestEndpointUpdates(t *testing.T) {
	// Obtain ResourceVersion of the new endpoint created
	var resVersion string
	if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc.Name, metav1.GetOptions{})
		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
		if err != nil {
			t.Logf("error fetching endpoints: %v", err)
			return false, nil
@@ -121,7 +121,7 @@ func TestEndpointUpdates(t *testing.T) {

	// Force recomputation on the endpoint controller
	svc1.SetAnnotations(map[string]string{"foo": "bar"})
	_, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), svc1, metav1.UpdateOptions{})
	_, err = client.CoreV1().Services(ns.Name).Update(ctx, svc1, metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Failed to update service %s: %v", svc1.Name, err)
	}
@@ -131,13 +131,13 @@ func TestEndpointUpdates(t *testing.T) {
	// was recomputed before asserting, since we only have 1 worker
	// in the endpoint controller
	svc2 := newService(ns.Name, "foo2")
	_, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), svc2, metav1.CreateOptions{})
	_, err = client.CoreV1().Services(ns.Name).Create(ctx, svc2, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create service %s: %v", svc.Name, err)
	}

	if err := wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		_, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc2.Name, metav1.GetOptions{})
		_, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc2.Name, metav1.GetOptions{})
		if err != nil {
			t.Logf("error fetching endpoints: %v", err)
			return false, nil
@@ -149,7 +149,7 @@ func TestEndpointUpdates(t *testing.T) {

	// the endpoint controller should not update the endpoint created for the original
	// service since nothing has changed, the resource version has to be the same
	endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc.Name, metav1.GetOptions{})
	endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("error fetching endpoints: %v", err)
	}
@@ -185,10 +185,10 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
		0)

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go epController.Run(1, stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informers.Start(ctx.Done())
	go epController.Run(ctx, 1)

	// Create namespace
	ns := framework.CreateTestingNamespace("test-endpoints-terminating", server, t)
@@ -232,13 +232,13 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
		},
	}

	createdPod, err := client.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	createdPod, err := client.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
	}

	createdPod.Status = pod.Status
	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(context.TODO(), createdPod, metav1.UpdateOptions{})
	_, err = client.CoreV1().Pods(ns.Name).UpdateStatus(ctx, createdPod, metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
	}
@@ -261,14 +261,14 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
			},
		},
	}
	_, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), svc, metav1.CreateOptions{})
	_, err = client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create service %s: %v", svc.Name, err)
	}

	// poll until associated Endpoints to the previously created Service exists
 | 
			
		||||
	if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
 | 
			
		||||
		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc.Name, metav1.GetOptions{})
 | 
			
		||||
		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
@@ -287,7 +287,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
 | 
			
		||||
		t.Fatalf("endpoints not found: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err = client.CoreV1().Pods(ns.Name).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
 | 
			
		||||
	err = client.CoreV1().Pods(ns.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatalf("error deleting test pod: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
@@ -296,7 +296,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
 | 
			
		||||
	if err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
 | 
			
		||||
		// Ensure that the recently deleted Pod exists but with a deletion timestamp. If the Pod does not exist,
 | 
			
		||||
		// we should fail the test since it is no longer validating against a terminating pod.
 | 
			
		||||
		pod, err := client.CoreV1().Pods(ns.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 | 
			
		||||
		pod, err := client.CoreV1().Pods(ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
 | 
			
		||||
		if apierrors.IsNotFound(err) {
 | 
			
		||||
			return false, fmt.Errorf("expected Pod %q to exist with deletion timestamp but was not found: %v", pod.Name, err)
 | 
			
		||||
		}
 | 
			
		||||
@@ -308,7 +308,7 @@ func TestEndpointWithTerminatingPod(t *testing.T) {
 | 
			
		||||
			return false, errors.New("pod did not have deletion timestamp set")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(context.TODO(), svc.Name, metav1.GetOptions{})
 | 
			
		||||
		endpoints, err := client.CoreV1().Endpoints(ns.Name).Get(ctx, svc.Name, metav1.GetOptions{})
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
 
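The recurring conversion in these test hunks swaps a hand-rolled stop channel for a cancellable context: context.WithCancel replaces make(chan struct{}), defer cancel() replaces defer close(stopCh), migrated controllers receive the context itself, and informers keep consuming a channel via ctx.Done(). A minimal, self-contained sketch of the pattern — fakeController is an illustrative stand-in, not the real endpoint controller:

package main

import (
	"context"
	"fmt"
	"time"
)

// fakeController stands in for a controller whose Run signature moved from
// Run(workers int, stopCh <-chan struct{}) to Run(ctx context.Context, workers int).
type fakeController struct{}

func (c *fakeController) Run(ctx context.Context, workers int) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done(): // replaces the old <-stopCh case
			return
		case <-ticker.C:
			fmt.Printf("syncing with %d workers\n", workers)
		}
	}
}

func main() {
	// Before: stopCh := make(chan struct{}); defer close(stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go (&fakeController{}).Run(ctx, 1)
	time.Sleep(200 * time.Millisecond)
}

ctx.Done() closes exactly when cancel fires, so channel-based call sites such as informers.Start keep working unchanged.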
@@ -77,12 +77,12 @@ func TestEndpointSliceMirroring(t *testing.T) {
		1*time.Second)

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go epController.Run(5, stopCh)
	go epsController.Run(5, stopCh)
	go epsmController.Run(5, stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informers.Start(ctx.Done())
	go epController.Run(ctx, 5)
	go epsController.Run(5, ctx.Done())
	go epsmController.Run(5, ctx.Done())

	testCases := []struct {
		testName                     string
@@ -180,7 +180,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
			if tc.service != nil {
				resourceName = tc.service.Name
				tc.service.Namespace = ns.Name
				_, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), tc.service, metav1.CreateOptions{})
				_, err = client.CoreV1().Services(ns.Name).Create(ctx, tc.service, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("Error creating service: %v", err)
				}
@@ -189,7 +189,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
			if tc.customEndpoints != nil {
				resourceName = tc.customEndpoints.Name
				tc.customEndpoints.Namespace = ns.Name
				_, err = client.CoreV1().Endpoints(ns.Name).Create(context.TODO(), tc.customEndpoints, metav1.CreateOptions{})
				_, err = client.CoreV1().Endpoints(ns.Name).Create(ctx, tc.customEndpoints, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("Error creating endpoints: %v", err)
				}
@@ -197,7 +197,7 @@ func TestEndpointSliceMirroring(t *testing.T) {

			err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
				lSelector := discovery.LabelServiceName + "=" + resourceName
				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: lSelector})
				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector})
				if err != nil {
					t.Logf("Error listing EndpointSlices: %v", err)
					return false, err
@@ -255,10 +255,10 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
		1*time.Second)

	// Start informer and controllers
	stopCh := make(chan struct{})
	defer close(stopCh)
	informers.Start(stopCh)
	go epsmController.Run(1, stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informers.Start(ctx.Done())
	go epsmController.Run(1, ctx.Done())

	testCases := []struct {
		testName      string
@@ -325,19 +325,19 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
				}},
			}

			_, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
			_, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("Error creating service: %v", err)
			}

			_, err = client.CoreV1().Endpoints(ns.Name).Create(context.TODO(), customEndpoints, metav1.CreateOptions{})
			_, err = client.CoreV1().Endpoints(ns.Name).Create(ctx, customEndpoints, metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("Error creating endpoints: %v", err)
			}

			// update endpoint
			tc.tweakEndpoint(customEndpoints)
			_, err = client.CoreV1().Endpoints(ns.Name).Update(context.TODO(), customEndpoints, metav1.UpdateOptions{})
			_, err = client.CoreV1().Endpoints(ns.Name).Update(ctx, customEndpoints, metav1.UpdateOptions{})
			if err != nil {
				t.Fatalf("Error updating endpoints: %v", err)
			}
@@ -345,7 +345,7 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
			// verify the endpoint updates were mirrored
			err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
				lSelector := discovery.LabelServiceName + "=" + service.Name
				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: lSelector})
				esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(ctx, metav1.ListOptions{LabelSelector: lSelector})
				if err != nil {
					t.Logf("Error listing EndpointSlices: %v", err)
					return false, err
 
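Mid-migration, context-aware and channel-based Run signatures coexist in the same test, as the mirroring hunk above shows; a single cancellable context can drive both styles. A sketch with stand-in types (illustrative, not the real controllers):

package main

import (
	"context"
	"time"
)

// ctxRunner models an already-migrated controller.
type ctxRunner struct{}

func (ctxRunner) Run(ctx context.Context, workers int) { <-ctx.Done() }

// chanRunner models a controller that still takes a stop channel.
type chanRunner struct{}

func (chanRunner) Run(workers int, stopCh <-chan struct{}) { <-stopCh }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go ctxRunner{}.Run(ctx, 5)         // new-style signature
	go chanRunner{}.Run(5, ctx.Done()) // old-style signature, fed from the same context

	time.Sleep(50 * time.Millisecond) // both goroutines stop once cancel runs
}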
@@ -258,9 +258,9 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
		t.Fatalf("failed to create garbage collector: %v", err)
	}

	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	tearDown := func() {
		close(stopCh)
		cancel()
		result.TearDownFn()
	}
	syncPeriod := 5 * time.Second
@@ -270,9 +270,9 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
			// client. This is a leaky abstraction and assumes behavior about the REST
			// mapper, but we'll deal with it for now.
			restMapper.Reset()
		}, syncPeriod, stopCh)
		go gc.Run(workers, stopCh)
		go gc.Sync(clientSet.Discovery(), syncPeriod, stopCh)
		}, syncPeriod, ctx.Done())
		go gc.Run(ctx, workers)
		go gc.Sync(clientSet.Discovery(), syncPeriod, ctx.Done())
	}

	if workerCount > 0 {
 
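The garbage-collector setup above folds shutdown into one mechanism: cancel() takes over for close(stopCh), while ctx.Done() feeds the call sites that still expect a channel (the wait helper and gc.Sync). A small sketch of the equivalence, under the assumption that nothing else closes the channel:

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// ctx.Done() is a receive-only channel that closes when cancel is
	// called, so it can stand in wherever a stop channel used to be passed.
	stop := ctx.Done()

	tearDown := func() {
		cancel() // was: close(stopCh)
	}
	tearDown()

	<-stop // returns immediately: the Done channel is now closed
	fmt.Println("stopped")
}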
@@ -141,6 +141,7 @@ func TestTaintBasedEvictions(t *testing.T) {

			// Start NodeLifecycleController for taint.
			nc, err := nodelifecycle.NewNodeLifecycleController(
				testCtx.Ctx,
				externalInformers.Coordination().V1().Leases(),
				externalInformers.Core().V1().Pods(),
				externalInformers.Core().V1().Nodes(),
@@ -167,7 +168,7 @@ func TestTaintBasedEvictions(t *testing.T) {
			testutils.SyncInformerFactory(testCtx)

			// Run all controllers
			go nc.Run(testCtx.Ctx.Done())
			go nc.Run(testCtx.Ctx)
			go testCtx.Scheduler.Run(testCtx.Ctx)

			nodeRes := v1.ResourceList{
 
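Note that NewNodeLifecycleController now receives the context as its first argument, so even construction-time work can observe cancellation. A hypothetical mirror of that constructor shape — the stand-in type below omits the informers and timing parameters the real constructor takes:

package main

import (
	"context"
	"fmt"
)

type lifecycleController struct{}

// newLifecycleController mimics the new constructor shape: context first.
// Illustrative stand-in, not the real NewNodeLifecycleController.
func newLifecycleController(ctx context.Context) (*lifecycleController, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // bail out early if the caller already cancelled
	}
	return &lifecycleController{}, nil
}

func (c *lifecycleController) Run(ctx context.Context) { <-ctx.Done() }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	nc, err := newLifecycleController(ctx)
	if err != nil {
		panic(err)
	}
	go nc.Run(ctx) // matches the go nc.Run(testCtx.Ctx) call above
	fmt.Println("controller started")
}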
@@ -95,8 +95,8 @@ func TestQuota(t *testing.T) {
	ns2 := framework.CreateTestingNamespace("non-quotaed", s, t)
	defer framework.DeleteTestingNamespace(ns2, s, t)

	controllerCh := make(chan struct{})
	defer close(controllerCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
	rm := replicationcontroller.NewReplicationManager(
@@ -106,7 +106,7 @@ func TestQuota(t *testing.T) {
		replicationcontroller.BurstReplicas,
	)
	rm.SetEventRecorder(&record.FakeRecorder{})
	go rm.Run(context.TODO(), 3)
	go rm.Run(ctx, 3)

	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -127,13 +127,13 @@ func TestQuota(t *testing.T) {
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	go resourceQuotaController.Run(2, controllerCh)
	go resourceQuotaController.Run(ctx, 2)

	// Periodically resync the quota controller to detect new resource types
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh)
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Done())

	internalInformers.Start(controllerCh)
	informers.Start(controllerCh)
	internalInformers.Start(ctx.Done())
	informers.Start(ctx.Done())
	close(informersStarted)

	startTime := time.Now()
@@ -326,8 +326,8 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
	ns := framework.CreateTestingNamespace("quota", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	controllerCh := make(chan struct{})
	defer close(controllerCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
	rm := replicationcontroller.NewReplicationManager(
@@ -337,7 +337,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
		replicationcontroller.BurstReplicas,
	)
	rm.SetEventRecorder(&record.FakeRecorder{})
	go rm.Run(context.TODO(), 3)
	go rm.Run(ctx, 3)

	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -358,13 +358,13 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	go resourceQuotaController.Run(2, controllerCh)
	go resourceQuotaController.Run(ctx, 2)

	// Periodically resync the quota controller to detect new resource types
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh)
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Done())

	externalInformers.Start(controllerCh)
	informers.Start(controllerCh)
	externalInformers.Start(ctx.Done())
	informers.Start(ctx.Done())
	close(informersStarted)

	// try to create a pod
@@ -382,7 +382,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
			},
		},
	}
	if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err == nil {
	if _, err := clientset.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err == nil {
		t.Fatalf("expected error for insufficient quota")
	}

@@ -405,7 +405,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
	// attempt to create a new pod once the quota is propagated
	err = wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
		// retry until we succeed (to allow time for all changes to propagate)
		if _, err := clientset.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err == nil {
		if _, err := clientset.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err == nil {
			return true, nil
		}
		return false, nil
@@ -456,8 +456,8 @@ func TestQuotaLimitService(t *testing.T) {
	ns := framework.CreateTestingNamespace("quota", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	controllerCh := make(chan struct{})
	defer close(controllerCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	informers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
	rm := replicationcontroller.NewReplicationManager(
@@ -467,7 +467,7 @@ func TestQuotaLimitService(t *testing.T) {
		replicationcontroller.BurstReplicas,
	)
	rm.SetEventRecorder(&record.FakeRecorder{})
	go rm.Run(context.TODO(), 3)
	go rm.Run(ctx, 3)

	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -488,13 +488,13 @@ func TestQuotaLimitService(t *testing.T) {
	if err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
	go resourceQuotaController.Run(2, controllerCh)
	go resourceQuotaController.Run(ctx, 2)

	// Periodically resync the quota controller to detect new resource types
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh)
	go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Done())

	externalInformers.Start(controllerCh)
	informers.Start(controllerCh)
	externalInformers.Start(ctx.Done())
	informers.Start(ctx.Done())
	close(informersStarted)

	// now create a covering quota
@@ -517,14 +517,14 @@ func TestQuotaLimitService(t *testing.T) {

	// Creating the first node port service should succeed
	nodePortService := newService("np-svc", v1.ServiceTypeNodePort, true)
	_, err = clientset.CoreV1().Services(ns.Name).Create(context.TODO(), nodePortService, metav1.CreateOptions{})
	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, nodePortService, metav1.CreateOptions{})
	if err != nil {
		t.Errorf("creating first node port Service should not have returned error: %v", err)
	}

	// Creating the first loadbalancer service should succeed
	lbServiceWithNodePort1 := newService("lb-svc-withnp1", v1.ServiceTypeLoadBalancer, true)
	_, err = clientset.CoreV1().Services(ns.Name).Create(context.TODO(), lbServiceWithNodePort1, metav1.CreateOptions{})
	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, lbServiceWithNodePort1, metav1.CreateOptions{})
	if err != nil {
		t.Errorf("creating first loadbalancer Service should not have returned error: %v", err)
	}
@@ -543,7 +543,7 @@ func TestQuotaLimitService(t *testing.T) {

	// Creating a loadbalancer Service without node ports should succeed
	lbServiceWithoutNodePort1 := newService("lb-svc-wonp1", v1.ServiceTypeLoadBalancer, false)
	_, err = clientset.CoreV1().Services(ns.Name).Create(context.TODO(), lbServiceWithoutNodePort1, metav1.CreateOptions{})
	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, lbServiceWithoutNodePort1, metav1.CreateOptions{})
	if err != nil {
		t.Errorf("creating another loadbalancer Service without node ports should not have returned error: %v", err)
	}
@@ -562,7 +562,7 @@ func TestQuotaLimitService(t *testing.T) {

	// Creating a ClusterIP Service should succeed
	clusterIPService1 := newService("clusterip-svc1", v1.ServiceTypeClusterIP, false)
	_, err = clientset.CoreV1().Services(ns.Name).Create(context.TODO(), clusterIPService1, metav1.CreateOptions{})
	_, err = clientset.CoreV1().Services(ns.Name).Create(ctx, clusterIPService1, metav1.CreateOptions{})
	if err != nil {
		t.Errorf("creating a cluster IP Service should not have returned error: %v", err)
	}

@@ -83,6 +83,7 @@ func TestTaintNodeByCondition(t *testing.T) {

	// Start NodeLifecycleController for taint.
	nc, err := nodelifecycle.NewNodeLifecycleController(
		context.TODO(),
		externalInformers.Coordination().V1().Leases(),
		externalInformers.Core().V1().Pods(),
		externalInformers.Core().V1().Nodes(),
@@ -109,7 +110,7 @@ func TestTaintNodeByCondition(t *testing.T) {
	testutils.SyncInformerFactory(testCtx)

	// Run all controllers
	go nc.Run(testCtx.Ctx.Done())
	go nc.Run(testCtx.Ctx)
	go testCtx.Scheduler.Run(testCtx.Ctx)

	// -------------------------------------------

@@ -162,10 +162,10 @@ func Test_ServiceLoadBalancerEnableLoadBalancerClass(t *testing.T) {

	controller, cloud, informer := newServiceController(t, client)

	stopCh := make(chan struct{})
	informer.Start(stopCh)
	go controller.Run(stopCh, 1)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informer.Start(ctx.Done())
	go controller.Run(ctx, 1)

	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
@@ -180,7 +180,7 @@ func Test_ServiceLoadBalancerEnableLoadBalancerClass(t *testing.T) {
		},
	}

	_, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
	_, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Error creating test service: %v", err)
	}
@@ -211,10 +211,10 @@ func Test_ServiceLoadBalancerEnableLoadBalancerClassThenUpdateLoadBalancerClass(

	controller, cloud, informer := newServiceController(t, client)

	stopCh := make(chan struct{})
	informer.Start(stopCh)
	go controller.Run(stopCh, 1)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informer.Start(ctx.Done())
	go controller.Run(ctx, 1)

	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
@@ -229,7 +229,7 @@ func Test_ServiceLoadBalancerEnableLoadBalancerClassThenUpdateLoadBalancerClass(
		},
	}

	service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
	service, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Error creating test service: %v", err)
	}
@@ -239,7 +239,7 @@ func Test_ServiceLoadBalancerEnableLoadBalancerClassThenUpdateLoadBalancerClass(
	}

	service.Spec.LoadBalancerClass = utilpointer.StringPtr("test.com/update")
	_, err = client.CoreV1().Services(ns.Name).Update(context.TODO(), service, metav1.UpdateOptions{})
	_, err = client.CoreV1().Services(ns.Name).Update(ctx, service, metav1.UpdateOptions{})
	if err == nil {
		t.Fatal("updating the test service's load balancer class should have returned an error")
	}

@@ -408,9 +408,9 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
	_, _, kubeAPIServerCloseFn := framework.RunAnAPIServerUsingServer(controlPlaneConfig, apiServer, h)

	// Start the service account and service account token controllers
	stopCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	stop := func() {
		close(stopCh)
		cancel()
		kubeAPIServerCloseFn()
		apiServer.Close()
	}
@@ -428,7 +428,7 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
	if err != nil {
		return rootClientset, clientConfig, stop, err
	}
	go tokenController.Run(1, stopCh)
	go tokenController.Run(1, ctx.Done())

	serviceAccountController, err := serviceaccountcontroller.NewServiceAccountsController(
		informers.Core().V1().ServiceAccounts(),
@@ -439,9 +439,9 @@ func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclie
	if err != nil {
		return rootClientset, clientConfig, stop, err
	}
	informers.Start(stopCh)
	externalInformers.Start(stopCh)
	go serviceAccountController.Run(5, stopCh)
	informers.Start(ctx.Done())
	externalInformers.Start(ctx.Done())
	go serviceAccountController.Run(ctx, 5)

	return rootClientset, clientConfig, stop, nil
}

@@ -64,11 +64,11 @@ func TestStorageVersionGarbageCollection(t *testing.T) {

	controller := storageversiongc.NewStorageVersionGC(kubeclient, leaseInformer, storageVersionInformer)

	stopCh := make(chan struct{})
	defer close(stopCh)
	go leaseInformer.Informer().Run(stopCh)
	go storageVersionInformer.Informer().Run(stopCh)
	go controller.Run(stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go leaseInformer.Informer().Run(ctx.Done())
	go storageVersionInformer.Informer().Run(ctx.Done())
	go controller.Run(ctx)

	createTestAPIServerIdentityLease(t, kubeclient, idA)
	createTestAPIServerIdentityLease(t, kubeclient, idB)

@@ -141,10 +141,10 @@ func TestTTLAnnotations(t *testing.T) {
	nodeInformer := informers.Core().V1().Nodes()
	ttlc := ttl.NewTTLController(nodeInformer, testClient)

	stopCh := make(chan struct{})
	defer close(stopCh)
	go nodeInformer.Informer().Run(stopCh)
	go ttlc.Run(1, stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go nodeInformer.Informer().Run(ctx.Done())
	go ttlc.Run(ctx, 1)

	// Create 100 nodes; all should have the annotation equal to 0.
	createNodes(t, testClient, 0, 100)

@@ -210,7 +210,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
	waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 80*time.Second, "expected 0 pods in dsw after pod delete", 0)
}

func initCSIObjects(stopCh chan struct{}, informers clientgoinformers.SharedInformerFactory) {
func initCSIObjects(stopCh <-chan struct{}, informers clientgoinformers.SharedInformerFactory) {
	if utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
		go informers.Storage().V1().CSINodes().Informer().Run(stopCh)
	}
@@ -593,12 +593,12 @@ func TestPVCBoundWithADC(t *testing.T) {
	}

	// start controller loop
	stopCh := make(chan struct{})
	informers.Start(stopCh)
	informers.WaitForCacheSync(stopCh)
	initCSIObjects(stopCh, informers)
	go ctrl.Run(stopCh)
	go pvCtrl.Run(stopCh)
	ctx, cancel := context.WithCancel(context.Background())
	informers.Start(ctx.Done())
	informers.WaitForCacheSync(ctx.Done())
	initCSIObjects(ctx.Done(), informers)
	go ctrl.Run(ctx.Done())
	go pvCtrl.Run(ctx)

	waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4)
	// Give attachdetach controller enough time to populate pods into DSWP.
@@ -608,7 +608,7 @@ func TestPVCBoundWithADC(t *testing.T) {
		createPVForPVC(t, testClient, pvc)
	}
	waitForPodFuncInDSWP(t, ctrl.GetDesiredStateOfWorld(), 60*time.Second, "expected 4 pods in dsw after PVCs are bound", 4)
	close(stopCh)
	cancel()
}

// Create PV for PVC; the pv controller will bind them together.
 
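The initCSIObjects signature change above is forced by the type system: ctx.Done() returns a receive-only <-chan struct{}, which cannot be passed where a bidirectional chan struct{} is expected. A compile-level sketch (names are illustrative):

package main

import "context"

// Loosening the parameter to receive-only lets callers pass either a plain
// stop channel or ctx.Done(); the function only ever receives from it anyway.
func waitForStop(stopCh <-chan struct{}) {
	<-stopCh
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // close the Done channel immediately for this demo

	waitForStop(ctx.Done()) // compiles only because the parameter is <-chan struct{}

	legacy := make(chan struct{})
	close(legacy)
	waitForStop(legacy) // a bidirectional channel still converts implicitly
}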
@@ -119,10 +119,10 @@ func TestPersistentVolumeRecycler(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go ctrl.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go ctrl.Run(ctx)
	defer cancel()

	// This PV will be claimed, released, and recycled.
	pv := createPV("fake-pv-recycler", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRecycle)
@@ -174,10 +174,10 @@ func TestPersistentVolumeDeleter(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go ctrl.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go ctrl.Run(ctx)
	defer cancel()

	// This PV will be claimed, released, and deleted.
	pv := createPV("fake-pv-deleter", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimDelete)
@@ -234,10 +234,10 @@ func TestPersistentVolumeBindRace(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go ctrl.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go ctrl.Run(ctx)
	defer cancel()

	pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
	pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, "")
@@ -304,10 +304,10 @@ func TestPersistentVolumeClaimLabelSelector(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go controller.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go controller.Run(ctx)
	defer cancel()

	var (
		err     error
@@ -385,10 +385,10 @@ func TestPersistentVolumeClaimLabelSelectorMatchExpressions(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go controller.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go controller.Run(ctx)
	defer cancel()

	var (
		err     error
@@ -485,10 +485,10 @@ func TestPersistentVolumeMultiPVs(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go controller.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go controller.Run(ctx)
	defer cancel()

	maxPVs := getObjectCount()
	pvs := make([]*v1.PersistentVolume, maxPVs)
@@ -575,10 +575,10 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	controllerStopCh := make(chan struct{})
	informers.Start(controllerStopCh)
	go binder.Run(controllerStopCh)
	defer close(controllerStopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go binder.Run(ctx)
	defer cancel()

	objCount := getObjectCount()
	pvs := make([]*v1.PersistentVolume, objCount)
@@ -788,10 +788,10 @@ func TestPersistentVolumeControllerStartup(t *testing.T) {
	}

	// Start the controller when all PVs and PVCs are already saved in etcd
	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go binder.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go binder.Run(ctx)
	defer cancel()

	// wait for at least two sync periods for changes. No volume should be
	// Released and no claim should be Lost during this time.
@@ -876,10 +876,10 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
	}
	testClient.StorageV1().StorageClasses().Create(context.TODO(), &storageClass, metav1.CreateOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go binder.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go binder.Run(ctx)
	defer cancel()

	objCount := getObjectCount()
	pvcs := make([]*v1.PersistentVolumeClaim, objCount)
@@ -959,10 +959,10 @@ func TestPersistentVolumeMultiPVsDiffAccessModes(t *testing.T) {
	// non-namespaced objects (PersistentVolumes).
	defer testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go controller.Run(stopCh)
	defer close(stopCh)
	ctx, cancel := context.WithCancel(context.TODO())
	informers.Start(ctx.Done())
	go controller.Run(ctx)
	defer cancel()

	// This PV will be claimed, released, and deleted
	pvRwo := createPV("pv-rwo", "/tmp/foo", "10G",
 
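The persistent-volume tests derive their cancellable context from context.TODO() rather than context.Background(); both are empty root contexts, so WithCancel behaves identically on either, and the single-argument Run(ctx) shape suits controllers that manage their worker count internally. A sketch with a stand-in type:

package main

import (
	"context"
	"time"
)

// pvStyleController mirrors controllers whose Run takes only a context;
// concurrency is an internal detail rather than a Run parameter.
type pvStyleController struct{}

func (pvStyleController) Run(ctx context.Context) { <-ctx.Done() }

func main() {
	// context.TODO() and context.Background() both return empty root
	// contexts; the cancellation wiring is the same either way.
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel() // stops the controller at function exit, as in the tests above

	go pvStyleController{}.Run(ctx)
	time.Sleep(10 * time.Millisecond)
}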
@@ -1004,7 +1004,7 @@ func TestCapacity(t *testing.T) {
// on provision failure.
func TestRescheduleProvisioning(t *testing.T) {
	// Set feature gates
	controllerCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())

	testCtx := initTestAPIServer(t, "reschedule-volume-provision", nil)

@@ -1012,7 +1012,7 @@ func TestRescheduleProvisioning(t *testing.T) {
	ns := testCtx.ns.Name

	defer func() {
		close(controllerCh)
		cancel()
		deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
		testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
		testCtx.closeFn()
@@ -1051,9 +1051,9 @@ func TestRescheduleProvisioning(t *testing.T) {
	}

	// Start controller.
	go ctrl.Run(controllerCh)
	informerFactory.Start(controllerCh)
	informerFactory.WaitForCacheSync(controllerCh)
	go ctrl.Run(ctx)
	informerFactory.Start(ctx.Done())
	informerFactory.WaitForCacheSync(ctx.Done())

	// Validate that the annotation is removed by controller for provision reschedule.
	if err := waitForProvisionAnn(clientset, pvc, false); err != nil {
@@ -1062,18 +1062,18 @@ func TestRescheduleProvisioning(t *testing.T) {
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
	textCtx := initTestSchedulerWithOptions(t, initTestAPIServer(t, nsName, nil), resyncPeriod)
	clientset := textCtx.clientSet
	ns := textCtx.ns.Name
	testCtx := initTestSchedulerWithOptions(t, initTestAPIServer(t, nsName, nil), resyncPeriod)
	clientset := testCtx.clientSet
	ns := testCtx.ns.Name

	ctrl, informerFactory, err := initPVController(t, textCtx, provisionDelaySeconds)
	ctrl, informerFactory, err := initPVController(t, testCtx, provisionDelaySeconds)
	if err != nil {
		t.Fatalf("Failed to create PV controller: %v", err)
	}
	go ctrl.Run(textCtx.ctx.Done())
	go ctrl.Run(testCtx.ctx)
	// Start informer factory after all controllers are configured and running.
	informerFactory.Start(textCtx.ctx.Done())
	informerFactory.WaitForCacheSync(textCtx.ctx.Done())
	informerFactory.Start(testCtx.ctx.Done())
	informerFactory.WaitForCacheSync(testCtx.ctx.Done())

	// Create shared objects
	// Create nodes
@@ -1094,11 +1094,11 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
	return &testConfig{
		client: clientset,
		ns:     ns,
		stop:   textCtx.ctx.Done(),
		stop:   testCtx.ctx.Done(),
		teardown: func() {
			klog.Infof("test cluster %q start to tear down", ns)
			deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
			cleanupTest(t, textCtx)
			cleanupTest(t, testCtx)
		},
	}
}

@@ -54,7 +54,7 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPe
	if err != nil {
		t.Fatalf("Failed to create PV controller: %v", err)
	}
	go ctrl.Run(textCtx.ctx.Done())
	go ctrl.Run(context.TODO())

	// Start informer factory after all controllers are configured and running.
	informerFactory.Start(textCtx.ctx.Done())