Merge pull request #105377 from damemi/wire-contexts-apps

Wire contexts to Apps controllers
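In short, this change threads a context.Context from the kube-controller-manager start functions down into the Apps controllers (DaemonSet, StatefulSet, ReplicaSet, Deployment and ReplicationController), replacing the bare stop channels and many of the context.TODO() call sites along the sync paths. The hunks below repeat the same mechanical pattern; as a minimal, self-contained sketch of the signature migration (names here are illustrative, not taken from the PR):

package main

import (
	"context"
	"fmt"
	"time"
)

type Controller struct{}

// Old style: the caller hands in a stop channel.
func (c *Controller) RunOld(workers int, stopCh <-chan struct{}) {
	<-stopCh
}

// New style: the caller hands in a context; ctx.Done() replaces stopCh, and the
// same ctx can be forwarded to API calls made during a sync.
func (c *Controller) Run(ctx context.Context, workers int) {
	fmt.Println("starting", workers, "workers")
	<-ctx.Done()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := &Controller{}
	go c.Run(ctx, 2)
	time.Sleep(10 * time.Millisecond)
	cancel() // stops the controller, where previously close(stopCh) would
}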
@@ -45,7 +45,7 @@ func startDaemonSetController(ctx context.Context, controllerContext ControllerC
 	if err != nil {
 		return nil, true, fmt.Errorf("error creating DaemonSets controller: %v", err)
 	}
-	go dsc.Run(int(controllerContext.ComponentConfig.DaemonSetController.ConcurrentDaemonSetSyncs), ctx.Done())
+	go dsc.Run(ctx, int(controllerContext.ComponentConfig.DaemonSetController.ConcurrentDaemonSetSyncs))
 	return nil, true, nil
 }
 
@@ -56,7 +56,7 @@ func startStatefulSetController(ctx context.Context, controllerContext Controlle
 		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		controllerContext.InformerFactory.Apps().V1().ControllerRevisions(),
 		controllerContext.ClientBuilder.ClientOrDie("statefulset-controller"),
-	).Run(int(controllerContext.ComponentConfig.StatefulSetController.ConcurrentStatefulSetSyncs), ctx.Done())
+	).Run(ctx, int(controllerContext.ComponentConfig.StatefulSetController.ConcurrentStatefulSetSyncs))
 	return nil, true, nil
 }
 
@@ -66,7 +66,7 @@ func startReplicaSetController(ctx context.Context, controllerContext Controller
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.ClientBuilder.ClientOrDie("replicaset-controller"),
 		replicaset.BurstReplicas,
-	).Run(int(controllerContext.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Done())
+	).Run(ctx, int(controllerContext.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs))
 	return nil, true, nil
 }
 
@@ -80,6 +80,6 @@ func startDeploymentController(ctx context.Context, controllerContext Controller
 	if err != nil {
 		return nil, true, fmt.Errorf("error creating Deployment controller: %v", err)
 	}
-	go dc.Run(int(controllerContext.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Done())
+	go dc.Run(ctx, int(controllerContext.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs))
 	return nil, true, nil
 }
@@ -399,7 +399,7 @@ func startReplicationController(ctx context.Context, controllerContext Controlle
 		controllerContext.InformerFactory.Core().V1().ReplicationControllers(),
 		controllerContext.ClientBuilder.ClientOrDie("replication-controller"),
 		replicationcontroller.BurstReplicas,
-	).Run(int(controllerContext.ComponentConfig.ReplicationController.ConcurrentRCSyncs), ctx.Done())
+	).Run(ctx, int(controllerContext.ComponentConfig.ReplicationController.ConcurrentRCSyncs))
 	return nil, true, nil
 }
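These start helpers already receive a ctx (it is visible in the hunk headers), so the change at the call sites is only in which form of it gets forwarded: the ctx itself rather than ctx.Done(). A minimal sketch of that shape, with an invented controller type standing in for the real ones:

package main

import "context"

// exampleController stands in for one of the Apps controllers; only the Run
// signature matters for these hunks.
type exampleController struct{}

func (c *exampleController) Run(ctx context.Context, workers int) { <-ctx.Done() }

// startExampleController mirrors the start helpers above: it already holds a
// ctx, so after this change it forwards ctx rather than ctx.Done().
func startExampleController(ctx context.Context, workers int) {
	c := &exampleController{}
	// before: go c.Run(workers, ctx.Done())
	go c.Run(ctx, workers)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	startExampleController(ctx, 2)
	cancel()
}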
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"sync"
@@ -38,13 +39,13 @@ type BaseControllerRefManager struct {
 
 	canAdoptErr  error
 	canAdoptOnce sync.Once
-	CanAdoptFunc func() error
+	CanAdoptFunc func(ctx context.Context) error
 }
 
-func (m *BaseControllerRefManager) CanAdopt() error {
+func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error {
 	m.canAdoptOnce.Do(func() {
 		if m.CanAdoptFunc != nil {
-			m.canAdoptErr = m.CanAdoptFunc()
+			m.canAdoptErr = m.CanAdoptFunc(ctx)
 		}
 	})
 	return m.canAdoptErr
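CanAdoptFunc is the hook a controller installs to re-check its owner before adopting orphans; it now receives the caller's context, so whatever live read it performs can be cancelled together with the sync that triggered it. A self-contained sketch of the same once-guarded, context-aware check (a simplified stand-in, not the actual manager type):

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

type refManager struct {
	canAdoptErr  error
	canAdoptOnce sync.Once
	// CanAdoptFunc takes the per-sync context. Note the result is still cached
	// by sync.Once, so it is computed with whichever context arrives first.
	CanAdoptFunc func(ctx context.Context) error
}

func (m *refManager) CanAdopt(ctx context.Context) error {
	m.canAdoptOnce.Do(func() {
		if m.CanAdoptFunc != nil {
			m.canAdoptErr = m.CanAdoptFunc(ctx)
		}
	})
	return m.canAdoptErr
}

func main() {
	m := &refManager{CanAdoptFunc: func(ctx context.Context) error {
		if err := ctx.Err(); err != nil {
			return fmt.Errorf("can't adopt: %w", err)
		}
		return nil
	}}
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(errors.Is(m.CanAdopt(ctx), context.Canceled)) // true: the check saw the cancelled ctx
}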
@@ -65,7 +66,7 @@ func (m *BaseControllerRefManager) CanAdopt() error {
 // own the object.
 //
 // No reconciliation will be attempted if the controller is being deleted.
-func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
+func (m *BaseControllerRefManager) ClaimObject(ctx context.Context, obj metav1.Object, match func(metav1.Object) bool, adopt func(context.Context, metav1.Object) error, release func(metav1.Object) error) (bool, error) {
 	controllerRef := metav1.GetControllerOfNoCopy(obj)
 	if controllerRef != nil {
 		if controllerRef.UID != m.Controller.GetUID() {
@@ -107,7 +108,7 @@ func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(met
 		return false, nil
 	}
 	// Selector matches. Try to adopt.
-	if err := adopt(obj); err != nil {
+	if err := adopt(ctx, obj); err != nil {
 		// If the pod no longer exists, ignore the error.
 		if errors.IsNotFound(err) {
 			return false, nil
@@ -143,7 +144,7 @@ func NewPodControllerRefManager(
 	controller metav1.Object,
 	selector labels.Selector,
 	controllerKind schema.GroupVersionKind,
-	canAdopt func() error,
+	canAdopt func(ctx context.Context) error,
 	finalizers ...string,
 ) *PodControllerRefManager {
 	return &PodControllerRefManager{
@@ -173,7 +174,7 @@ func NewPodControllerRefManager(
 //
 // If the error is nil, either the reconciliation succeeded, or no
 // reconciliation was necessary. The list of Pods that you now own is returned.
-func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
+func (m *PodControllerRefManager) ClaimPods(ctx context.Context, pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
 	var claimed []*v1.Pod
 	var errlist []error
 
@@ -190,15 +191,15 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
 		}
 		return true
 	}
-	adopt := func(obj metav1.Object) error {
-		return m.AdoptPod(obj.(*v1.Pod))
+	adopt := func(ctx context.Context, obj metav1.Object) error {
+		return m.AdoptPod(ctx, obj.(*v1.Pod))
 	}
 	release := func(obj metav1.Object) error {
 		return m.ReleasePod(obj.(*v1.Pod))
 	}
 
 	for _, pod := range pods {
-		ok, err := m.ClaimObject(pod, match, adopt, release)
+		ok, err := m.ClaimObject(ctx, pod, match, adopt, release)
 		if err != nil {
 			errlist = append(errlist, err)
 			continue
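Note the asymmetry in ClaimObject's new signature: only the adopt callback gains a context parameter, while release keeps its old shape. Adoption is the path wired through CanAdopt in this change; release is left untouched here. A small illustration of the two closure shapes ClaimObject now expects (toy types, not metav1.Object):

package main

import (
	"context"
	"fmt"
)

// Toy stand-ins for the claimed object and the ref manager.
type object struct{ name string }

type manager struct{}

func (m *manager) AdoptPod(ctx context.Context, o *object) error { return ctx.Err() }
func (m *manager) ReleasePod(o *object) error                    { return nil }

func main() {
	m := &manager{}
	// adopt carries the per-sync context; release is unchanged.
	adopt := func(ctx context.Context, obj *object) error { return m.AdoptPod(ctx, obj) }
	release := func(obj *object) error { return m.ReleasePod(obj) }

	ctx := context.Background()
	fmt.Println(adopt(ctx, &object{name: "pod1"}), release(&object{name: "pod2"}))
}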
@@ -212,8 +213,8 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
 
 // AdoptPod sends a patch to take control of the pod. It returns the error if
 // the patching fails.
-func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
-	if err := m.CanAdopt(); err != nil {
+func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) error {
+	if err := m.CanAdopt(ctx); err != nil {
 		return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
 	}
 	// Note that ValidateOwnerReferences() will reject this patch if another
@@ -283,7 +284,7 @@ func NewReplicaSetControllerRefManager(
 	controller metav1.Object,
 	selector labels.Selector,
 	controllerKind schema.GroupVersionKind,
-	canAdopt func() error,
+	canAdopt func(ctx context.Context) error,
 ) *ReplicaSetControllerRefManager {
 	return &ReplicaSetControllerRefManager{
 		BaseControllerRefManager: BaseControllerRefManager{
@@ -309,22 +310,22 @@ func NewReplicaSetControllerRefManager(
 // If the error is nil, either the reconciliation succeeded, or no
 // reconciliation was necessary. The list of ReplicaSets that you now own is
 // returned.
-func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
+func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(ctx context.Context, sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
 	var claimed []*apps.ReplicaSet
 	var errlist []error
 
 	match := func(obj metav1.Object) bool {
 		return m.Selector.Matches(labels.Set(obj.GetLabels()))
 	}
-	adopt := func(obj metav1.Object) error {
-		return m.AdoptReplicaSet(obj.(*apps.ReplicaSet))
+	adopt := func(ctx context.Context, obj metav1.Object) error {
+		return m.AdoptReplicaSet(ctx, obj.(*apps.ReplicaSet))
 	}
 	release := func(obj metav1.Object) error {
 		return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet))
 	}
 
 	for _, rs := range sets {
-		ok, err := m.ClaimObject(rs, match, adopt, release)
+		ok, err := m.ClaimObject(ctx, rs, match, adopt, release)
 		if err != nil {
 			errlist = append(errlist, err)
 			continue
@@ -338,8 +339,8 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSe
 
 // AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
 // the error if the patching fails.
-func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error {
-	if err := m.CanAdopt(); err != nil {
+func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(ctx context.Context, rs *apps.ReplicaSet) error {
+	if err := m.CanAdopt(ctx); err != nil {
 		return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
 	}
 	// Note that ValidateOwnerReferences() will reject this patch if another
@@ -381,9 +382,9 @@ func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.Repl
 //
 // The CanAdopt() function calls getObject() to fetch the latest value,
 // and denies adoption attempts if that object has a non-nil DeletionTimestamp.
-func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
-	return func() error {
-		obj, err := getObject()
+func RecheckDeletionTimestamp(getObject func(context.Context) (metav1.Object, error)) func(context.Context) error {
+	return func(ctx context.Context) error {
+		obj, err := getObject(ctx)
 		if err != nil {
 			return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
 		}
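RecheckDeletionTimestamp keeps its closure-factory shape, but both the getter it wraps and the checker it returns are now context-aware, so the fresh read it triggers at adoption time runs under the caller's context instead of context.TODO(). A simplified, self-contained sketch of the same pattern (the object type is a stand-in, not metav1.Object):

package main

import (
	"context"
	"fmt"
	"time"
)

type object struct {
	name              string
	deletionTimestamp *time.Time
}

// recheckDeletionTimestamp mirrors the new shape: it takes a context-aware
// getter and returns a context-aware check, so the caller's ctx reaches the
// uncached read performed before adopting.
func recheckDeletionTimestamp(getObject func(context.Context) (*object, error)) func(context.Context) error {
	return func(ctx context.Context) error {
		obj, err := getObject(ctx)
		if err != nil {
			return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
		}
		if obj.deletionTimestamp != nil {
			return fmt.Errorf("%v is being deleted and can't adopt", obj.name)
		}
		return nil
	}
}

func main() {
	fresh := func(ctx context.Context) (*object, error) {
		// In the real controllers this is a live API GET issued with ctx.
		return &object{name: "example-ds"}, nil
	}
	check := recheckDeletionTimestamp(fresh)
	fmt.Println(check(context.Background())) // <nil>
}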
@@ -421,7 +422,7 @@ func NewControllerRevisionControllerRefManager(
 	controller metav1.Object,
 	selector labels.Selector,
 	controllerKind schema.GroupVersionKind,
-	canAdopt func() error,
+	canAdopt func(ctx context.Context) error,
 ) *ControllerRevisionControllerRefManager {
 	return &ControllerRevisionControllerRefManager{
 		BaseControllerRefManager: BaseControllerRefManager{
@@ -447,22 +448,22 @@ func NewControllerRevisionControllerRefManager(
 // If the error is nil, either the reconciliation succeeded, or no
 // reconciliation was necessary. The list of ControllerRevisions that you now own is
 // returned.
-func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
+func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(ctx context.Context, histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
 	var claimed []*apps.ControllerRevision
 	var errlist []error
 
 	match := func(obj metav1.Object) bool {
 		return m.Selector.Matches(labels.Set(obj.GetLabels()))
 	}
-	adopt := func(obj metav1.Object) error {
-		return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
+	adopt := func(ctx context.Context, obj metav1.Object) error {
+		return m.AdoptControllerRevision(ctx, obj.(*apps.ControllerRevision))
 	}
 	release := func(obj metav1.Object) error {
 		return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
 	}
 
 	for _, h := range histories {
-		ok, err := m.ClaimObject(h, match, adopt, release)
+		ok, err := m.ClaimObject(ctx, h, match, adopt, release)
 		if err != nil {
 			errlist = append(errlist, err)
 			continue
@@ -476,8 +477,8 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor
 
 // AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
 // the patching fails.
-func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
-	if err := m.CanAdopt(); err != nil {
+func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
+	if err := m.CanAdopt(ctx); err != nil {
 		return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
 	}
 	// Note that ValidateOwnerReferences() will reject this patch if another
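Collected from the hunks above, the ref-manager surface that changes in this commit is:

func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error
func (m *BaseControllerRefManager) ClaimObject(ctx context.Context, obj metav1.Object, match func(metav1.Object) bool, adopt func(context.Context, metav1.Object) error, release func(metav1.Object) error) (bool, error)
func (m *PodControllerRefManager) ClaimPods(ctx context.Context, pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error)
func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) error
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(ctx context.Context, sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error)
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(ctx context.Context, rs *apps.ReplicaSet) error
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(ctx context.Context, histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error)
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(ctx context.Context, history *apps.ControllerRevision) error
func RecheckDeletionTimestamp(getObject func(context.Context) (metav1.Object, error)) func(context.Context) error

The constructors now accept canAdopt func(ctx context.Context) error, while the Release* methods keep their previous signatures.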
@@ -17,6 +17,7 @@ limitations under the License.
 package controller
 
 import (
+	"context"
 	"strings"
 	"testing"
 
@@ -73,7 +74,7 @@ func TestClaimPods(t *testing.T) {
 				&v1.ReplicationController{},
 				productionLabelSelector,
 				controllerKind,
-				func() error { return nil }),
+				func(ctx context.Context) error { return nil }),
 			pods:    []*v1.Pod{newPod("pod1", productionLabel, nil), newPod("pod2", testLabel, nil)},
 			claimed: []*v1.Pod{newPod("pod1", productionLabel, nil)},
 			patches: 1,
@@ -89,7 +90,7 @@ func TestClaimPods(t *testing.T) {
 					&controller,
 					productionLabelSelector,
 					controllerKind,
-					func() error { return nil }),
+					func(ctx context.Context) error { return nil }),
 				pods:    []*v1.Pod{newPod("pod1", productionLabel, nil), newPod("pod2", productionLabel, nil)},
 				claimed: nil,
 			}
@@ -105,7 +106,7 @@ func TestClaimPods(t *testing.T) {
 					&controller,
 					productionLabelSelector,
 					controllerKind,
-					func() error { return nil }),
+					func(ctx context.Context) error { return nil }),
 				pods:    []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", productionLabel, nil)},
 				claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
 			}
@@ -121,7 +122,7 @@ func TestClaimPods(t *testing.T) {
 					&controller,
 					productionLabelSelector,
 					controllerKind,
-					func() error { return nil }),
+					func(ctx context.Context) error { return nil }),
 				pods:    []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", productionLabel, &controller2)},
 				claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
 			}
@@ -135,7 +136,7 @@ func TestClaimPods(t *testing.T) {
 					&controller,
 					productionLabelSelector,
 					controllerKind,
-					func() error { return nil }),
+					func(ctx context.Context) error { return nil }),
 				pods:    []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", testLabel, &controller)},
 				claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
 				patches: 1,
@@ -156,7 +157,7 @@ func TestClaimPods(t *testing.T) {
 					&controller,
 					productionLabelSelector,
 					controllerKind,
-					func() error { return nil }),
+					func(ctx context.Context) error { return nil }),
 				pods:    []*v1.Pod{podToDelete1, podToDelete2},
 				claimed: []*v1.Pod{podToDelete1},
 			}
@@ -170,7 +171,7 @@ func TestClaimPods(t *testing.T) {
 					&controller,
 					productionLabelSelector,
 					controllerKind,
-					func() error { return nil },
+					func(ctx context.Context) error { return nil },
 					"foo-finalizer", "bar-finalizer"),
 				pods:    []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", testLabel, &controller), newPod("pod3", productionLabel, nil)},
 				claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod3", productionLabel, nil)},
@@ -180,7 +181,7 @@ func TestClaimPods(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			claimed, err := test.manager.ClaimPods(test.pods)
+			claimed, err := test.manager.ClaimPods(context.TODO(), test.pods)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
 			}
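On the test side the pattern is uniform: callbacks and entry points that now take a context are exercised with context.TODO(). A minimal table-driven sketch of the same idiom (the function under test is invented purely for illustration):

package main

import (
	"context"
	"testing"
)

// claimNames is a stand-in for a context-taking method such as ClaimPods.
func claimNames(ctx context.Context, names []string) ([]string, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return names, nil
}

func TestClaimNames(t *testing.T) {
	tests := []struct {
		name  string
		input []string
	}{
		{name: "claims everything it is given", input: []string{"pod1", "pod2"}},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// context.TODO() mirrors how the updated unit tests call the new signatures.
			claimed, err := claimNames(context.TODO(), test.input)
			if err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
			if len(claimed) != len(test.input) {
				t.Fatalf("claimed %d, want %d", len(claimed), len(test.input))
			}
		})
	}
}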
@@ -94,7 +94,7 @@ type DaemonSetsController struct {
 	burstReplicas int
 
 	// To allow injection of syncDaemonSet for testing.
-	syncHandler func(dsKey string) error
+	syncHandler func(ctx context.Context, dsKey string) error
 	// used for unit testing
 	enqueueDaemonSet func(ds *apps.DaemonSet)
 	// A TTLCache of pod creates/deletes each ds expects to see
@@ -277,40 +277,40 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 }
 
 // Run begins watching and syncing daemon sets.
-func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
+func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer dsc.queue.ShutDown()
 
 	klog.Infof("Starting daemon sets controller")
 	defer klog.Infof("Shutting down daemon sets controller")
 
-	if !cache.WaitForNamedCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
+	if !cache.WaitForNamedCacheSync("daemon sets", ctx.Done(), dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
 		return
 	}
 
 	for i := 0; i < workers; i++ {
-		go wait.Until(dsc.runWorker, time.Second, stopCh)
+		go wait.UntilWithContext(ctx, dsc.runWorker, time.Second)
 	}
 
-	go wait.Until(dsc.failedPodsBackoff.GC, BackoffGCInterval, stopCh)
+	go wait.Until(dsc.failedPodsBackoff.GC, BackoffGCInterval, ctx.Done())
 
-	<-stopCh
+	<-ctx.Done()
 }
 
-func (dsc *DaemonSetsController) runWorker() {
-	for dsc.processNextWorkItem() {
+func (dsc *DaemonSetsController) runWorker(ctx context.Context) {
+	for dsc.processNextWorkItem(ctx) {
 	}
 }
 
 // processNextWorkItem deals with one key off the queue.  It returns false when it's time to quit.
-func (dsc *DaemonSetsController) processNextWorkItem() bool {
+func (dsc *DaemonSetsController) processNextWorkItem(ctx context.Context) bool {
 	dsKey, quit := dsc.queue.Get()
 	if quit {
 		return false
 	}
 	defer dsc.queue.Done(dsKey)
 
-	err := dsc.syncHandler(dsKey.(string))
+	err := dsc.syncHandler(ctx, dsKey.(string))
 	if err == nil {
 		dsc.queue.Forget(dsKey)
 		return true
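Run now owns its shutdown signal through ctx: the cache-sync wait and the backoff GC (which still uses the channel-based wait.Until) get ctx.Done(), the workers run under wait.UntilWithContext, and each sync receives the same ctx via syncHandler. A stripped-down, standard-library-only sketch of that worker-loop shape (the queue here is a plain channel, not the real workqueue):

package main

import (
	"context"
	"fmt"
	"sync"
)

type controller struct {
	queue chan string
	// syncHandler now receives the context so each sync can pass it on to API calls.
	syncHandler func(ctx context.Context, key string) error
}

func (c *controller) Run(ctx context.Context, workers int) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.runWorker(ctx) }()
	}
	<-ctx.Done() // replaces <-stopCh
	close(c.queue)
	wg.Wait()
}

func (c *controller) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

func (c *controller) processNextWorkItem(ctx context.Context) bool {
	key, ok := <-c.queue
	if !ok {
		return false
	}
	if err := c.syncHandler(ctx, key); err != nil {
		fmt.Println("sync failed:", err)
	}
	return true
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := &controller{
		queue:       make(chan string, 1),
		syncHandler: func(ctx context.Context, key string) error { fmt.Println("synced", key); return nil },
	}
	c.queue <- "default/example-ds"
	go cancel() // in the real controller this is kube-controller-manager shutting down
	c.Run(ctx, 2)
}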
@@ -711,7 +711,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 // This also reconciles ControllerRef by adopting/orphaning.
 // Note that returned Pods are pointers to objects in the cache.
 // If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, error) {
+func (dsc *DaemonSetsController) getDaemonPods(ctx context.Context, ds *apps.DaemonSet) ([]*v1.Pod, error) {
 	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 	if err != nil {
 		return nil, err
@@ -725,8 +725,8 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
 	}
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
-	dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
+	dsNotDeleted := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
+		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -738,15 +738,15 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
 
 	// Use ControllerRefManager to adopt/orphan as needed.
 	cm := controller.NewPodControllerRefManager(dsc.podControl, ds, selector, controllerKind, dsNotDeleted)
-	return cm.ClaimPods(pods)
+	return cm.ClaimPods(ctx, pods)
 }
 
 // getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) created for the nodes.
 // This also reconciles ControllerRef by adopting/orphaning.
 // Note that returned Pods are pointers to objects in the cache.
 // If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
-	claimedPods, err := dsc.getDaemonPods(ds)
+func (dsc *DaemonSetsController) getNodesToDaemonPods(ctx context.Context, ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
+	claimedPods, err := dsc.getDaemonPods(ctx, ds)
 	if err != nil {
 		return nil, err
 	}
@@ -910,9 +910,9 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
 // After figuring out which nodes should run a Pod of ds but not yet running one and
 // which nodes should not run a Pod of ds but currently running one, it calls function
 // syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
-func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
+func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
 	// Find out the pods which are created for the nodes by DaemonSet.
-	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
 	}
@@ -1053,7 +1053,17 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
 	return utilerrors.NewAggregate(errors)
 }
 
-func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int, updateObservedGen bool) error {
+func storeDaemonSetStatus(
+	ctx context.Context,
+	dsClient unversionedapps.DaemonSetInterface,
+	ds *apps.DaemonSet, desiredNumberScheduled,
+	currentNumberScheduled,
+	numberMisscheduled,
+	numberReady,
+	updatedNumberScheduled,
+	numberAvailable,
+	numberUnavailable int,
+	updateObservedGen bool) error {
 	if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
 		int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
 		int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
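With ctx threaded this far, the client-go calls inside the status-update path stop minting their own context.TODO() and use the per-sync context, so a controller shutdown can abort an in-flight UpdateStatus or Get. The replacement is mechanical; sketched against a hypothetical client interface (not the generated DaemonSetInterface):

package main

import (
	"context"
	"fmt"
)

// statusClient is a stand-in for a typed client whose calls take a context.
type statusClient interface {
	UpdateStatus(ctx context.Context, name string) error
}

type fakeClient struct{}

func (fakeClient) UpdateStatus(ctx context.Context, name string) error { return ctx.Err() }

// storeStatus mirrors the change: it accepts ctx and forwards it, where the old
// code called UpdateStatus(context.TODO(), ...).
func storeStatus(ctx context.Context, c statusClient, name string) error {
	return c.UpdateStatus(ctx, name)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate shutdown mid-sync
	fmt.Println(storeStatus(ctx, fakeClient{}, "example-ds")) // context canceled
}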
@@ -1080,7 +1090,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
 		toUpdate.Status.NumberAvailable = int32(numberAvailable)
 		toUpdate.Status.NumberUnavailable = int32(numberUnavailable)
 
-		if _, updateErr = dsClient.UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}); updateErr == nil {
+		if _, updateErr = dsClient.UpdateStatus(ctx, toUpdate, metav1.UpdateOptions{}); updateErr == nil {
 			return nil
 		}
 
@@ -1089,7 +1099,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
 			break
 		}
 		// Update the set with the latest resource version for the next poll
-		if toUpdate, getErr = dsClient.Get(context.TODO(), ds.Name, metav1.GetOptions{}); getErr != nil {
+		if toUpdate, getErr = dsClient.Get(ctx, ds.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return getErr
@@ -1098,9 +1108,9 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
 	return updateErr
 }
 
-func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
+func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
 	klog.V(4).Infof("Updating daemon set status")
-	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
 	}
@@ -1143,7 +1153,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeL
 	}
 	numberUnavailable := desiredNumberScheduled - numberAvailable
 
-	err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
+	err = storeDaemonSetStatus(ctx, dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
 	if err != nil {
 		return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
 	}
@@ -1155,7 +1165,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, nodeL
 	return nil
 }
 
-func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
+func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string) error {
 	startTime := dsc.failedPodsBackoff.Clock.Now()
 
 	defer func() {
@@ -1208,7 +1218,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	}
 
 	// Construct histories of the DaemonSet, and get the hash of current history
-	cur, old, err := dsc.constructHistory(ds)
+	cur, old, err := dsc.constructHistory(ctx, ds)
 	if err != nil {
 		return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err)
 	}
@@ -1216,10 +1226,10 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 
 	if !dsc.expectations.SatisfiedExpectations(dsKey) {
 		// Only update status. Don't raise observedGeneration since controller didn't process object of that generation.
-		return dsc.updateDaemonSetStatus(ds, nodeList, hash, false)
+		return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, false)
 	}
 
-	err = dsc.manage(ds, nodeList, hash)
+	err = dsc.manage(ctx, ds, nodeList, hash)
 	if err != nil {
 		return err
 	}
@@ -1229,19 +1239,19 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 		switch ds.Spec.UpdateStrategy.Type {
 		case apps.OnDeleteDaemonSetStrategyType:
 		case apps.RollingUpdateDaemonSetStrategyType:
-			err = dsc.rollingUpdate(ds, nodeList, hash)
+			err = dsc.rollingUpdate(ctx, ds, nodeList, hash)
 		}
 		if err != nil {
 			return err
 		}
 	}
 
-	err = dsc.cleanupHistory(ds, old)
+	err = dsc.cleanupHistory(ctx, ds, old)
 	if err != nil {
 		return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
 	}
 
-	return dsc.updateDaemonSetStatus(ds, nodeList, hash, true)
+	return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true)
 }
 
 // nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
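syncDaemonSet and everything it calls (manage, rollingUpdate, cleanupHistory, updateDaemonSetStatus) now share a single per-sync context. A compressed sketch of that call chain with the step bodies elided; only the context flow is the point:

package main

import (
	"context"
	"fmt"
)

// Each step takes the same ctx that syncDaemonSet received, so cancelling the
// controller cancels whichever step happens to be running.
func manage(ctx context.Context) error         { return ctx.Err() }
func rollingUpdate(ctx context.Context) error  { return ctx.Err() }
func cleanupHistory(ctx context.Context) error { return ctx.Err() }
func updateStatus(ctx context.Context) error   { return ctx.Err() }

func syncDaemonSet(ctx context.Context, key string) error {
	for _, step := range []func(context.Context) error{manage, rollingUpdate, cleanupHistory, updateStatus} {
		if err := step(ctx); err != nil {
			return fmt.Errorf("sync of %q aborted: %w", key, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(syncDaemonSet(context.Background(), "default/example-ds")) // <nil>
}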
@@ -383,7 +383,7 @@ func expectSyncDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.
 		t.Fatal("could not get key for daemon")
 	}
 
-	err = manager.syncHandler(key)
+	err = manager.syncHandler(context.TODO(), key)
 	if err != nil {
 		t.Log(err)
 	}
@@ -547,7 +547,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 
 	// create of DS adds to queue, processes
 	waitForQueueLength(1, "created DS")
-	ok := dsc.processNextWorkItem()
+	ok := dsc.processNextWorkItem(context.TODO())
 	if !ok {
 		t.Fatal("queue is shutting down")
 	}
@@ -576,7 +576,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 
 	// process updates DS, update adds to queue
 	waitForQueueLength(1, "updated DS")
-	ok = dsc.processNextWorkItem()
+	ok = dsc.processNextWorkItem(context.TODO())
 	if !ok {
 		t.Fatal("queue is shutting down")
 	}
@@ -624,7 +624,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 	}
 
 	waitForQueueLength(1, "recreated DS")
-	ok = dsc.processNextWorkItem()
+	ok = dsc.processNextWorkItem(context.TODO())
 	if !ok {
 		t.Fatal("Queue is shutting down!")
 	}
@@ -2797,7 +2797,7 @@ func TestGetNodesToDaemonPods(t *testing.T) {
 				}
 			}
 
-			nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds)
+			nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds)
 			if err != nil {
 				t.Fatalf("getNodesToDaemonPods() error: %v", err)
 			}
@@ -3552,7 +3552,7 @@ func TestStoreDaemonSetStatus(t *testing.T) {
 				}
 				return true, ds, nil
 			})
-			if err := storeDaemonSetStatus(fakeClient.AppsV1().DaemonSets("default"), ds, 2, 2, 2, 2, 2, 2, 2, true); err != tt.expectedError {
+			if err := storeDaemonSetStatus(context.TODO(), fakeClient.AppsV1().DaemonSets("default"), ds, 2, 2, 2, 2, 2, 2, 2, true); err != tt.expectedError {
 				t.Errorf("storeDaemonSetStatus() got %v, expected %v", err, tt.expectedError)
 			}
 			if getCalled != tt.expectedGetCalled {
@@ -40,8 +40,8 @@ import (
 
 // rollingUpdate identifies the set of old pods to delete, or additional pods to create on nodes,
 // remaining within the constraints imposed by the update strategy.
-func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
-	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
+	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
 	}
@@ -234,10 +234,10 @@ func findUpdatedPodsOnNode(ds *apps.DaemonSet, podsOnNode []*v1.Pod, hash string
 // constructHistory finds all histories controlled by the given DaemonSet, and
 // update current history revision number, or create current history if need to.
 // It also deduplicates current history, and adds missing unique labels to existing histories.
-func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
+func (dsc *DaemonSetsController) constructHistory(ctx context.Context, ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
 	var histories []*apps.ControllerRevision
 	var currentHistories []*apps.ControllerRevision
-	histories, err = dsc.controlledHistories(ds)
+	histories, err = dsc.controlledHistories(ctx, ds)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -247,7 +247,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
 		if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
 			toUpdate := history.DeepCopy()
 			toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
-			history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
+			history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
 			if err != nil {
 				return nil, nil, err
 			}
@@ -269,12 +269,12 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
 	switch len(currentHistories) {
 	case 0:
 		// Create a new history if the current one isn't found
-		cur, err = dsc.snapshot(ds, currRevision)
+		cur, err = dsc.snapshot(ctx, ds, currRevision)
 		if err != nil {
 			return nil, nil, err
 		}
 	default:
-		cur, err = dsc.dedupCurHistories(ds, currentHistories)
+		cur, err = dsc.dedupCurHistories(ctx, ds, currentHistories)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -282,7 +282,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
 		if cur.Revision < currRevision {
 			toUpdate := cur.DeepCopy()
 			toUpdate.Revision = currRevision
-			_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
+			_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
 			if err != nil {
 				return nil, nil, err
 			}
@@ -291,8 +291,8 @@ func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps
 	return cur, old, err
 }
 
-func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
-	nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
+func (dsc *DaemonSetsController) cleanupHistory(ctx context.Context, ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
+	nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
 	if err != nil {
 		return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
 	}
@@ -323,7 +323,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.
 			continue
 		}
 		// Clean up
-		err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), history.Name, metav1.DeleteOptions{})
+		err := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(ctx, history.Name, metav1.DeleteOptions{})
 		if err != nil {
 			return err
 		}
@@ -343,7 +343,7 @@ func maxRevision(histories []*apps.ControllerRevision) int64 {
 	return max
 }
 
-func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
+func (dsc *DaemonSetsController) dedupCurHistories(ctx context.Context, ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
 | 
				
			||||||
	if len(curHistories) == 1 {
 | 
						if len(curHistories) == 1 {
 | 
				
			||||||
		return curHistories[0], nil
 | 
							return curHistories[0], nil
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -361,7 +361,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
 | 
				
			|||||||
			continue
 | 
								continue
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		// Relabel pods before dedup
 | 
							// Relabel pods before dedup
 | 
				
			||||||
		pods, err := dsc.getDaemonPods(ds)
 | 
							pods, err := dsc.getDaemonPods(ctx, ds)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			return nil, err
 | 
								return nil, err
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
@@ -372,14 +372,14 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
 | 
				
			|||||||
					toUpdate.Labels = make(map[string]string)
 | 
										toUpdate.Labels = make(map[string]string)
 | 
				
			||||||
				}
 | 
									}
 | 
				
			||||||
				toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
 | 
									toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
 | 
				
			||||||
				_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(context.TODO(), toUpdate, metav1.UpdateOptions{})
 | 
									_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(ctx, toUpdate, metav1.UpdateOptions{})
 | 
				
			||||||
				if err != nil {
 | 
									if err != nil {
 | 
				
			||||||
					return nil, err
 | 
										return nil, err
 | 
				
			||||||
				}
 | 
									}
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		// Remove duplicates
 | 
							// Remove duplicates
 | 
				
			||||||
		err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(context.TODO(), cur.Name, metav1.DeleteOptions{})
 | 
							err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Delete(ctx, cur.Name, metav1.DeleteOptions{})
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			return nil, err
 | 
								return nil, err
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
@@ -391,7 +391,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistor
 | 
				
			|||||||
// This also reconciles ControllerRef by adopting/orphaning.
 | 
					// This also reconciles ControllerRef by adopting/orphaning.
 | 
				
			||||||
// Note that returned histories are pointers to objects in the cache.
 | 
					// Note that returned histories are pointers to objects in the cache.
 | 
				
			||||||
// If you want to modify one, you need to deep-copy it first.
 | 
					// If you want to modify one, you need to deep-copy it first.
 | 
				
			||||||
func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
 | 
					func (dsc *DaemonSetsController) controlledHistories(ctx context.Context, ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
 | 
				
			||||||
	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 | 
						selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		return nil, err
 | 
							return nil, err
 | 
				
			||||||
@@ -405,8 +405,8 @@ func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*app
 | 
				
			|||||||
	}
 | 
						}
 | 
				
			||||||
	// If any adoptions are attempted, we should first recheck for deletion with
 | 
						// If any adoptions are attempted, we should first recheck for deletion with
 | 
				
			||||||
	// an uncached quorum read sometime after listing Pods (see #42639).
 | 
						// an uncached quorum read sometime after listing Pods (see #42639).
 | 
				
			||||||
	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
 | 
						canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
 | 
				
			||||||
		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
 | 
							fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			return nil, err
 | 
								return nil, err
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
@@ -417,7 +417,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*app
 | 
				
			|||||||
	})
 | 
						})
 | 
				
			||||||
	// Use ControllerRefManager to adopt/orphan as needed.
 | 
						// Use ControllerRefManager to adopt/orphan as needed.
 | 
				
			||||||
	cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
 | 
						cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
 | 
				
			||||||
	return cm.ClaimControllerRevisions(histories)
 | 
						return cm.ClaimControllerRevisions(ctx, histories)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// Match check if the given DaemonSet's template matches the template stored in the given history.
 | 
					// Match check if the given DaemonSet's template matches the template stored in the given history.
 | 
				
			||||||
@@ -456,7 +456,7 @@ func getPatch(ds *apps.DaemonSet) ([]byte, error) {
 | 
				
			|||||||
	return patch, err
 | 
						return patch, err
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
 | 
					func (dsc *DaemonSetsController) snapshot(ctx context.Context, ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
 | 
				
			||||||
	patch, err := getPatch(ds)
 | 
						patch, err := getPatch(ds)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		return nil, err
 | 
							return nil, err
 | 
				
			||||||
@@ -475,10 +475,10 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
 | 
				
			|||||||
		Revision: revision,
 | 
							Revision: revision,
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(context.TODO(), history, metav1.CreateOptions{})
 | 
						history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, history, metav1.CreateOptions{})
 | 
				
			||||||
	if outerErr := err; errors.IsAlreadyExists(outerErr) {
 | 
						if outerErr := err; errors.IsAlreadyExists(outerErr) {
 | 
				
			||||||
		// TODO: Is it okay to get from historyLister?
 | 
							// TODO: Is it okay to get from historyLister?
 | 
				
			||||||
		existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
 | 
							existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{})
 | 
				
			||||||
		if getErr != nil {
 | 
							if getErr != nil {
 | 
				
			||||||
			return nil, getErr
 | 
								return nil, getErr
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
@@ -493,7 +493,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
		// Handle name collisions between different history
 | 
							// Handle name collisions between different history
 | 
				
			||||||
		// Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary
 | 
							// Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary
 | 
				
			||||||
		currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
 | 
							currDS, getErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ctx, ds.Name, metav1.GetOptions{})
 | 
				
			||||||
		if getErr != nil {
 | 
							if getErr != nil {
 | 
				
			||||||
			return nil, getErr
 | 
								return nil, getErr
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
@@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
 | 
				
			|||||||
			currDS.Status.CollisionCount = new(int32)
 | 
								currDS.Status.CollisionCount = new(int32)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		*currDS.Status.CollisionCount++
 | 
							*currDS.Status.CollisionCount++
 | 
				
			||||||
		_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(context.TODO(), currDS, metav1.UpdateOptions{})
 | 
							_, updateErr := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).UpdateStatus(ctx, currDS, metav1.UpdateOptions{})
 | 
				
			||||||
		if updateErr != nil {
 | 
							if updateErr != nil {
 | 
				
			||||||
			return nil, updateErr
 | 
								return nil, updateErr
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 
 | 
				
			|||||||
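
Note: the pattern in the hunks above repeats throughout this file. Each DaemonSet history helper gains a leading context.Context parameter and forwards it to the client-go calls that previously minted context.TODO() locally, so a controller shutdown can cancel in-flight API requests. A minimal standalone sketch of that shape, with made-up names (fetchRevision, syncHistory) rather than the controller's own helpers:

package main

import (
	"context"
	"fmt"
	"time"
)

// fetchRevision stands in for an API call; it gives up as soon as ctx is cancelled.
func fetchRevision(ctx context.Context, name string) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return name + "-rev1", nil
	}
}

// syncHistory accepts the caller's context once and threads it to every
// downstream call instead of creating a fresh context.TODO() at each site.
func syncHistory(ctx context.Context, name string) error {
	rev, err := fetchRevision(ctx, name)
	if err != nil {
		return err
	}
	fmt.Println("current revision:", rev)
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := syncHistory(ctx, "example-daemonset"); err != nil {
		fmt.Println("sync failed:", err)
	}
}
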
@@ -17,6 +17,7 @@ limitations under the License.
 package daemon
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -323,7 +324,7 @@ func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count
 
 func currentDSHash(dsc *daemonSetsController, ds *apps.DaemonSet) (string, error) {
 	// Construct histories of the DaemonSet, and get the hash of current history
-	cur, _, err := dsc.constructHistory(ds)
+	cur, _, err := dsc.constructHistory(context.TODO(), ds)
 	if err != nil {
 		return "", err
 	}
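
Where a test has no caller-provided context, the updated call sites pass context.TODO() as an explicit placeholder. A self-contained sketch of that test shape, using hypothetical names (buildHash, TestBuildHash) that are not the real test helpers:

package example

import (
	"context"
	"testing"
)

// buildHash stands in for a helper that now requires a context as its first argument.
func buildHash(ctx context.Context, name string) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err
	}
	return name + "-hash", nil
}

// TestBuildHash mirrors the updated tests: context.TODO() marks a call site that
// has no meaningful context yet without hiding the new parameter.
func TestBuildHash(t *testing.T) {
	if _, err := buildHash(context.TODO(), "example-ds"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
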
@@ -72,7 +72,7 @@ type DeploymentController struct {
 	eventRecorder record.EventRecorder
 
 	// To allow injection of syncDeployment for testing.
-	syncHandler func(dKey string) error
+	syncHandler func(ctx context.Context, dKey string) error
 	// used for unit testing
 	enqueueDeployment func(deployment *apps.Deployment)
 
@@ -146,22 +146,22 @@ func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInfor
 }
 
 // Run begins watching and syncing.
-func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
+func (dc *DeploymentController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer dc.queue.ShutDown()
 
 	klog.InfoS("Starting controller", "controller", "deployment")
 	defer klog.InfoS("Shutting down controller", "controller", "deployment")
 
-	if !cache.WaitForNamedCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
+	if !cache.WaitForNamedCacheSync("deployment", ctx.Done(), dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
 		return
 	}
 
 	for i := 0; i < workers; i++ {
-		go wait.Until(dc.worker, time.Second, stopCh)
+		go wait.UntilWithContext(ctx, dc.worker, time.Second)
 	}
 
-	<-stopCh
+	<-ctx.Done()
 }
 
 func (dc *DeploymentController) addDeployment(obj interface{}) {
@@ -457,19 +457,19 @@ func (dc *DeploymentController) resolveControllerRef(namespace string, controlle
 
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
 // It enforces that the syncHandler is never invoked concurrently with the same key.
-func (dc *DeploymentController) worker() {
-	for dc.processNextWorkItem() {
+func (dc *DeploymentController) worker(ctx context.Context) {
+	for dc.processNextWorkItem(ctx) {
 	}
 }
 
-func (dc *DeploymentController) processNextWorkItem() bool {
+func (dc *DeploymentController) processNextWorkItem(ctx context.Context) bool {
 	key, quit := dc.queue.Get()
 	if quit {
 		return false
 	}
 	defer dc.queue.Done(key)
 
-	err := dc.syncHandler(key.(string))
+	err := dc.syncHandler(ctx, key.(string))
 	dc.handleErr(err, key)
 
 	return true
@@ -500,7 +500,7 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
 // getReplicaSetsForDeployment uses ControllerRefManager to reconcile
 // ControllerRef by adopting and orphaning.
 // It returns the list of ReplicaSets that this Deployment should manage.
-func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) ([]*apps.ReplicaSet, error) {
+func (dc *DeploymentController) getReplicaSetsForDeployment(ctx context.Context, d *apps.Deployment) ([]*apps.ReplicaSet, error) {
 	// List all ReplicaSets to find those we own but that no longer match our
 	// selector. They will be orphaned by ClaimReplicaSets().
 	rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything())
@@ -513,8 +513,8 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment)
 	}
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing ReplicaSets (see #42639).
-	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
+	canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
+		fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(ctx, d.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -524,7 +524,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment)
 		return fresh, nil
 	})
 	cm := controller.NewReplicaSetControllerRefManager(dc.rsControl, d, deploymentSelector, controllerKind, canAdoptFunc)
-	return cm.ClaimReplicaSets(rsList)
+	return cm.ClaimReplicaSets(ctx, rsList)
 }
 
 // getPodMapForDeployment returns the Pods managed by a Deployment.
@@ -565,7 +565,7 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
 
 // syncDeployment will sync the deployment with the given key.
 // This function is not meant to be invoked concurrently with the same key.
-func (dc *DeploymentController) syncDeployment(key string) error {
+func (dc *DeploymentController) syncDeployment(ctx context.Context, key string) error {
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
 		klog.ErrorS(err, "Failed to split meta namespace cache key", "cacheKey", key)
@@ -596,14 +596,14 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
 		if d.Status.ObservedGeneration < d.Generation {
 			d.Status.ObservedGeneration = d.Generation
-			dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+			dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 		}
 		return nil
 	}
 
 	// List ReplicaSets owned by this Deployment, while reconciling ControllerRef
 	// through adoption/orphaning.
-	rsList, err := dc.getReplicaSetsForDeployment(d)
+	rsList, err := dc.getReplicaSetsForDeployment(ctx, d)
 	if err != nil {
 		return err
 	}
@@ -618,40 +618,40 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 	}
 
 	if d.DeletionTimestamp != nil {
-		return dc.syncStatusOnly(d, rsList)
+		return dc.syncStatusOnly(ctx, d, rsList)
 	}
 
 	// Update deployment conditions with an Unknown condition when pausing/resuming
 	// a deployment. In this way, we can be sure that we won't timeout when a user
 	// resumes a Deployment with a set progressDeadlineSeconds.
-	if err = dc.checkPausedConditions(d); err != nil {
+	if err = dc.checkPausedConditions(ctx, d); err != nil {
 		return err
 	}
 
 	if d.Spec.Paused {
-		return dc.sync(d, rsList)
+		return dc.sync(ctx, d, rsList)
 	}
 
 	// rollback is not re-entrant in case the underlying replica sets are updated with a new
 	// revision so we should ensure that we won't proceed to update replica sets until we
 	// make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
 	if getRollbackTo(d) != nil {
-		return dc.rollback(d, rsList)
+		return dc.rollback(ctx, d, rsList)
 	}
 
-	scalingEvent, err := dc.isScalingEvent(d, rsList)
+	scalingEvent, err := dc.isScalingEvent(ctx, d, rsList)
 	if err != nil {
 		return err
 	}
 	if scalingEvent {
-		return dc.sync(d, rsList)
+		return dc.sync(ctx, d, rsList)
 	}
 
 	switch d.Spec.Strategy.Type {
 	case apps.RecreateDeploymentStrategyType:
-		return dc.rolloutRecreate(d, rsList, podMap)
+		return dc.rolloutRecreate(ctx, d, rsList, podMap)
 	case apps.RollingUpdateDeploymentStrategyType:
-		return dc.rolloutRolling(d, rsList)
+		return dc.rolloutRolling(ctx, d, rsList)
 	}
 	return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
 }
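
The adoption path changes shape as well: RecheckDeletionTimestamp now wraps a closure that receives the caller's context, so the uncached quorum read and ClaimReplicaSets are cancelled together with the sync that triggered them. A small sketch of that closure style with stand-in types (recheckFunc and withDeletionRecheck are assumptions, not the controller package's API):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// recheckFunc mirrors the new closure shape: it takes the caller's context
// rather than creating context.TODO() inside the adoption path.
type recheckFunc func(ctx context.Context) (string, error)

// withDeletionRecheck wraps fn and refuses adoption if the object looks deleted.
func withDeletionRecheck(fn recheckFunc) recheckFunc {
	return func(ctx context.Context) (string, error) {
		name, err := fn(ctx)
		if err != nil {
			return "", err
		}
		if name == "" {
			return "", errors.New("object was deleted; refusing to adopt")
		}
		return name, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	canAdopt := withDeletionRecheck(func(ctx context.Context) (string, error) {
		// a live API GET would run here, bounded by the same ctx as the sync
		return "example-deployment", ctx.Err()
	})
	fmt.Println(canAdopt(ctx))
}
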
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"fmt"
 	"strconv"
 	"testing"
@@ -221,7 +222,7 @@ func (f *fixture) run_(deploymentName string, startInformers bool, expectError b
 		informers.Start(stopCh)
 	}
 
-	err = c.syncDeployment(deploymentName)
+	err = c.syncDeployment(context.TODO(), deploymentName)
 	if !expectError && err != nil {
 		f.t.Errorf("error syncing deployment: %v", err)
 	} else if expectError && err == nil {
@@ -529,7 +530,7 @@ func TestGetReplicaSetsForDeployment(t *testing.T) {
 	defer close(stopCh)
 	informers.Start(stopCh)
 
-	rsList, err := c.getReplicaSetsForDeployment(d1)
+	rsList, err := c.getReplicaSetsForDeployment(context.TODO(), d1)
 	if err != nil {
 		t.Fatalf("getReplicaSetsForDeployment() error: %v", err)
 	}
@@ -541,7 +542,7 @@ func TestGetReplicaSetsForDeployment(t *testing.T) {
 		t.Errorf("getReplicaSetsForDeployment() = %v, want [%v]", rsNames, rs1.Name)
 	}
 
-	rsList, err = c.getReplicaSetsForDeployment(d2)
+	rsList, err = c.getReplicaSetsForDeployment(context.TODO(), d2)
 	if err != nil {
 		t.Fatalf("getReplicaSetsForDeployment() error: %v", err)
 	}
@@ -579,7 +580,7 @@ func TestGetReplicaSetsForDeploymentAdoptRelease(t *testing.T) {
 	defer close(stopCh)
 	informers.Start(stopCh)
 
-	rsList, err := c.getReplicaSetsForDeployment(d)
+	rsList, err := c.getReplicaSetsForDeployment(context.TODO(), d)
 	if err != nil {
 		t.Fatalf("getReplicaSetsForDeployment() error: %v", err)
 	}
@@ -34,7 +34,7 @@ import (
 // cases this helper will run that cannot be prevented from the scaling detection,
 // for example a resync of the deployment after it was scaled up. In those cases,
 // we shouldn't try to estimate any progress.
-func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
+func (dc *DeploymentController) syncRolloutStatus(ctx context.Context, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
 	newStatus := calculateStatus(allRSs, newRS, d)
 
 	// If there is no progressDeadlineSeconds set, remove any Progressing condition.
@@ -114,7 +114,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
 
 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
+	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(ctx, newDeployment, metav1.UpdateOptions{})
 	return err
 }
 
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"math"
 	"testing"
 	"time"
@@ -330,7 +331,7 @@ func TestSyncRolloutStatus(t *testing.T) {
 				test.allRSs = append(test.allRSs, test.newRS)
 			}
 
-			err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d)
+			err := dc.syncRolloutStatus(context.TODO(), test.allRSs, test.newRS, test.d)
 			if err != nil {
 				t.Error(err)
 			}
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -25,9 +26,9 @@ import (
 )
 
 // rolloutRecreate implements the logic for recreating a replica set.
-func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID][]*v1.Pod) error {
+func (dc *DeploymentController) rolloutRecreate(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID][]*v1.Pod) error {
 	// Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
-	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, false)
 	if err != nil {
 		return err
 	}
@@ -35,23 +36,23 @@ func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*ap
 	activeOldRSs := controller.FilterActiveReplicaSets(oldRSs)
 
 	// scale down old replica sets.
-	scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(activeOldRSs, d)
+	scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(ctx, activeOldRSs, d)
 	if err != nil {
 		return err
 	}
 	if scaledDown {
 		// Update DeploymentStatus.
-		return dc.syncRolloutStatus(allRSs, newRS, d)
+		return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
 	}
 
 	// Do not process a deployment when it has old pods running.
 	if oldPodsRunning(newRS, oldRSs, podMap) {
-		return dc.syncRolloutStatus(allRSs, newRS, d)
+		return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
 	}
 
 	// If we need to create a new RS, create it now.
 	if newRS == nil {
-		newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
+		newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, true)
 		if err != nil {
 			return err
 		}
@@ -59,22 +60,22 @@ func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*ap
 	}
 
 	// scale up new replica set.
-	if _, err := dc.scaleUpNewReplicaSetForRecreate(newRS, d); err != nil {
+	if _, err := dc.scaleUpNewReplicaSetForRecreate(ctx, newRS, d); err != nil {
 		return err
 	}
 
 	if util.DeploymentComplete(d, &d.Status) {
-		if err := dc.cleanupDeployment(oldRSs, d); err != nil {
+		if err := dc.cleanupDeployment(ctx, oldRSs, d); err != nil {
 			return err
 		}
 	}
 
 	// Sync deployment status.
-	return dc.syncRolloutStatus(allRSs, newRS, d)
+	return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
 }
 
 // scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate".
-func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
+func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(ctx context.Context, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
 	scaled := false
 	for i := range oldRSs {
 		rs := oldRSs[i]
@@ -82,7 +83,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*app
 		if *(rs.Spec.Replicas) == 0 {
 			continue
 		}
-		scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)
+		scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(ctx, rs, 0, deployment)
 		if err != nil {
 			return false, err
 		}
@@ -125,7 +126,7 @@ func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap ma
 }
 
 // scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate".
-func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
-	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
+func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(ctx context.Context, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
+	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, newRS, *(deployment.Spec.Replicas), deployment)
 	return scaled, err
 }
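
The rollout helpers above simply forward ctx; what the refactor buys is that the top-level caller now owns cancellation. A hedged, standalone sketch of such a caller (rollout and the use of signal.NotifyContext are illustrative; the controller manager's actual wiring is outside this hunk):

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"
)

// rollout stands in for a context-aware rollout step like the ones above.
func rollout(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(100 * time.Millisecond):
		fmt.Println("rolled out", name)
		return nil
	}
}

func main() {
	// A SIGINT cancels ctx, which aborts any in-flight rollout step instead of
	// leaving it running against a stale stop channel.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()
	if err := rollout(ctx, "example-deployment"); err != nil {
		fmt.Println("rollout aborted:", err)
	}
}
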
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"fmt"
 	"testing"
 
@@ -71,7 +72,7 @@ func TestScaleDownOldReplicaSets(t *testing.T) {
 		}
 		c.eventRecorder = &record.FakeRecorder{}
 
-		c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d)
+		c.scaleDownOldReplicaSetsForRecreate(context.TODO(), oldRSs, test.d)
 		for j := range oldRSs {
 			rs := oldRSs[j]
 
@@ -31,8 +31,8 @@ import (
 )
 
 // rollback the deployment to the specified revision. In any case cleanup the rollback spec.
-func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
-	newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
+func (dc *DeploymentController) rollback(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+	newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, true)
 	if err != nil {
 		return err
 	}
@@ -45,7 +45,7 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
 			// If we still can't find the last revision, gives up rollback
 			dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
 			// Gives up rollback
-			return dc.updateDeploymentAndClearRollbackTo(d)
+			return dc.updateDeploymentAndClearRollbackTo(ctx, d)
 		}
 	}
 	for _, rs := range allRSs {
@@ -59,7 +59,7 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
 			// rollback by copying podTemplate.Spec from the replica set
 			// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
 			// no-op if the spec matches current deployment's podTemplate.Spec
-			performedRollback, err := dc.rollbackToTemplate(d, rs)
+			performedRollback, err := dc.rollbackToTemplate(ctx, d, rs)
 			if performedRollback && err == nil {
 				dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision))
 			}
@@ -68,13 +68,13 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
 	}
 	dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find the revision to rollback to.")
 	// Gives up rollback
-	return dc.updateDeploymentAndClearRollbackTo(d)
+	return dc.updateDeploymentAndClearRollbackTo(ctx, d)
 }
 
 // rollbackToTemplate compares the templates of the provided deployment and replica set and
 // updates the deployment with the replica set template in case they are different. It also
 // cleans up the rollback spec so subsequent requeues of the deployment won't end up in here.
-func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
+func (dc *DeploymentController) rollbackToTemplate(ctx context.Context, d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
 	performedRollback := false
 	if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
 		klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
@@ -98,7 +98,7 @@ func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.
 		dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
 	}
 
-	return performedRollback, dc.updateDeploymentAndClearRollbackTo(d)
+	return performedRollback, dc.updateDeploymentAndClearRollbackTo(ctx, d)
 }
 
 func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) {
@@ -112,10 +112,10 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
 // updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment
 // It is assumed that the caller will have updated the deployment template appropriately (in case
 // we want to rollback).
-func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
+func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(ctx context.Context, d *apps.Deployment) error {
 	klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
 	setRollbackTo(d, nil)
-	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(context.TODO(), d, metav1.UpdateOptions{})
+	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(ctx, d, metav1.UpdateOptions{})
 	return err
 }
 
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"fmt"
 	"sort"
 
@@ -28,62 +29,62 @@ import (
 )
 
 // rolloutRolling implements the logic for rolling a new replica set.
-func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
-	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
+func (dc *DeploymentController) rolloutRolling(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, true)
 	if err != nil {
 		return err
 	}
 	allRSs := append(oldRSs, newRS)
 
 	// Scale up, if we can.
-	scaledUp, err := dc.reconcileNewReplicaSet(allRSs, newRS, d)
+	scaledUp, err := dc.reconcileNewReplicaSet(ctx, allRSs, newRS, d)
 	if err != nil {
 		return err
 	}
 	if scaledUp {
 		// Update DeploymentStatus
-		return dc.syncRolloutStatus(allRSs, newRS, d)
+		return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
 	}
 
 	// Scale down, if we can.
-	scaledDown, err := dc.reconcileOldReplicaSets(allRSs, controller.FilterActiveReplicaSets(oldRSs), newRS, d)
+	scaledDown, err := dc.reconcileOldReplicaSets(ctx, allRSs, controller.FilterActiveReplicaSets(oldRSs), newRS, d)
 	if err != nil {
 		return err
 	}
 	if scaledDown {
 		// Update DeploymentStatus
-		return dc.syncRolloutStatus(allRSs, newRS, d)
+		return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
 	}
 
 	if deploymentutil.DeploymentComplete(d, &d.Status) {
-		if err := dc.cleanupDeployment(oldRSs, d); err != nil {
+		if err := dc.cleanupDeployment(ctx, oldRSs, d); err != nil {
 			return err
 		}
 	}
 
 	// Sync deployment status
-	return dc.syncRolloutStatus(allRSs, newRS, d)
+	return dc.syncRolloutStatus(ctx, allRSs, newRS, d)
 }
 
-func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
+func (dc *DeploymentController) reconcileNewReplicaSet(ctx context.Context, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
 	if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
 		// Scaling not required.
 		return false, nil
 	}
 	if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
 		// Scale down.
-		scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
+		scaled, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, newRS, *(deployment.Spec.Replicas), deployment)
 		return scaled, err
 	}
 	newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
 	if err != nil {
 		return false, err
 	}
-	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, newReplicasCount, deployment)
+	scaled, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, newRS, newReplicasCount, deployment)
 	return scaled, err
 }
 
-func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
+func (dc *DeploymentController) reconcileOldReplicaSets(ctx context.Context, allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
 	oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
 	if oldPodsCount == 0 {
 		// Can't scale down further
@@ -133,7 +134,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
 
 	// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
 	// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
-	oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown)
+	oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(ctx, oldRSs, deployment, maxScaledDown)
 	if err != nil {
 		return false, nil
 	}
@@ -141,7 +142,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
 
 	// Scale down old replica sets, need check maxUnavailable to ensure we can scale down
 	allRSs = append(oldRSs, newRS)
-	scaledDownCount, err := dc.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
+	scaledDownCount, err := dc.scaleDownOldReplicaSetsForRollingUpdate(ctx, allRSs, oldRSs, deployment)
	if err != nil {
 		return false, nil
 	}
@@ -152,7 +153,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
 }
 
 // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
-func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) {
+func (dc *DeploymentController) cleanupUnhealthyReplicas(ctx context.Context, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) {
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
 	// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
 	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
@@ -177,7 +178,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaS
 		if newReplicasCount > *(targetRS.Spec.Replicas) {
 			return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
 | 
							_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(ctx, targetRS, newReplicasCount, deployment)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			return nil, totalScaledDown, err
 | 
								return nil, totalScaledDown, err
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
@@ -189,7 +190,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaS
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
 | 
					// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
 | 
				
			||||||
// Need check maxUnavailable to ensure availability
 | 
					// Need check maxUnavailable to ensure availability
 | 
				
			||||||
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) {
 | 
					func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(ctx context.Context, allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) {
 | 
				
			||||||
	maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
 | 
						maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Check if we can scale down.
 | 
						// Check if we can scale down.
 | 
				
			||||||
@@ -221,7 +222,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
 | 
				
			|||||||
		if newReplicasCount > *(targetRS.Spec.Replicas) {
 | 
							if newReplicasCount > *(targetRS.Spec.Replicas) {
 | 
				
			||||||
			return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
 | 
								return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		_, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
 | 
							_, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, targetRS, newReplicasCount, deployment)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			return totalScaledDown, err
 | 
								return totalScaledDown, err
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 
 | 
				
			|||||||
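The rolling-update helpers above now accept ctx as their first parameter and forward it, unchanged, down to scaleReplicaSetAndRecordEvent and ultimately to the API calls. A minimal, self-contained sketch of that convention, using only the standard library; the names reconcile, scaleTo and apiUpdate are hypothetical stand-ins, not Kubernetes APIs:

package main

import (
	"context"
	"fmt"
	"time"
)

// apiUpdate stands in for a client call; it honors cancellation via ctx.
func apiUpdate(ctx context.Context, name string, replicas int32) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // caller gave up; stop instead of issuing the request
	case <-time.After(10 * time.Millisecond):
		fmt.Printf("updated %s to %d replicas\n", name, replicas)
		return nil
	}
}

// scaleTo threads the caller's ctx straight through to the API call.
func scaleTo(ctx context.Context, name string, replicas int32) error {
	return apiUpdate(ctx, name, replicas)
}

// reconcile receives ctx from its caller and passes it down, mirroring how the
// reconcile helpers in this diff forward ctx rather than creating their own.
func reconcile(ctx context.Context, name string, desired int32) error {
	return scaleTo(ctx, name, desired)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := reconcile(ctx, "example-rs", 3); err != nil {
		fmt.Println("reconcile failed:", err)
	}
}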
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"testing"
 
 	apps "k8s.io/api/apps/v1"
@@ -90,7 +91,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
 			client:        &fake,
 			eventRecorder: &record.FakeRecorder{},
 		}
-		scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, deployment)
+		scaled, err := controller.reconcileNewReplicaSet(context.TODO(), allRSs, newRS, deployment)
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 			continue
@@ -197,7 +198,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 			eventRecorder: &record.FakeRecorder{},
 		}
 
-		scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment)
+		scaled, err := controller.reconcileOldReplicaSets(context.TODO(), allRSs, oldRSs, newRS, deployment)
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 			continue
@@ -265,7 +266,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
 			client:        &fakeClientset,
 			eventRecorder: &record.FakeRecorder{},
 		}
-		_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, int32(test.maxCleanupCount))
+		_, cleanupCount, err := controller.cleanupUnhealthyReplicas(context.TODO(), oldRSs, deployment, int32(test.maxCleanupCount))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 			continue
@@ -339,7 +340,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 			client:        &fakeClientset,
 			eventRecorder: &record.FakeRecorder{},
 		}
-		scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
+		scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(context.TODO(), allRSs, oldRSs, deployment)
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 			continue
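In the corresponding tests there is no surrounding context to propagate, so each call site passes context.TODO(), which keeps the calls compiling while signalling that a real context should eventually be wired in. A small sketch of that pattern under the same assumption; doSomething is a hypothetical function, not part of this codebase:

package example

import (
	"context"
	"testing"
)

// doSomething follows the ctx-first convention adopted in this commit.
func doSomething(ctx context.Context, key string) error {
	_ = ctx // a real implementation would pass ctx to its API calls
	_ = key
	return nil
}

func TestDoSomething(t *testing.T) {
	// context.TODO() satisfies the new signature when the test has no context of its own.
	if err := doSomething(context.TODO(), "default/foo"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}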
@@ -34,24 +34,24 @@ import (
 )
 
 // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions.
-func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
-	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
+func (dc *DeploymentController) syncStatusOnly(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, false)
 	if err != nil {
 		return err
 	}
 
 	allRSs := append(oldRSs, newRS)
-	return dc.syncDeploymentStatus(allRSs, newRS, d)
+	return dc.syncDeploymentStatus(ctx, allRSs, newRS, d)
 }
 
 // sync is responsible for reconciling deployments on scaling events or when they
 // are paused.
-func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
-	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
+func (dc *DeploymentController) sync(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, false)
 	if err != nil {
 		return err
 	}
-	if err := dc.scale(d, newRS, oldRSs); err != nil {
+	if err := dc.scale(ctx, d, newRS, oldRSs); err != nil {
 		// If we get an error while trying to scale, the deployment will be requeued
 		// so we can abort this resync
 		return err
@@ -59,19 +59,19 @@ func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaS
 
 	// Clean up the deployment when it's paused and no rollback is in flight.
 	if d.Spec.Paused && getRollbackTo(d) == nil {
-		if err := dc.cleanupDeployment(oldRSs, d); err != nil {
+		if err := dc.cleanupDeployment(ctx, oldRSs, d); err != nil {
 			return err
 		}
 	}
 
 	allRSs := append(oldRSs, newRS)
-	return dc.syncDeploymentStatus(allRSs, newRS, d)
+	return dc.syncDeploymentStatus(ctx, allRSs, newRS, d)
 }
 
 // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
 // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
 // that were paused for longer than progressDeadlineSeconds.
-func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
+func (dc *DeploymentController) checkPausedConditions(ctx context.Context, d *apps.Deployment) error {
 	if !deploymentutil.HasProgressDeadline(d) {
 		return nil
 	}
@@ -98,7 +98,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 	}
 
 	var err error
-	_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+	_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 	return err
 }
 
@@ -113,11 +113,11 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 //
 // Note that currently the deployment controller is using caches to avoid querying the server for reads.
 // This may lead to stale reads of replica sets, thus incorrect deployment status.
-func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
+func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
 	_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)
 
 	// Get new replica set with the updated revision number
-	newRS, err := dc.getNewReplicaSet(d, rsList, allOldRSs, createIfNotExisted)
+	newRS, err := dc.getNewReplicaSet(ctx, d, rsList, allOldRSs, createIfNotExisted)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -135,7 +135,7 @@ const (
 // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
 // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
 // Note that the pod-template-hash will be added to adopted RSes and pods.
-func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
+func (dc *DeploymentController) getNewReplicaSet(ctx context.Context, d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
 	existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)
 
 	// Calculate the max revision number among all old RSes
@@ -155,7 +155,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
 		if annotationsUpdated || minReadySecondsNeedsUpdate {
 			rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
-			return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
+			return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
 		}
 
 		// Should use the revision in existingNewRS's annotation, since it set by before
@@ -173,7 +173,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 
 		if needsUpdate {
 			var err error
-			if _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{}); err != nil {
+			if _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{}); err != nil {
 				return nil, err
 			}
 		}
@@ -220,7 +220,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 	// hash collisions. If there is any other error, we need to report it in the status of
 	// the Deployment.
 	alreadyExists := false
-	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(context.TODO(), &newRS, metav1.CreateOptions{})
+	createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(ctx, &newRS, metav1.CreateOptions{})
 	switch {
 	// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
 	case errors.IsAlreadyExists(err):
@@ -252,7 +252,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		*d.Status.CollisionCount++
 		// Update the collisionCount for the Deployment and let it requeue by returning the original
 		// error.
-		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 		if dErr == nil {
 			klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
 		}
@@ -268,7 +268,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 			// We don't really care about this error at this point, since we have a bigger issue to report.
 			// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
 			// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
-			_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+			_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 		}
 		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
 		return nil, err
@@ -285,7 +285,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		needsUpdate = true
 	}
 	if needsUpdate {
-		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(context.TODO(), d, metav1.UpdateOptions{})
+		_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{})
 	}
 	return createdRS, err
 }
@@ -295,14 +295,14 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable
 // replicas in the event of a problem with the rolled out template. Should run only on scaling events or
 // when a deployment is paused and not during the normal rollout process.
-func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
+func (dc *DeploymentController) scale(ctx context.Context, deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
 	// If there is only one active replica set then we should scale that up to the full count of the
 	// deployment. If there is no active replica set, then we should scale up the newest replica set.
 	if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
 		if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
 			return nil
 		}
-		_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
+		_, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, activeOrLatest, *(deployment.Spec.Replicas), deployment)
 		return err
 	}
 
@@ -310,7 +310,7 @@ func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.R
 	// This case handles replica set adoption during a saturated new replica set.
 	if deploymentutil.IsSaturated(deployment, newRS) {
 		for _, old := range controller.FilterActiveReplicaSets(oldRSs) {
-			if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil {
+			if _, _, err := dc.scaleReplicaSetAndRecordEvent(ctx, old, 0, deployment); err != nil {
 				return err
 			}
 		}
@@ -384,7 +384,7 @@ func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.R
 			}
 
 			// TODO: Use transactions when we have them.
-			if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil {
+			if _, _, err := dc.scaleReplicaSet(ctx, rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil {
 				// Return as soon as we fail, the deployment is requeued
 				return err
 			}
@@ -393,7 +393,7 @@ func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.R
 	return nil
 }
 
-func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
+func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(ctx context.Context, rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
 	// No need to scale
 	if *(rs.Spec.Replicas) == newScale {
 		return false, rs, nil
@@ -404,11 +404,11 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSe
 	} else {
 		scalingOperation = "down"
 	}
-	scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation)
+	scaled, newRS, err := dc.scaleReplicaSet(ctx, rs, newScale, deployment, scalingOperation)
 	return scaled, newRS, err
 }
 
-func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
+func (dc *DeploymentController) scaleReplicaSet(ctx context.Context, rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
 
 	sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale
 
@@ -420,7 +420,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
 		rsCopy := rs.DeepCopy()
 		*(rsCopy.Spec.Replicas) = newScale
 		deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
-		rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(context.TODO(), rsCopy, metav1.UpdateOptions{})
+		rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
 		if err == nil && sizeNeedsUpdate {
 			scaled = true
 			dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -432,7 +432,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
 // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old replica sets
 // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept
 // around by default 1) for historical reasons and 2) for the ability to rollback a deployment.
-func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
+func (dc *DeploymentController) cleanupDeployment(ctx context.Context, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
 	if !deploymentutil.HasRevisionHistoryLimit(deployment) {
 		return nil
 	}
@@ -458,7 +458,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
 			continue
 		}
 		klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
-		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(context.TODO(), rs.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
+		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(ctx, rs.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
			// Return error instead of aggregating and continuing DELETEs on the theory
 			// that we may be overloading the api server.
 			return err
@@ -469,7 +469,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
 }
 
 // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary
-func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
+func (dc *DeploymentController) syncDeploymentStatus(ctx context.Context, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
 	newStatus := calculateStatus(allRSs, newRS, d)
 
 	if reflect.DeepEqual(d.Status, newStatus) {
@@ -478,7 +478,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet,
 
 	newDeployment := d
 	newDeployment.Status = newStatus
-	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(context.TODO(), newDeployment, metav1.UpdateOptions{})
+	_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(ctx, newDeployment, metav1.UpdateOptions{})
 	return err
 }
 
@@ -525,8 +525,8 @@ func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployme
 // by looking at the desired-replicas annotation in the active replica sets of the deployment.
 //
 // rsList should come from getReplicaSetsForDeployment(d).
-func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet) (bool, error) {
-	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
+func (dc *DeploymentController) isScalingEvent(ctx context.Context, d *apps.Deployment, rsList []*apps.ReplicaSet) (bool, error) {
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(ctx, d, rsList, false)
 	if err != nil {
 		return false, err
 	}
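With ctx threaded into the sync helpers, the client calls above switch from context.TODO() to the propagated ctx, so cancelling the controller's context also abandons any in-flight API requests. A rough, self-contained sketch of why that matters; the statusClient interface below is a hypothetical stand-in for a typed clientset, not the real client-go API:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type statusClient interface {
	UpdateStatus(ctx context.Context, name string) error
}

type slowClient struct{}

func (slowClient) UpdateStatus(ctx context.Context, name string) error {
	select {
	case <-time.After(500 * time.Millisecond): // pretend the API server is slow
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// syncStatus forwards the caller's ctx instead of context.TODO(), so shutting
// down the caller also abandons this in-flight update.
func syncStatus(ctx context.Context, c statusClient, name string) error {
	return c.UpdateStatus(ctx, name)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := syncStatus(ctx, slowClient{}, "example-deployment")
	fmt.Println("cancelled:", errors.Is(err, context.DeadlineExceeded))
}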
@@ -17,6 +17,7 @@ limitations under the License.
 package deployment
 
 import (
+	"context"
 	"math"
 	"testing"
 	"time"
@@ -297,7 +298,7 @@ func TestScale(t *testing.T) {
 				deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
 			}
 
-			if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
+			if err := dc.scale(context.TODO(), test.deployment, test.newRS, test.oldRSs); err != nil {
 				t.Errorf("%s: unexpected error: %v", test.name, err)
 				return
 			}
@@ -433,7 +434,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
 
 		t.Logf(" &test.revisionHistoryLimit: %d", test.revisionHistoryLimit)
 		d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
-		controller.cleanupDeployment(test.oldRSs, d)
+		controller.cleanupDeployment(context.TODO(), test.oldRSs, d)
 
 		gotDeletions := 0
 		for _, action := range fake.Actions() {
@@ -565,7 +566,7 @@ func TestDeploymentController_cleanupDeploymentOrder(t *testing.T) {
 		informers.Start(stopCh)
 
 		d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
-		controller.cleanupDeployment(test.oldRSs, d)
+		controller.cleanupDeployment(context.TODO(), test.oldRSs, d)
 
 		deletedRSs := sets.String{}
 		for _, action := range fake.Actions() {
@@ -545,7 +545,7 @@ func (jm *Controller) getPodsForJob(j *batch.Job, withFinalizers bool) ([]*v1.Po
 	}
 	// If any adoptions are attempted, we should first recheck for deletion
 	// with an uncached quorum read sometime after listing Pods (see #42639).
-	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
+	canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
 		fresh, err := jm.kubeClient.BatchV1().Jobs(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
@@ -561,7 +561,7 @@ func (jm *Controller) getPodsForJob(j *batch.Job, withFinalizers bool) ([]*v1.Po
 	}
 	cm := controller.NewPodControllerRefManager(jm.podControl, j, selector, controllerKind, canAdoptFunc, finalizers...)
 	// When adopting Pods, this operation adds an ownerRef and finalizers.
-	pods, err = cm.ClaimPods(pods)
+	pods, err = cm.ClaimPods(context.TODO(), pods)
 	if err != nil || !withFinalizers {
 		return pods, err
 	}
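Here the callback handed to controller.RecheckDeletionTimestamp gains a context parameter and ClaimPods now takes one as well, while this job-controller call site still passes context.TODO(). A simplified, self-contained sketch of that callback-signature change; the names canAdoptFunc and recheck are hypothetical, not the controller package's actual helpers:

package main

import (
	"context"
	"fmt"
)

// Before: a parameterless "can adopt" callback. After: the callback receives
// the adoption call's context so the uncached re-read can be cancelled.
type canAdoptFunc func(ctx context.Context) error

// recheck wraps a callback, mirroring how a ref manager would invoke it with
// the context of the adoption attempt.
func recheck(fn canAdoptFunc) canAdoptFunc {
	return func(ctx context.Context) error {
		// a real implementation would re-read the owning object with ctx here
		return fn(ctx)
	}
}

func main() {
	adopt := recheck(func(ctx context.Context) error {
		if err := ctx.Err(); err != nil {
			return err // caller already gave up; refuse adoption
		}
		fmt.Println("owner still live; adoption allowed")
		return nil
	})
	if err := adopt(context.Background()); err != nil {
		fmt.Println("adoption blocked:", err)
	}
}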
@@ -88,7 +88,7 @@ type ReplicaSetController struct {
 	// It resumes normal action after observing the watch events for them.
 	burstReplicas int
 	// To allow injection of syncReplicaSet for testing.
-	syncHandler func(rsKey string) error
+	syncHandler func(ctx context.Context, rsKey string) error
 
 	// A TTLCache of pod creates/deletes each rc expects to see.
 	expectations *controller.UIDTrackingControllerExpectations
@@ -178,7 +178,7 @@ func (rsc *ReplicaSetController) SetEventRecorder(recorder record.EventRecorder)
 }
 
 // Run begins watching and syncing.
-func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
+func (rsc *ReplicaSetController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer rsc.queue.ShutDown()
 
@@ -186,15 +186,15 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
 	klog.Infof("Starting %v controller", controllerName)
 	defer klog.Infof("Shutting down %v controller", controllerName)
 
-	if !cache.WaitForNamedCacheSync(rsc.Kind, stopCh, rsc.podListerSynced, rsc.rsListerSynced) {
+	if !cache.WaitForNamedCacheSync(rsc.Kind, ctx.Done(), rsc.podListerSynced, rsc.rsListerSynced) {
 		return
 	}
 
 	for i := 0; i < workers; i++ {
-		go wait.Until(rsc.worker, time.Second, stopCh)
+		go wait.UntilWithContext(ctx, rsc.worker, time.Second)
 	}
 
-	<-stopCh
+	<-ctx.Done()
 }
 
 // getReplicaSetsWithSameController returns a list of ReplicaSets with the same
@@ -515,19 +515,19 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
 
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
 // It enforces that the syncHandler is never invoked concurrently with the same key.
-func (rsc *ReplicaSetController) worker() {
-	for rsc.processNextWorkItem() {
+func (rsc *ReplicaSetController) worker(ctx context.Context) {
+	for rsc.processNextWorkItem(ctx) {
 	}
 }
 
-func (rsc *ReplicaSetController) processNextWorkItem() bool {
+func (rsc *ReplicaSetController) processNextWorkItem(ctx context.Context) bool {
 	key, quit := rsc.queue.Get()
 	if quit {
 		return false
 	}
 	defer rsc.queue.Done(key)
 
-	err := rsc.syncHandler(key.(string))
+	err := rsc.syncHandler(ctx, key.(string))
 	if err == nil {
 		rsc.queue.Forget(key)
 		return true
@@ -647,7 +647,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
 // syncReplicaSet will sync the ReplicaSet with the given key if it has had its expectations fulfilled,
 // meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be
 // invoked concurrently with the same key.
-func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
+func (rsc *ReplicaSetController) syncReplicaSet(ctx context.Context, key string) error {
 	startTime := time.Now()
 	defer func() {
 		klog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime))
@@ -686,7 +686,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 
 	// NOTE: filteredPods are pointing to objects from cache - if you need to
 	// modify them, you need to copy it first.
-	filteredPods, err = rsc.claimPods(rs, selector, filteredPods)
+	filteredPods, err = rsc.claimPods(ctx, rs, selector, filteredPods)
 	if err != nil {
 		return err
 	}
@@ -714,11 +714,11 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	return manageReplicasErr
 }
 
-func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
+func (rsc *ReplicaSetController) claimPods(ctx context.Context, rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
 	// If any adoptions are attempted, we should first recheck for deletion with
 	// an uncached quorum read sometime after listing Pods (see #42639).
-	canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{})
+	canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
+		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(ctx, rs.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -728,7 +728,7 @@ func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.
 		return fresh, nil
 	})
 	cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, rsc.GroupVersionKind, canAdoptFunc)
-	return cm.ClaimPods(filteredPods)
+	return cm.ClaimPods(ctx, filteredPods)
 }
 
 // slowStartBatch tries to call the provided function a total of 'count' times,
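Run's stop channel is replaced by a context: ctx.Done() serves as the stop channel for the cache sync, workers run under wait.UntilWithContext, and the function blocks on <-ctx.Done() instead of <-stopCh. A standard-library-only sketch of the same shape; untilWithContext below is a simplified local analogue written for illustration, not the apimachinery function itself:

package main

import (
	"context"
	"fmt"
	"time"
)

// untilWithContext runs fn every period until ctx is cancelled.
func untilWithContext(ctx context.Context, fn func(context.Context), period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		fn(ctx)
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}

// run mirrors the new Run shape: workers receive ctx, and the function blocks
// on ctx.Done() instead of a dedicated stop channel.
func run(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		i := i
		go untilWithContext(ctx, func(ctx context.Context) {
			fmt.Printf("worker %d processing\n", i)
		}, 100*time.Millisecond)
	}
	<-ctx.Done() // previously: <-stopCh
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	run(ctx, 2)
}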
@@ -185,12 +185,12 @@ func processSync(rsc *ReplicaSetController, key string) error {
 		rsc.syncHandler = oldSyncHandler
 	}()
 	var syncErr error
-	rsc.syncHandler = func(key string) error {
-		syncErr = oldSyncHandler(key)
+	rsc.syncHandler = func(ctx context.Context, key string) error {
+		syncErr = oldSyncHandler(ctx, key)
 		return syncErr
 	}
 	rsc.queue.Add(key)
-	rsc.processNextWorkItem()
+	rsc.processNextWorkItem(context.TODO())
 	return syncErr
 }
 
@@ -224,7 +224,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
 	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
 
 	manager.podControl = &fakePodControl
-	manager.syncReplicaSet(GetKey(rsSpec, t))
+	manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 	err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -240,7 +240,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 	manager.podControl = &fakePodControl
 
 	received := make(chan string)
-	manager.syncHandler = func(key string) error {
+	manager.syncHandler = func(ctx context.Context, key string) error {
 		received <- key
 		return nil
 	}
@@ -253,7 +253,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 	pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
 	manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
 
-	go manager.worker()
+	go manager.worker(context.TODO())
 
 	expected := GetKey(rsSpec, t)
 	select {
@@ -282,7 +282,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
 	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
 
 	manager.podControl = &fakePodControl
-	manager.syncReplicaSet(GetKey(rs, t))
+	manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 	err := validateSyncReplicaSet(&fakePodControl, fakePodControl.CreateLimit, 0, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -324,7 +324,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	rsSpec.Status.Replicas = 1
 	rsSpec.Status.ReadyReplicas = 1
 	rsSpec.Status.AvailableReplicas = 1
-	manager.syncReplicaSet(GetKey(rsSpec, t))
+	manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 	err := validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -335,7 +335,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	rsSpec.Status.ReadyReplicas = 0
 	rsSpec.Status.AvailableReplicas = 0
 	fakePodControl.Clear()
-	manager.syncReplicaSet(GetKey(rsSpec, t))
+	manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 	err = validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -356,7 +356,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	fakePodControl.Clear()
 	fakePodControl.Err = fmt.Errorf("fake Error")
 
-	manager.syncReplicaSet(GetKey(rsSpec, t))
+	manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -365,7 +365,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
 	// This replica should not need a Lowering of expectations, since the previous create failed
 	fakePodControl.Clear()
 	fakePodControl.Err = nil
-	manager.syncReplicaSet(GetKey(rsSpec, t))
+	manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
 	if err != nil {
 		t.Fatal(err)
@@ -600,7 +600,7 @@ func TestWatchControllers(t *testing.T) {
 	// The update sent through the fakeWatcher should make its way into the workqueue,
 	// and eventually into the syncHandler. The handler validates the received controller
 	// and closes the received channel to indicate that the test can finish.
-	manager.syncHandler = func(key string) error {
+	manager.syncHandler = func(ctx context.Context, key string) error {
 		obj, exists, err := informers.Apps().V1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
 		if !exists || err != nil {
 			t.Errorf("Expected to find replica set under key %v", key)
@@ -614,7 +614,7 @@ func TestWatchControllers(t *testing.T) {
 	}
 	// Start only the ReplicaSet watcher and the workqueue, send a watch event,
 	// and make sure it hits the sync method.
-	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
+	go wait.UntilWithContext(context.TODO(), manager.worker, 10*time.Millisecond)
 
 	testRSSpec.Name = "foo"
 	fakeWatch.Add(&testRSSpec)
 | 
				
			||||||
@@ -645,7 +645,7 @@ func TestWatchPods(t *testing.T) {
 | 
				
			|||||||
	received := make(chan string)
 | 
						received := make(chan string)
 | 
				
			||||||
	// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
 | 
						// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
 | 
				
			||||||
	// send it into the syncHandler.
 | 
						// send it into the syncHandler.
 | 
				
			||||||
	manager.syncHandler = func(key string) error {
 | 
						manager.syncHandler = func(ctx context.Context, key string) error {
 | 
				
			||||||
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
 | 
							namespace, name, err := cache.SplitMetaNamespaceKey(key)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			t.Errorf("Error splitting key: %v", err)
 | 
								t.Errorf("Error splitting key: %v", err)
 | 
				
			||||||
@@ -664,7 +664,7 @@ func TestWatchPods(t *testing.T) {
 | 
				
			|||||||
	// Start only the pod watcher and the workqueue, send a watch event,
 | 
						// Start only the pod watcher and the workqueue, send a watch event,
 | 
				
			||||||
	// and make sure it hits the sync method for the right ReplicaSet.
 | 
						// and make sure it hits the sync method for the right ReplicaSet.
 | 
				
			||||||
	go informers.Core().V1().Pods().Informer().Run(stopCh)
 | 
						go informers.Core().V1().Pods().Informer().Run(stopCh)
 | 
				
			||||||
	go manager.Run(1, stopCh)
 | 
						go manager.Run(context.TODO(), 1)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod")
 | 
						pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod")
 | 
				
			||||||
	testPod := pods.Items[0]
 | 
						testPod := pods.Items[0]
 | 
				
			||||||
@@ -685,7 +685,7 @@ func TestUpdatePods(t *testing.T) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	received := make(chan string)
 | 
						received := make(chan string)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	manager.syncHandler = func(key string) error {
 | 
						manager.syncHandler = func(ctx context.Context, key string) error {
 | 
				
			||||||
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
 | 
							namespace, name, err := cache.SplitMetaNamespaceKey(key)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			t.Errorf("Error splitting key: %v", err)
 | 
								t.Errorf("Error splitting key: %v", err)
 | 
				
			||||||
@@ -698,7 +698,7 @@ func TestUpdatePods(t *testing.T) {
 | 
				
			|||||||
		return nil
 | 
							return nil
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
 | 
						go wait.UntilWithContext(context.TODO(), manager.worker, 10*time.Millisecond)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Put 2 ReplicaSets and one pod into the informers
 | 
						// Put 2 ReplicaSets and one pod into the informers
 | 
				
			||||||
	labelMap1 := map[string]string{"foo": "bar"}
 | 
						labelMap1 := map[string]string{"foo": "bar"}
 | 
				
			||||||
@@ -829,7 +829,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
 | 
				
			|||||||
	// Enqueue once. Then process it. Disable rate-limiting for this.
 | 
						// Enqueue once. Then process it. Disable rate-limiting for this.
 | 
				
			||||||
	manager.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter())
 | 
						manager.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter())
 | 
				
			||||||
	manager.enqueueRS(rs)
 | 
						manager.enqueueRS(rs)
 | 
				
			||||||
	manager.processNextWorkItem()
 | 
						manager.processNextWorkItem(context.TODO())
 | 
				
			||||||
	// It should have been requeued.
 | 
						// It should have been requeued.
 | 
				
			||||||
	if got, want := manager.queue.Len(), 1; got != want {
 | 
						if got, want := manager.queue.Len(), 1; got != want {
 | 
				
			||||||
		t.Errorf("queue.Len() = %v, want %v", got, want)
 | 
							t.Errorf("queue.Len() = %v, want %v", got, want)
 | 
				
			||||||
@@ -909,7 +909,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 | 
				
			|||||||
		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 | 
							informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		for i := 0; i < numReplicas; i += burstReplicas {
 | 
							for i := 0; i < numReplicas; i += burstReplicas {
 | 
				
			||||||
			manager.syncReplicaSet(GetKey(rsSpec, t))
 | 
								manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			// The store accrues active pods. It's also used by the ReplicaSet to determine how many
 | 
								// The store accrues active pods. It's also used by the ReplicaSet to determine how many
 | 
				
			||||||
			// replicas to create.
 | 
								// replicas to create.
 | 
				
			||||||
@@ -988,7 +988,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
			// Check that the ReplicaSet didn't take any action for all the above pods
 | 
								// Check that the ReplicaSet didn't take any action for all the above pods
 | 
				
			||||||
			fakePodControl.Clear()
 | 
								fakePodControl.Clear()
 | 
				
			||||||
			manager.syncReplicaSet(GetKey(rsSpec, t))
 | 
								manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 | 
				
			||||||
			err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 | 
								err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 | 
				
			||||||
			if err != nil {
 | 
								if err != nil {
 | 
				
			||||||
				t.Fatal(err)
 | 
									t.Fatal(err)
 | 
				
			||||||
@@ -1075,7 +1075,7 @@ func TestRSSyncExpectations(t *testing.T) {
 | 
				
			|||||||
			informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
 | 
								informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	})
 | 
						})
 | 
				
			||||||
	manager.syncReplicaSet(GetKey(rsSpec, t))
 | 
						manager.syncReplicaSet(context.TODO(), GetKey(rsSpec, t))
 | 
				
			||||||
	err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 | 
						err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Fatal(err)
 | 
							t.Fatal(err)
 | 
				
			||||||
@@ -1095,7 +1095,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 | 
				
			|||||||
	manager.podControl = &fakePodControl
 | 
						manager.podControl = &fakePodControl
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// This should set expectations for the ReplicaSet
 | 
						// This should set expectations for the ReplicaSet
 | 
				
			||||||
	manager.syncReplicaSet(GetKey(rs, t))
 | 
						manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 | 
				
			||||||
	err := validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
 | 
						err := validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Fatal(err)
 | 
							t.Fatal(err)
 | 
				
			||||||
@@ -1116,7 +1116,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 | 
				
			|||||||
	}
 | 
						}
 | 
				
			||||||
	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
 | 
						informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
 | 
				
			||||||
	manager.deleteRS(rs)
 | 
						manager.deleteRS(rs)
 | 
				
			||||||
	manager.syncReplicaSet(GetKey(rs, t))
 | 
						manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	_, exists, err = manager.expectations.GetExpectations(rsKey)
 | 
						_, exists, err = manager.expectations.GetExpectations(rsKey)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
@@ -1129,7 +1129,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
 | 
				
			|||||||
	// This should have no effect, since we've deleted the ReplicaSet.
 | 
						// This should have no effect, since we've deleted the ReplicaSet.
 | 
				
			||||||
	podExp.Add(-1, 0)
 | 
						podExp.Add(-1, 0)
 | 
				
			||||||
	informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
 | 
						informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
 | 
				
			||||||
	manager.syncReplicaSet(GetKey(rs, t))
 | 
						manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 | 
				
			||||||
	err = validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 | 
						err = validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Fatal(err)
 | 
							t.Fatal(err)
 | 
				
			||||||
@@ -1171,7 +1171,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 | 
				
			|||||||
		t.Fatalf("initial RS didn't result in new item in the queue: %v", err)
 | 
							t.Fatalf("initial RS didn't result in new item in the queue: %v", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ok := manager.processNextWorkItem()
 | 
						ok := manager.processNextWorkItem(context.TODO())
 | 
				
			||||||
	if !ok {
 | 
						if !ok {
 | 
				
			||||||
		t.Fatal("queue is shutting down")
 | 
							t.Fatal("queue is shutting down")
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -1257,7 +1257,7 @@ func TestExpectationsOnRecreate(t *testing.T) {
 | 
				
			|||||||
		t.Fatalf("Re-creating RS didn't result in new item in the queue: %v", err)
 | 
							t.Fatalf("Re-creating RS didn't result in new item in the queue: %v", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ok = manager.processNextWorkItem()
 | 
						ok = manager.processNextWorkItem(context.TODO())
 | 
				
			||||||
	if !ok {
 | 
						if !ok {
 | 
				
			||||||
		t.Fatal("Queue is shutting down!")
 | 
							t.Fatal("Queue is shutting down!")
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -1457,7 +1457,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 | 
				
			|||||||
	pod := newPod("pod", rs, v1.PodRunning, nil, true)
 | 
						pod := newPod("pod", rs, v1.PodRunning, nil, true)
 | 
				
			||||||
	pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
 | 
						pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
 | 
				
			||||||
	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 | 
						informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 | 
				
			||||||
	err := manager.syncReplicaSet(GetKey(rs, t))
 | 
						err := manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Fatal(err)
 | 
							t.Fatal(err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -1514,7 +1514,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 | 
				
			|||||||
	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 | 
						informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// no patch, no create
 | 
						// no patch, no create
 | 
				
			||||||
	err := manager.syncReplicaSet(GetKey(rs, t))
 | 
						err := manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Fatal(err)
 | 
							t.Fatal(err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -1543,7 +1543,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
 | 
				
			|||||||
	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 | 
						informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// sync should abort.
 | 
						// sync should abort.
 | 
				
			||||||
	err := manager.syncReplicaSet(GetKey(rs, t))
 | 
						err := manager.syncReplicaSet(context.TODO(), GetKey(rs, t))
 | 
				
			||||||
	if err == nil {
 | 
						if err == nil {
 | 
				
			||||||
		t.Error("syncReplicaSet() err = nil, expected non-nil")
 | 
							t.Error("syncReplicaSet() err = nil, expected non-nil")
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 
 | 
				
			|||||||
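Note (editor's illustration, not part of this commit): the test hunks above move from stop channels to contexts. Sync handlers and workers now take a ctx, tests pass context.TODO(), and wait.Until(worker, period, stopCh) becomes wait.UntilWithContext(ctx, worker, period). A minimal, self-contained sketch of that pattern, assuming k8s.io/apimachinery is on the module path:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// The worker now receives a context instead of reading from a stop channel.
	worker := func(ctx context.Context) {
		fmt.Println("processed one work item")
	}

	// Replacement for the old `go wait.Until(worker, period, stopCh)` loop.
	go wait.UntilWithContext(ctx, worker, 10*time.Millisecond)

	time.Sleep(30 * time.Millisecond)
	cancel() // cancelling the context plays the role that closing stopCh used to
	time.Sleep(10 * time.Millisecond)
}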
@@ -41,7 +41,7 @@ import (
 type StatefulPodControlInterface interface {
 	// CreateStatefulPod create a Pod in a StatefulSet. Any PVCs necessary for the Pod are created prior to creating
 	// the Pod. If the returned error is nil the Pod and its PVCs have been created.
-	CreateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error
+	CreateStatefulPod(ctx context.Context, set *apps.StatefulSet, pod *v1.Pod) error
 	// UpdateStatefulPod Updates a Pod in a StatefulSet. If the Pod already has the correct identity and stable
 	// storage this method is a no-op. If the Pod must be mutated to conform to the Set, it is mutated and updated.
 	// pod is an in-out parameter, and any updates made to the pod are reflected as mutations to this parameter. If
@@ -72,14 +72,14 @@ type realStatefulPodControl struct {
 	recorder  record.EventRecorder
 }
 
-func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
+func (spc *realStatefulPodControl) CreateStatefulPod(ctx context.Context, set *apps.StatefulSet, pod *v1.Pod) error {
 	// Create the Pod's PVCs prior to creating the Pod
 	if err := spc.createPersistentVolumeClaims(set, pod); err != nil {
 		spc.recordPodEvent("create", set, pod, err)
 		return err
 	}
 	// If we created the PVCs attempt to create the Pod
-	_, err := spc.client.CoreV1().Pods(set.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+	_, err := spc.client.CoreV1().Pods(set.Namespace).Create(ctx, pod, metav1.CreateOptions{})
 	// sink already exists errors
 	if apierrors.IsAlreadyExists(err) {
 		return err
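Note (editor's illustration, not part of this commit): CreateStatefulPod now accepts a ctx and forwards it to the client's Create call instead of using context.TODO(), so a caller's cancellation and deadlines reach the API request. A hedged sketch of the same idea against the fake clientset; createPod and the surrounding names are illustrative, not taken from the Kubernetes code base:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// createPod threads the caller's ctx into the API call; with a real clientset
// the request then honors cancellation and deadlines from that context.
func createPod(ctx context.Context, client kubernetes.Interface, ns string, pod *v1.Pod) error {
	_, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	return err
}

func main() {
	client := fake.NewSimpleClientset()
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	err := createPod(context.Background(), client, "default", pod)
	fmt.Println("create error:", err)
}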
@@ -17,6 +17,7 @@ limitations under the License.
 package statefulset
 
 import (
+	"context"
 	"errors"
 	"strings"
 	"testing"
@@ -56,7 +57,7 @@ func TestStatefulPodControlCreatesPods(t *testing.T) {
 		create := action.(core.CreateAction)
 		return true, create.GetObject(), nil
 	})
-	if err := control.CreateStatefulPod(set, pod); err != nil {
+	if err := control.CreateStatefulPod(context.TODO(), set, pod); err != nil {
 		t.Errorf("StatefulPodControl failed to create Pod error: %s", err)
 	}
 	events := collectEvents(recorder.Events)
@@ -90,7 +91,7 @@ func TestStatefulPodControlCreatePodExists(t *testing.T) {
 	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		return true, pod, apierrors.NewAlreadyExists(action.GetResource().GroupResource(), pod.Name)
 	})
-	if err := control.CreateStatefulPod(set, pod); !apierrors.IsAlreadyExists(err) {
+	if err := control.CreateStatefulPod(context.TODO(), set, pod); !apierrors.IsAlreadyExists(err) {
 		t.Errorf("Failed to create Pod error: %s", err)
 	}
 	events := collectEvents(recorder.Events)
@@ -117,7 +118,7 @@ func TestStatefulPodControlCreatePodPvcCreateFailure(t *testing.T) {
 		create := action.(core.CreateAction)
 		return true, create.GetObject(), nil
 	})
-	if err := control.CreateStatefulPod(set, pod); err == nil {
+	if err := control.CreateStatefulPod(context.TODO(), set, pod); err == nil {
 		t.Error("Failed to produce error on PVC creation failure")
 	}
 	events := collectEvents(recorder.Events)
@@ -153,7 +154,7 @@ func TestStatefulPodControlCreatePodPvcDeleting(t *testing.T) {
 		create := action.(core.CreateAction)
 		return true, create.GetObject(), nil
 	})
-	if err := control.CreateStatefulPod(set, pod); err == nil {
+	if err := control.CreateStatefulPod(context.TODO(), set, pod); err == nil {
 		t.Error("Failed to produce error on deleting PVC")
 	}
 	events := collectEvents(recorder.Events)
@@ -191,7 +192,7 @@ func TestStatefulPodControlCreatePodPvcGetFailure(t *testing.T) {
 		create := action.(core.CreateAction)
 		return true, create.GetObject(), nil
 	})
-	if err := control.CreateStatefulPod(set, pod); err == nil {
+	if err := control.CreateStatefulPod(context.TODO(), set, pod); err == nil {
 		t.Error("Failed to produce error on PVC creation failure")
 	}
 	events := collectEvents(recorder.Events)
@@ -220,7 +221,7 @@ func TestStatefulPodControlCreatePodFailed(t *testing.T) {
 	fakeClient.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
 	})
-	if err := control.CreateStatefulPod(set, pod); err == nil {
+	if err := control.CreateStatefulPod(context.TODO(), set, pod); err == nil {
 		t.Error("Failed to produce error on Pod creation failure")
 	}
 	events := collectEvents(recorder.Events)
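Note (editor's illustration, not part of this commit): the tests above pass context.TODO(), which the standard library documents as a placeholder for call sites that have not yet had a real context plumbed through. It keeps the tests compiling against the new ctx-first signatures without changing their behaviour. A trivial sketch:

package main

import (
	"context"
	"fmt"
)

// doWork stands in for any of the ctx-first methods exercised by the tests.
func doWork(ctx context.Context, key string) error {
	// Production callers pass a cancellable context; tests may pass context.TODO().
	return ctx.Err()
}

func main() {
	fmt.Println(doWork(context.TODO(), "default/web"))
}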
@@ -141,22 +141,22 @@ func NewStatefulSetController(
 }
 
 // Run runs the statefulset controller.
-func (ssc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
+func (ssc *StatefulSetController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer ssc.queue.ShutDown()
 
 	klog.Infof("Starting stateful set controller")
 	defer klog.Infof("Shutting down statefulset controller")
 
-	if !cache.WaitForNamedCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) {
+	if !cache.WaitForNamedCacheSync("stateful set", ctx.Done(), ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) {
 		return
 	}
 
 	for i := 0; i < workers; i++ {
-		go wait.Until(ssc.worker, time.Second, stopCh)
+		go wait.UntilWithContext(ctx, ssc.worker, time.Second)
 	}
 
-	<-stopCh
+	<-ctx.Done()
 }
 
 // addPod adds the statefulset for the pod to the sync queue
@@ -287,7 +287,7 @@ func (ssc *StatefulSetController) deletePod(obj interface{}) {
 //
 // NOTE: Returned Pods are pointers to objects from the cache.
 //       If you need to modify one, you need to copy it first.
-func (ssc *StatefulSetController) getPodsForStatefulSet(set *apps.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) {
+func (ssc *StatefulSetController) getPodsForStatefulSet(ctx context.Context, set *apps.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) {
 	// List all pods to include the pods that don't match the selector anymore but
 	// has a ControllerRef pointing to this StatefulSet.
 	pods, err := ssc.podLister.Pods(set.Namespace).List(labels.Everything())
@@ -300,15 +300,15 @@ func (ssc *StatefulSetController) getPodsForStatefulSet(set *apps.StatefulSet, s
 		return isMemberOf(set, pod)
 	}
 
-	cm := controller.NewPodControllerRefManager(ssc.podControl, set, selector, controllerKind, ssc.canAdoptFunc(set))
-	return cm.ClaimPods(pods, filter)
+	cm := controller.NewPodControllerRefManager(ssc.podControl, set, selector, controllerKind, ssc.canAdoptFunc(ctx, set))
+	return cm.ClaimPods(ctx, pods, filter)
 }
 
 // If any adoptions are attempted, we should first recheck for deletion with
 // an uncached quorum read sometime after listing Pods/ControllerRevisions (see #42639).
-func (ssc *StatefulSetController) canAdoptFunc(set *apps.StatefulSet) func() error {
-	return controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
-		fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(context.TODO(), set.Name, metav1.GetOptions{})
+func (ssc *StatefulSetController) canAdoptFunc(ctx context.Context, set *apps.StatefulSet) func(ctx2 context.Context) error {
+	return controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
+		fresh, err := ssc.kubeClient.AppsV1().StatefulSets(set.Namespace).Get(ctx, set.Name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
@@ -320,7 +320,7 @@ func (ssc *StatefulSetController) canAdoptFunc(set *apps.StatefulSet) func() err
 }
 
 // adoptOrphanRevisions adopts any orphaned ControllerRevisions matched by set's Selector.
-func (ssc *StatefulSetController) adoptOrphanRevisions(set *apps.StatefulSet) error {
+func (ssc *StatefulSetController) adoptOrphanRevisions(ctx context.Context, set *apps.StatefulSet) error {
 	revisions, err := ssc.control.ListRevisions(set)
 	if err != nil {
 		return err
@@ -332,7 +332,7 @@ func (ssc *StatefulSetController) adoptOrphanRevisions(set *apps.StatefulSet) er
 		}
 	}
 	if len(orphanRevisions) > 0 {
-		canAdoptErr := ssc.canAdoptFunc(set)()
+		canAdoptErr := ssc.canAdoptFunc(ctx, set)(ctx)
 		if canAdoptErr != nil {
 			return fmt.Errorf("can't adopt ControllerRevisions: %v", canAdoptErr)
 		}
@@ -403,13 +403,13 @@ func (ssc *StatefulSetController) enqueueSSAfter(ss *apps.StatefulSet, duration
 
 // processNextWorkItem dequeues items, processes them, and marks them done. It enforces that the syncHandler is never
 // invoked concurrently with the same key.
-func (ssc *StatefulSetController) processNextWorkItem() bool {
+func (ssc *StatefulSetController) processNextWorkItem(ctx context.Context) bool {
 	key, quit := ssc.queue.Get()
 	if quit {
 		return false
 	}
 	defer ssc.queue.Done(key)
-	if err := ssc.sync(key.(string)); err != nil {
+	if err := ssc.sync(ctx, key.(string)); err != nil {
 		utilruntime.HandleError(fmt.Errorf("error syncing StatefulSet %v, requeuing: %v", key.(string), err))
 		ssc.queue.AddRateLimited(key)
 	} else {
@@ -419,13 +419,13 @@ func (ssc *StatefulSetController) processNextWorkItem() bool {
 }
 
 // worker runs a worker goroutine that invokes processNextWorkItem until the controller's queue is closed
-func (ssc *StatefulSetController) worker() {
-	for ssc.processNextWorkItem() {
+func (ssc *StatefulSetController) worker(ctx context.Context) {
+	for ssc.processNextWorkItem(ctx) {
 	}
 }
 
 // sync syncs the given statefulset.
-func (ssc *StatefulSetController) sync(key string) error {
+func (ssc *StatefulSetController) sync(ctx context.Context, key string) error {
 	startTime := time.Now()
 	defer func() {
 		klog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
@@ -452,25 +452,25 @@ func (ssc *StatefulSetController) sync(key string) error {
 		return nil
 	}
 
-	if err := ssc.adoptOrphanRevisions(set); err != nil {
+	if err := ssc.adoptOrphanRevisions(ctx, set); err != nil {
 		return err
 	}
 
-	pods, err := ssc.getPodsForStatefulSet(set, selector)
+	pods, err := ssc.getPodsForStatefulSet(ctx, set, selector)
 	if err != nil {
 		return err
 	}
 
-	return ssc.syncStatefulSet(set, pods)
+	return ssc.syncStatefulSet(ctx, set, pods)
 }
 
 // syncStatefulSet syncs a tuple of (statefulset, []*v1.Pod).
-func (ssc *StatefulSetController) syncStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error {
+func (ssc *StatefulSetController) syncStatefulSet(ctx context.Context, set *apps.StatefulSet, pods []*v1.Pod) error {
 	klog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods))
 	var status *apps.StatefulSetStatus
 	var err error
 	// TODO: investigate where we mutate the set during the update as it is not obvious.
-	status, err = ssc.control.UpdateStatefulSet(set.DeepCopy(), pods)
+	status, err = ssc.control.UpdateStatefulSet(ctx, set.DeepCopy(), pods)
 	if err != nil {
 		return err
 	}
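Note (editor's illustration, not part of this commit): Run(ctx, workers) above replaces both the stopCh parameter and the trailing <-stopCh wait with a single context. A standard-library-only sketch of that shape; the controller type and ticker loop below are illustrative placeholders, not the real workqueue-driven implementation:

package main

import (
	"context"
	"fmt"
	"time"
)

type controller struct{}

func (c *controller) worker(ctx context.Context) {
	fmt.Println("sync one key")
}

// Run blocks until ctx is cancelled, mirroring the Run(ctx, workers) shape above.
func (c *controller) Run(ctx context.Context, workers int) {
	for i := 0; i < workers; i++ {
		go func() {
			ticker := time.NewTicker(time.Second)
			defer ticker.Stop()
			for {
				select {
				case <-ctx.Done():
					return // cancellation replaces the old closed stop channel
				case <-ticker.C:
					c.worker(ctx)
				}
			}
		}()
	}
	<-ctx.Done()
}

func main() {
	// For the sketch, a short timeout stands in for SIGTERM-driven cancellation.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	(&controller{}).Run(ctx, 2)
}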
@@ -17,6 +17,7 @@ limitations under the License.
 package statefulset
 
 import (
+	"context"
 	"sort"
 
 	apps "k8s.io/api/apps/v1"
@@ -38,7 +39,7 @@ type StatefulSetControlInterface interface {
 	// If an implementation returns a non-nil error, the invocation will be retried using a rate-limited strategy.
 	// Implementors should sink any errors that they do not wish to trigger a retry, and they may feel free to
 	// exit exceptionally at any point provided they wish the update to be re-run at a later point in time.
-	UpdateStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) (*apps.StatefulSetStatus, error)
+	UpdateStatefulSet(ctx context.Context, set *apps.StatefulSet, pods []*v1.Pod) (*apps.StatefulSetStatus, error)
 	// ListRevisions returns a array of the ControllerRevisions that represent the revisions of set. If the returned
 	// error is nil, the returns slice of ControllerRevisions is valid.
 	ListRevisions(set *apps.StatefulSet) ([]*apps.ControllerRevision, error)
@@ -73,7 +74,7 @@ type defaultStatefulSetControl struct {
 // strategy allows these constraints to be relaxed - pods will be created and deleted eagerly and
 // in no particular order. Clients using the burst strategy should be careful to ensure they
 // understand the consistency implications of having unpredictable numbers of pods available.
-func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) (*apps.StatefulSetStatus, error) {
+func (ssc *defaultStatefulSetControl) UpdateStatefulSet(ctx context.Context, set *apps.StatefulSet, pods []*v1.Pod) (*apps.StatefulSetStatus, error) {
 	// list all revisions and sort them
 	revisions, err := ssc.ListRevisions(set)
 	if err != nil {
@@ -81,7 +82,7 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p
 	}
 	history.SortControllerRevisions(revisions)
 
-	currentRevision, updateRevision, status, err := ssc.performUpdate(set, pods, revisions)
+	currentRevision, updateRevision, status, err := ssc.performUpdate(ctx, set, pods, revisions)
 	if err != nil {
 		return nil, utilerrors.NewAggregate([]error{err, ssc.truncateHistory(set, pods, revisions, currentRevision, updateRevision)})
 	}
@@ -91,7 +92,7 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p
 }
 
 func (ssc *defaultStatefulSetControl) performUpdate(
-	set *apps.StatefulSet, pods []*v1.Pod, revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, *apps.StatefulSetStatus, error) {
+	ctx context.Context, set *apps.StatefulSet, pods []*v1.Pod, revisions []*apps.ControllerRevision) (*apps.ControllerRevision, *apps.ControllerRevision, *apps.StatefulSetStatus, error) {
 	var currentStatus *apps.StatefulSetStatus
 	// get the current, and update revisions
 	currentRevision, updateRevision, collisionCount, err := ssc.getStatefulSetRevisions(set, revisions)
@@ -100,12 +101,12 @@ func (ssc *defaultStatefulSetControl) performUpdate(
 	}
 
 	// perform the main update function and get the status
-	currentStatus, err = ssc.updateStatefulSet(set, currentRevision, updateRevision, collisionCount, pods)
+	currentStatus, err = ssc.updateStatefulSet(ctx, set, currentRevision, updateRevision, collisionCount, pods)
 	if err != nil {
 		return currentRevision, updateRevision, currentStatus, err
 	}
 	// update the set's status
-	err = ssc.updateStatefulSetStatus(set, currentStatus)
+	err = ssc.updateStatefulSetStatus(ctx, set, currentStatus)
 	if err != nil {
 		return currentRevision, updateRevision, currentStatus, err
 	}
@@ -268,6 +269,7 @@ func (ssc *defaultStatefulSetControl) getStatefulSetRevisions(
 // Pods must be at Status.UpdateRevision. If the returned error is nil, the returned StatefulSetStatus is valid and the
 // update must be recorded. If the error is not nil, the method should be retried until successful.
 func (ssc *defaultStatefulSetControl) updateStatefulSet(
+	ctx context.Context,
 	set *apps.StatefulSet,
 	currentRevision *apps.ControllerRevision,
 	updateRevision *apps.ControllerRevision,
@@ -416,7 +418,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
 		}
 		// If we find a Pod that has not been created we create the Pod
 		if !isCreated(replicas[i]) {
-			if err := ssc.podControl.CreateStatefulPod(set, replicas[i]); err != nil {
+			if err := ssc.podControl.CreateStatefulPod(ctx, set, replicas[i]); err != nil {
				return &status, err
 			}
 			status.Replicas++
@@ -579,6 +581,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet(
 // mutated to indicate completion. If status is semantically equivalent to set's Status no update is performed. If the
 // returned error is nil, the update is successful.
 func (ssc *defaultStatefulSetControl) updateStatefulSetStatus(
+	ctx context.Context,
 	set *apps.StatefulSet,
 	status *apps.StatefulSetStatus) error {
 	// complete any in progress rolling update if necessary
@@ -591,7 +594,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSetStatus(
 
 	// copy set and update its status
 	set = set.DeepCopy()
-	if err := ssc.statusUpdater.UpdateStatefulSetStatus(set, status); err != nil {
+	if err := ssc.statusUpdater.UpdateStatefulSetStatus(ctx, set, status); err != nil {
 		return err
 	}
 
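Note (editor's illustration, not part of this commit): once an interface method such as UpdateStatefulSet gains a leading ctx parameter, every implementation, including the test fakes later in this diff, has to change in lockstep. A minimal sketch with hypothetical names:

package main

import (
	"context"
	"fmt"
)

type updater interface {
	Update(ctx context.Context, name string) error
}

type realUpdater struct{}

func (realUpdater) Update(ctx context.Context, name string) error {
	// A real implementation would forward ctx to an API call here.
	return ctx.Err()
}

type fakeUpdater struct{ calls int }

func (f *fakeUpdater) Update(ctx context.Context, name string) error {
	f.calls++ // fakes typically just record the call; ctx is accepted for signature parity
	return nil
}

func main() {
	var u updater = &fakeUpdater{}
	fmt.Println(u.Update(context.TODO(), "web"))
}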
@@ -17,6 +17,7 @@ limitations under the License.
 | 
				
			|||||||
package statefulset
 | 
					package statefulset
 | 
				
			||||||
 | 
					
 | 
				
			||||||
import (
 | 
					import (
 | 
				
			||||||
 | 
						"context"
 | 
				
			||||||
	"errors"
 | 
						"errors"
 | 
				
			||||||
	"fmt"
 | 
						"fmt"
 | 
				
			||||||
	"math/rand"
 | 
						"math/rand"
 | 
				
			||||||
@@ -229,7 +230,7 @@ func ReplacesPods(t *testing.T, set *apps.StatefulSet, invariants invariantFunc)
 | 
				
			|||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			t.Error(err)
 | 
								t.Error(err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		if _, err = ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
							if _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
			t.Errorf("Failed to update StatefulSet : %s", err)
 | 
								t.Errorf("Failed to update StatefulSet : %s", err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 | 
							set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 | 
				
			||||||
@@ -239,7 +240,7 @@ func ReplacesPods(t *testing.T, set *apps.StatefulSet, invariants invariantFunc)
 | 
				
			|||||||
		if pods, err = spc.setPodRunning(set, i); err != nil {
 | 
							if pods, err = spc.setPodRunning(set, i); err != nil {
 | 
				
			||||||
			t.Error(err)
 | 
								t.Error(err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		if _, err = ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
							if _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
			t.Errorf("Failed to update StatefulSet : %s", err)
 | 
								t.Errorf("Failed to update StatefulSet : %s", err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 | 
							set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 | 
				
			||||||
@@ -254,7 +255,7 @@ func ReplacesPods(t *testing.T, set *apps.StatefulSet, invariants invariantFunc)
 | 
				
			|||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Error(err)
 | 
							t.Error(err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
		t.Errorf("Failed to update StatefulSet : %s", err)
 | 
							t.Errorf("Failed to update StatefulSet : %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 | 
						set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
 | 
				
			||||||
@@ -278,7 +279,7 @@ func RecreatesFailedPod(t *testing.T, set *apps.StatefulSet, invariants invarian
 | 
				
			|||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Error(err)
 | 
							t.Error(err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
		t.Errorf("Error updating StatefulSet %s", err)
 | 
							t.Errorf("Error updating StatefulSet %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if err := invariants(set, spc); err != nil {
 | 
						if err := invariants(set, spc); err != nil {
 | 
				
			||||||
@@ -290,7 +291,7 @@ func RecreatesFailedPod(t *testing.T, set *apps.StatefulSet, invariants invarian
 | 
				
			|||||||
	}
 | 
						}
 | 
				
			||||||
	pods[0].Status.Phase = v1.PodFailed
 | 
						pods[0].Status.Phase = v1.PodFailed
 | 
				
			||||||
	spc.podsIndexer.Update(pods[0])
 | 
						spc.podsIndexer.Update(pods[0])
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
		t.Errorf("Error updating StatefulSet %s", err)
 | 
							t.Errorf("Error updating StatefulSet %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if err := invariants(set, spc); err != nil {
 | 
						if err := invariants(set, spc); err != nil {
 | 
				
			||||||
@@ -371,7 +372,7 @@ func UpdatePodFailure(t *testing.T, set *apps.StatefulSet, invariants invariantF
 | 
				
			|||||||
	spc.podsIndexer.Update(pods[0])
 | 
						spc.podsIndexer.Update(pods[0])
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// now it should fail
 | 
						// now it should fail
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil && isOrHasInternalError(err) {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil && isOrHasInternalError(err) {
 | 
				
			||||||
		t.Errorf("StatefulSetControl did not return InternalError found %s", err)
 | 
							t.Errorf("StatefulSetControl did not return InternalError found %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -417,7 +418,7 @@ func PodRecreateDeleteFailure(t *testing.T, set *apps.StatefulSet, invariants in
 | 
				
			|||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		t.Error(err)
 | 
							t.Error(err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
		t.Errorf("Error updating StatefulSet %s", err)
 | 
							t.Errorf("Error updating StatefulSet %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if err := invariants(set, spc); err != nil {
 | 
						if err := invariants(set, spc); err != nil {
 | 
				
			||||||
@@ -430,13 +431,13 @@ func PodRecreateDeleteFailure(t *testing.T, set *apps.StatefulSet, invariants in
 | 
				
			|||||||
	pods[0].Status.Phase = v1.PodFailed
 | 
						pods[0].Status.Phase = v1.PodFailed
 | 
				
			||||||
	spc.podsIndexer.Update(pods[0])
 | 
						spc.podsIndexer.Update(pods[0])
 | 
				
			||||||
	spc.SetDeleteStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0)
 | 
						spc.SetDeleteStatefulPodError(apierrors.NewInternalError(errors.New("API server failed")), 0)
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil && isOrHasInternalError(err) {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil && isOrHasInternalError(err) {
 | 
				
			||||||
		t.Errorf("StatefulSet failed to %s", err)
 | 
							t.Errorf("StatefulSet failed to %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if err := invariants(set, spc); err != nil {
 | 
						if err := invariants(set, spc); err != nil {
 | 
				
			||||||
		t.Error(err)
 | 
							t.Error(err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
 | 
						if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 | 
				
			||||||
		t.Errorf("Error updating StatefulSet %s", err)
 | 
							t.Errorf("Error updating StatefulSet %s", err)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if err := invariants(set, spc); err != nil {
 | 
						if err := invariants(set, spc); err != nil {
 | 
				
			||||||
@@ -1282,7 +1283,7 @@ func TestStatefulSetControlLimitsHistory(t *testing.T) {
 			if err != nil {
 				t.Fatalf("%s: %s", test.name, err)
 			}
-			_, err = ssc.UpdateStatefulSet(set, pods)
+			_, err = ssc.UpdateStatefulSet(context.TODO(), set, pods)
 			if err != nil {
 				t.Fatalf("%s: %s", test.name, err)
 			}
@@ -1629,7 +1630,7 @@ func TestStatefulSetAvailability(t *testing.T) {
 		if err != nil {
 			t.Fatalf("%s: %s", test.name, err)
 		}
-		status, err := ssc.UpdateStatefulSet(set, pods)
+		status, err := ssc.UpdateStatefulSet(context.TODO(), set, pods)
 		if err != nil {
 			t.Fatalf("%s: %s", test.name, err)
 		}
@@ -1824,7 +1825,7 @@ func (spc *fakeStatefulPodControl) setPodTerminated(set *apps.StatefulSet, ordin
 	return spc.podsLister.Pods(set.Namespace).List(selector)
 }
 
-func (spc *fakeStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error {
+func (spc *fakeStatefulPodControl) CreateStatefulPod(ctx context.Context, set *apps.StatefulSet, pod *v1.Pod) error {
 	defer spc.createPodTracker.inc()
 	if spc.createPodTracker.errorReady() {
 		defer spc.createPodTracker.reset()
@@ -1890,7 +1891,7 @@ func newFakeStatefulSetStatusUpdater(setInformer appsinformers.StatefulSetInform
 	}
 }
 
-func (ssu *fakeStatefulSetStatusUpdater) UpdateStatefulSetStatus(set *apps.StatefulSet, status *apps.StatefulSetStatus) error {
+func (ssu *fakeStatefulSetStatusUpdater) UpdateStatefulSetStatus(ctx context.Context, set *apps.StatefulSet, status *apps.StatefulSetStatus) error {
 	defer ssu.updateStatusTracker.inc()
 	if ssu.updateStatusTracker.errorReady() {
 		defer ssu.updateStatusTracker.reset()
@@ -2089,7 +2090,7 @@ func scaleUpStatefulSetControl(set *apps.StatefulSet,
 			}
 		}
 		// run the controller once and check invariants
-		_, err = ssc.UpdateStatefulSet(set, pods)
+		_, err = ssc.UpdateStatefulSet(context.TODO(), set, pods)
 		if err != nil {
 			return err
 		}
@@ -2117,7 +2118,7 @@ func scaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlIn
 		}
 		sort.Sort(ascendingOrdinal(pods))
 		if ordinal := len(pods) - 1; ordinal >= 0 {
-			if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
+			if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 				return err
 			}
 			set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
@@ -2127,7 +2128,7 @@ func scaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlIn
 			if pods, err = spc.addTerminatingPod(set, ordinal); err != nil {
 				return err
 			}
-			if _, err = ssc.UpdateStatefulSet(set, pods); err != nil {
+			if _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 				return err
 			}
 			set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
@@ -2144,7 +2145,7 @@ func scaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlIn
 				spc.podsIndexer.Delete(pods[len(pods)-1])
 			}
 		}
-		if _, err := ssc.UpdateStatefulSet(set, pods); err != nil {
+		if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 			return err
 		}
 		set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
@@ -2207,7 +2208,7 @@ func updateStatefulSetControl(set *apps.StatefulSet,
 	if err != nil {
 		return err
 	}
-	if _, err = ssc.UpdateStatefulSet(set, pods); err != nil {
+	if _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 		return err
 	}
 
@@ -2255,7 +2256,7 @@ func updateStatefulSetControl(set *apps.StatefulSet,
 			}
 		}
 
-		if _, err = ssc.UpdateStatefulSet(set, pods); err != nil {
+		if _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
 			return err
 		}
 		set, err = spc.setsLister.StatefulSets(set.Namespace).Get(set.Name)
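Note: every UpdateStatefulSet call in the control tests above now takes a context as its first argument, with context.TODO() standing in where the test has no context of its own. A minimal sketch (not from the diff) of the new call shape, reusing the ssc, set, pods, spc, and invariants names from these tests:

```go
// Sketch only: the ctx-first call shape used throughout the tests above.
// context.TODO() marks a call site not yet wired to a real caller context.
if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
	t.Fatalf("UpdateStatefulSet failed: %v", err)
}
if err := invariants(set, spc); err != nil {
	t.Error(err)
}
```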
@@ -33,7 +33,7 @@ import (
 type StatefulSetStatusUpdaterInterface interface {
 	// UpdateStatefulSetStatus sets the set's Status to status. Implementations are required to retry on conflicts,
 	// but fail on other errors. If the returned error is nil set's Status has been successfully set to status.
-	UpdateStatefulSetStatus(set *apps.StatefulSet, status *apps.StatefulSetStatus) error
+	UpdateStatefulSetStatus(ctx context.Context, set *apps.StatefulSet, status *apps.StatefulSetStatus) error
 }
 
 // NewRealStatefulSetStatusUpdater returns a StatefulSetStatusUpdaterInterface that updates the Status of a StatefulSet,
@@ -50,11 +50,13 @@ type realStatefulSetStatusUpdater struct {
 }
 
 func (ssu *realStatefulSetStatusUpdater) UpdateStatefulSetStatus(
+	ctx context.Context,
 	set *apps.StatefulSet,
 	status *apps.StatefulSetStatus) error {
 	// don't wait due to limited number of clients, but backoff after the default number of steps
 	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		set.Status = *status
+		// TODO: This context.TODO should use a real context once we have RetryOnConflictWithContext
 		_, updateErr := ssu.client.AppsV1().StatefulSets(set.Namespace).UpdateStatus(context.TODO(), set, metav1.UpdateOptions{})
 		if updateErr == nil {
 			return nil
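Note: StatefulSetStatusUpdaterInterface now takes a context first, so the caller controls cancellation while the implementation keeps an internal context.TODO() until a context-aware RetryOnConflict variant exists. A minimal sketch (not from the diff) of a caller, where syncStatus is a hypothetical helper name:

```go
// Sketch only: a caller threads its own context through the updated interface.
// syncStatus is hypothetical; the interface method is the one defined above.
func syncStatus(ctx context.Context, ssu StatefulSetStatusUpdaterInterface,
	set *apps.StatefulSet, status *apps.StatefulSetStatus) error {
	return ssu.UpdateStatefulSetStatus(ctx, set, status)
}
```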
@@ -17,6 +17,7 @@ limitations under the License.
 package statefulset
 
 import (
+	"context"
 	"errors"
 	"testing"
 
@@ -41,7 +42,7 @@ func TestStatefulSetUpdaterUpdatesSetStatus(t *testing.T) {
 		update := action.(core.UpdateAction)
 		return true, update.GetObject(), nil
 	})
-	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
+	if err := updater.UpdateStatefulSetStatus(context.TODO(), set, &status); err != nil {
 		t.Errorf("Error returned on successful status update: %s", err)
 	}
 	if set.Status.Replicas != 2 {
@@ -62,7 +63,7 @@ func TestStatefulSetStatusUpdaterUpdatesObservedGeneration(t *testing.T) {
 		}
 		return true, sts, nil
 	})
-	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
+	if err := updater.UpdateStatefulSetStatus(context.TODO(), set, &status); err != nil {
 		t.Errorf("Error returned on successful status update: %s", err)
 	}
 }
@@ -78,7 +79,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasFailure(t *testing.T) {
 	fakeClient.AddReactor("update", "statefulsets", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, apierrors.NewInternalError(errors.New("API server down"))
 	})
-	if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
+	if err := updater.UpdateStatefulSetStatus(context.TODO(), set, &status); err == nil {
 		t.Error("Failed update did not return error")
 	}
 }
@@ -101,7 +102,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflict(t *testing.T) {
 		return true, update.GetObject(), nil
 
 	})
-	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
+	if err := updater.UpdateStatefulSetStatus(context.TODO(), set, &status); err != nil {
 		t.Errorf("UpdateStatefulSetStatus returned an error: %s", err)
 	}
 	if set.Status.Replicas != 2 {
@@ -121,7 +122,7 @@ func TestStatefulSetStatusUpdaterUpdateReplicasConflictFailure(t *testing.T) {
 		update := action.(core.UpdateAction)
 		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), set.Name, errors.New("object already exists"))
 	})
-	if err := updater.UpdateStatefulSetStatus(set, &status); err == nil {
+	if err := updater.UpdateStatefulSetStatus(context.TODO(), set, &status); err == nil {
 		t.Error("UpdateStatefulSetStatus failed to return an error on get failure")
 	}
 }
@@ -136,7 +137,7 @@ func TestStatefulSetStatusUpdaterGetAvailableReplicas(t *testing.T) {
 		update := action.(core.UpdateAction)
 		return true, update.GetObject(), nil
 	})
-	if err := updater.UpdateStatefulSetStatus(set, &status); err != nil {
+	if err := updater.UpdateStatefulSetStatus(context.TODO(), set, &status); err != nil {
 		t.Errorf("Error returned on successful status update: %s", err)
 	}
 	if set.Status.AvailableReplicas != 3 {
@@ -18,6 +18,7 @@ package statefulset
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"sort"
 	"testing"
@@ -106,7 +107,7 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
 	if err != nil {
 		t.Error(err)
 	}
-	ssc.syncStatefulSet(set, pods)
+	ssc.syncStatefulSet(context.TODO(), set, pods)
 	selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
 	if err != nil {
 		t.Error(err)
@@ -555,7 +556,7 @@ func TestGetPodsForStatefulSetAdopt(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	pods, err := ssc.getPodsForStatefulSet(set, selector)
+	pods, err := ssc.getPodsForStatefulSet(context.TODO(), set, selector)
 	if err != nil {
 		t.Fatalf("getPodsForStatefulSet() error: %v", err)
 	}
@@ -592,7 +593,7 @@ func TestAdoptOrphanRevisions(t *testing.T) {
 	spc.revisionsIndexer.Add(ss1Rev1)
 	spc.revisionsIndexer.Add(ss1Rev2)
 
-	err = ssc.adoptOrphanRevisions(ss1)
+	err = ssc.adoptOrphanRevisions(context.TODO(), ss1)
 	if err != nil {
 		t.Errorf("adoptOrphanRevisions() error: %v", err)
 	}
@@ -634,7 +635,7 @@ func TestGetPodsForStatefulSetRelease(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	pods, err := ssc.getPodsForStatefulSet(set, selector)
+	pods, err := ssc.getPodsForStatefulSet(context.TODO(), set, selector)
 	if err != nil {
 		t.Fatalf("getPodsForStatefulSet() error: %v", err)
 	}
@@ -673,7 +674,7 @@ func newFakeStatefulSetController(initialObjects ...runtime.Object) (*StatefulSe
 
 func fakeWorker(ssc *StatefulSetController) {
 	if obj, done := ssc.queue.Get(); !done {
-		ssc.sync(obj.(string))
+		ssc.sync(context.TODO(), obj.(string))
 		ssc.queue.Done(obj)
 	}
 }
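Note: fakeWorker mirrors the controller's worker loop, which now passes a context into sync. A minimal sketch (not from the diff) of such a loop, assuming sync and queue behave as in the test helper above; error handling and requeueing are elided:

```go
// Sketch only: a context-aware worker loop in the style of fakeWorker above.
func worker(ctx context.Context, ssc *StatefulSetController) {
	for {
		obj, shutdown := ssc.queue.Get()
		if shutdown {
			return
		}
		// the running controller passes its own ctx; tests use context.TODO()
		ssc.sync(ctx, obj.(string))
		ssc.queue.Done(obj)
	}
}
```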
@@ -438,7 +438,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 		setupScheduler(ctx, t, clientset, informers)
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		ds := newDaemonSet("foo", ns.Name)
 		ds.Spec.UpdateStrategy = *strategy
@@ -474,7 +474,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 		defer cancel()
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		// Start Scheduler
 		setupScheduler(ctx, t, clientset, informers)
@@ -510,7 +510,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
 		defer cancel()
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		// Start Scheduler
 		setupScheduler(ctx, t, clientset, informers)
@@ -579,7 +579,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
 		defer cancel()
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		// Start Scheduler
 		setupScheduler(ctx, t, clientset, informers)
@@ -626,7 +626,7 @@ func TestInsufficientCapacityNode(t *testing.T) {
 		defer cancel()
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		// Start Scheduler
 		setupScheduler(ctx, t, clientset, informers)
@@ -689,7 +689,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
 	defer cancel()
 
 	informers.Start(ctx.Done())
-	go dc.Run(5, ctx.Done())
+	go dc.Run(ctx, 5)
 
 	// Start Scheduler
 	setupScheduler(ctx, t, clientset, informers)
@@ -799,7 +799,7 @@ func TestTaintedNode(t *testing.T) {
 		defer cancel()
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		// Start Scheduler
 		setupScheduler(ctx, t, clientset, informers)
@@ -864,7 +864,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
 		defer cancel()
 
 		informers.Start(ctx.Done())
-		go dc.Run(5, ctx.Done())
+		go dc.Run(ctx, 5)
 
 		// Start Scheduler
 		setupScheduler(ctx, t, clientset, informers)
@@ -872,7 +872,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
 		ds := newDaemonSet("foo", ns.Name)
 		ds.Spec.UpdateStrategy = *strategy
 		ds.Spec.Template.Spec.HostNetwork = true
-		_, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{})
+		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Failed to create DaemonSet: %v", err)
 		}
@@ -889,7 +889,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
 			},
 		}
 
-		_, err = nodeClient.Create(context.TODO(), node, metav1.CreateOptions{})
+		_, err = nodeClient.Create(ctx, node, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Failed to create node: %v", err)
 		}
@@ -907,7 +907,7 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
 			},
 		}
 
-		_, err = nodeClient.Create(context.TODO(), nodeNU, metav1.CreateOptions{})
+		_, err = nodeClient.Create(ctx, nodeNU, metav1.CreateOptions{})
 		if err != nil {
 			t.Fatalf("Failed to create node: %v", err)
 		}
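Note: in the DaemonSet integration tests above, the controller's Run now takes the test's context directly, so cancelling that context stops the controller, and the same context replaces context.TODO() in client calls. A minimal sketch (not from the diff) of the wiring, assuming dc, informers, and the clients are built as in these tests:

```go
// Sketch only: one context drives informers, the controller, and client calls.
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // stops the controller when the test ends

informers.Start(ctx.Done()) // informers still take a stop channel
go dc.Run(ctx, 5)           // Run now takes (ctx, workers) instead of (workers, stopCh)

// client calls reuse the same context instead of context.TODO(),
// e.g. dsClient.Create(ctx, ds, metav1.CreateOptions{})
```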
@@ -57,8 +57,8 @@ func TestNewDeployment(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the Deployment to be updated to revision 1
 	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
@@ -124,8 +124,8 @@ func TestDeploymentRollingUpdate(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	replicas := int32(20)
 	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
@@ -267,8 +267,8 @@ func TestPausedDeployment(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Verify that the paused deployment won't create new replica set.
 	if err := tester.expectNoNewReplicaSet(); err != nil {
@@ -368,8 +368,8 @@ func TestScalePausedDeployment(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the Deployment to be updated to revision 1
 	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
@@ -449,8 +449,8 @@ func TestDeploymentHashCollision(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the Deployment to be updated to revision 1
 	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
@@ -552,8 +552,8 @@ func TestFailedDeployment(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
 		t.Fatal(err)
@@ -594,8 +594,8 @@ func TestOverlappingDeployments(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Create 2 deployments with overlapping selectors
 	var err error
@@ -668,8 +668,8 @@ func TestScaledRolloutDeployment(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Create a deployment with rolling update strategy, max surge = 3, and max unavailable = 2
 	var err error
@@ -871,8 +871,8 @@ func TestSpecReplicasChange(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Scale up/down deployment and verify its replicaset has matching .spec.replicas
 	if err = tester.scaleDeployment(2); err != nil {
@@ -929,8 +929,8 @@ func TestDeploymentAvailableCondition(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the deployment to be observed by the controller and has at least specified number of updated replicas
 	if err = tester.waitForDeploymentUpdatedReplicasGTE(replicas); err != nil {
@@ -1046,8 +1046,8 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the Deployment to be updated to revision 1
 	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
@@ -1138,8 +1138,8 @@ func TestDeploymentScaleSubresource(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the Deployment to be updated to revision 1
 	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
@@ -1182,8 +1182,8 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	informers.Start(stopCh)
-	go rm.Run(5, stopCh)
-	go dc.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
+	go dc.Run(context.TODO(), 5)
 
 	// Wait for the Deployment to be updated to revision 1
 	if err := tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
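Note: the ReplicaSet and Deployment controllers' Run signatures change here from Run(workers, stopCh) to Run(ctx, workers). The deployment tests above still manage a stop channel for the informers and pass context.TODO() to the controllers for now. A minimal sketch (not from the diff) of that interim wiring, with rm and dc as in the tests:

```go
// Sketch only: stop channel for informers, context.TODO() for the controllers.
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)

// Previously: go dc.Run(5, stopCh). Cancellation now travels via the context,
// and context.TODO() is a stopgap until these tests own a real context.
go rm.Run(context.TODO(), 5)
go dc.Run(context.TODO(), 5)
```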
@@ -106,7 +106,7 @@ func TestQuota(t *testing.T) {
 		replicationcontroller.BurstReplicas,
 	)
 	rm.SetEventRecorder(&record.FakeRecorder{})
-	go rm.Run(3, controllerCh)
+	go rm.Run(context.TODO(), 3)
 
 	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -337,7 +337,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
 		replicationcontroller.BurstReplicas,
 	)
 	rm.SetEventRecorder(&record.FakeRecorder{})
-	go rm.Run(3, controllerCh)
+	go rm.Run(context.TODO(), 3)
 
 	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -467,7 +467,7 @@ func TestQuotaLimitService(t *testing.T) {
 		replicationcontroller.BurstReplicas,
 	)
 	rm.SetEventRecorder(&record.FakeRecorder{})
-	go rm.Run(3, controllerCh)
+	go rm.Run(context.TODO(), 3)
 
 	discoveryFunc := clientset.Discovery().ServerPreferredNamespacedResources
 	listerFuncForResource := generic.ListerFuncForResourceFunc(informers.ForResource)
@@ -156,7 +156,7 @@ func runControllerAndInformers(t *testing.T, rm *replicaset.ReplicaSetController
 	stopCh := make(chan struct{})
 	informers.Start(stopCh)
 	waitToObservePods(t, informers.Core().V1().Pods().Informer(), podNum)
-	go rm.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
 	return stopCh
 }
 
@@ -137,7 +137,7 @@ func runControllerAndInformers(t *testing.T, rm *replication.ReplicationManager,
 	stopCh := make(chan struct{})
 	informers.Start(stopCh)
 	waitToObservePods(t, informers.Core().V1().Pods().Informer(), podNum)
-	go rm.Run(5, stopCh)
+	go rm.Run(context.TODO(), 5)
 	return stopCh
 }
@@ -185,7 +185,7 @@ func scSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *statefulset.
 func runControllerAndInformers(sc *statefulset.StatefulSetController, informers informers.SharedInformerFactory) chan struct{} {
 	stopCh := make(chan struct{})
 	informers.Start(stopCh)
-	go sc.Run(5, stopCh)
+	go sc.Run(context.TODO(), 5)
 	return stopCh
 }