	Enable Garbage collection by default for RS and RC
@@ -60,7 +60,6 @@ func startReplicationController(ctx ControllerContext) (bool, error) {
 		ctx.ClientBuilder.ClientOrDie("replication-controller"),
 		replicationcontroller.BurstReplicas,
 		int(ctx.Options.LookupCacheSizeForRC),
-		ctx.Options.EnableGarbageCollector,
 	).Run(int(ctx.Options.ConcurrentRCSyncs), ctx.Stop)
 	return true, nil
 }

@@ -64,7 +64,6 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) {
 		ctx.ClientBuilder.ClientOrDie("replicaset-controller"),
 		replicaset.BurstReplicas,
 		int(ctx.Options.LookupCacheSizeForRS),
-		ctx.Options.EnableGarbageCollector,
 	).Run(int(ctx.Options.ConcurrentRSSyncs), ctx.Stop)
 	return true, nil
 }
@@ -94,14 +94,10 @@ type ReplicaSetController struct {
 
 	// Controllers that need to be synced
 	queue workqueue.RateLimitingInterface
-
-	// garbageCollectorEnabled denotes if the garbage collector is enabled. RC
-	// manager behaves differently if GC is enabled.
-	garbageCollectorEnabled bool
 }
 
 // NewReplicaSetController configures a replica set controller with the specified event recorder
-func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
+func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
 	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().RESTClient().GetRateLimiter())
 	}

@@ -118,7 +114,6 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,
 		burstReplicas: burstReplicas,
 		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
 		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicaset"),
-		garbageCollectorEnabled: garbageCollectorEnabled,
 	}
 
 	rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
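
A sketch (not from this diff) of what a caller looks like after this change: the constructor above now takes five arguments and no garbage-collector flag. Only calls that appear elsewhere in this diff are used; the variable names, resync period, worker count, and lookup-cache value are illustrative placeholders.

	// Sketch only: kubeClient, resyncPeriod and stopCh are placeholders.
	informerFactory := informers.NewSharedInformerFactory(kubeClient, resyncPeriod)
	rsc := replicaset.NewReplicaSetController(
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Core().V1().Pods(),
		kubeClient,
		replicaset.BurstReplicas,
		4096, // lookupCacheSize, illustrative value
	)
	go rsc.Run(5, stopCh)
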
@@ -487,7 +482,6 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte
 			defer wg.Done()
 			var err error

-			if rsc.garbageCollectorEnabled {
 			var trueVar = true
 			controllerRef := &metav1.OwnerReference{
 				APIVersion: getRSKind().GroupVersion().String(),

@@ -497,9 +491,6 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte
 				Controller: &trueVar,
 			}
 			err = rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef)
-			} else {
-				err = rsc.podControl.CreatePods(rs.Namespace, &rs.Spec.Template, rs)
-			}
 			if err != nil {
 				// Decrement the expected number of creates because the informer won't observe this pod
 				glog.V(2).Infof("Failed creation, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name)
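
With the flag gone, every pod the ReplicaSet controller creates goes through the controller-ref path; the plain CreatePods branch above is removed. A sketch of the resulting straight-line code follows. The Kind/Name/UID fields are assumptions, since only APIVersion and Controller appear in the hunks above.

	var trueVar = true
	controllerRef := &metav1.OwnerReference{
		APIVersion: getRSKind().GroupVersion().String(),
		Kind:       getRSKind().Kind, // assumption: not shown in the hunk
		Name:       rs.Name,          // assumption
		UID:        rs.UID,           // assumption
		Controller: &trueVar,
	}
	err = rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef)
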
@@ -595,7 +586,6 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	// modify them, you need to copy it first.
 	// TODO: Do the List and Filter in a single pass, or use an index.
 	var filteredPods []*v1.Pod
-	if rsc.garbageCollectorEnabled {
 	// list all pods to include the pods that don't match the rs`s selector
 	// anymore but has the stale controller ref.
 	pods, err := rsc.podLister.Pods(rs.Namespace).List(labels.Everything())

@@ -610,13 +600,6 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 		rsc.queue.Add(key)
 		return err
 	}
-	} else {
-		pods, err := rsc.podLister.Pods(rs.Namespace).List(selector)
-		if err != nil {
-			return err
-		}
-		filteredPods = controller.FilterActivePods(pods)
-	}
 
 	var manageReplicasErr error
 	if rsNeedsSync && rs.DeletionTimestamp == nil {
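
The removed else branch was the non-GC path that listed only selector-matching pods. The surviving path, per the comment in the hunk above, lists every pod in the namespace so that pods whose labels no longer match but which still carry a stale controller ref are seen. A sketch; the code between the two hunks is elided in the diff and is not reproduced here.

	pods, err := rsc.podLister.Pods(rs.Namespace).List(labels.Everything())
	if err != nil {
		return err // sketch; the actual error handling sits between the two hunks above
	}
	// filteredPods is then derived from pods; the claim/orphan logic is not shown in this diff.
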
@@ -60,7 +60,6 @@ func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh ch
 		client,
 		burstReplicas,
 		lookupCacheSize,
-		false,
 	)
 
 	ret.podListerSynced = alwaysReady

@@ -147,7 +146,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
 }
 
 // create a pod with the given phase for the given rs (same selectors and namespace)
-func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time) *v1.Pod {
+func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
 	var conditions []v1.PodCondition
 	if status == v1.PodRunning {
 		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}

@@ -156,11 +155,17 @@ func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTran
 		}
 		conditions = append(conditions, condition)
 	}
+	var controllerReference metav1.OwnerReference
+	if properlyOwned {
+		var trueVar = true
+		controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
+	}
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            name,
 			Namespace:       rs.Namespace,
 			Labels:          rs.Spec.Selector.MatchLabels,
+			OwnerReferences: []metav1.OwnerReference{controllerReference},
 		},
 		Status: v1.PodStatus{Phase: status, Conditions: conditions},
 	}
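
The test helper now takes a properlyOwned flag, so call sites decide whether the fake pod already carries a controller ref pointing at the ReplicaSet. A usage sketch with illustrative pod names (not taken from the diff):

	ownedPod := newPod("owned", rs, v1.PodRunning, nil, true)    // OwnerReferences set, Controller=true
	orphanPod := newPod("orphan", rs, v1.PodRunning, nil, false) // carries an empty OwnerReference value
	informers.Core().V1().Pods().Informer().GetIndexer().Add(ownedPod)
	informers.Core().V1().Pods().Informer().GetIndexer().Add(orphanPod)
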
@@ -172,7 +177,7 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[s
 	var trueVar = true
 	controllerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
 	for i := 0; i < count; i++ {
-		pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status, nil)
+		pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status, nil, false)
 		pod.ObjectMeta.Labels = labelMap
 		pod.OwnerReferences = []metav1.OwnerReference{controllerReference}
 		if store != nil {

@@ -532,7 +537,6 @@ func TestWatchControllers(t *testing.T) {
 		client,
 		BurstReplicas,
 		0,
-		false,
 	)
 	informers.Start(stopCh)
 

@@ -1135,12 +1139,10 @@ func TestDeletionTimestamp(t *testing.T) {
 }
 
 // setupManagerWithGCEnabled creates a RS manager with a fakePodControl
-// and with garbageCollectorEnabled set to true
 func setupManagerWithGCEnabled(stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl, informers informers.SharedInformerFactory) {
 	c := fakeclientset.NewSimpleClientset(objs...)
 	fakePodControl = &controller.FakePodControl{}
 	manager, informers = testNewReplicaSetControllerFromClient(c, stopCh, BurstReplicas, 0)
-	manager.garbageCollectorEnabled = true
 
 	manager.podControl = fakePodControl
 	return manager, fakePodControl, informers
@@ -1156,7 +1158,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 	var trueVar = true
 	otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
 	// add to podLister a matching Pod controlled by another controller. Expect no patch.
-	pod := newPod("pod", rs, v1.PodRunning, nil)
+	pod := newPod("pod", rs, v1.PodRunning, nil, true)
 	pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 	err := manager.syncReplicaSet(getKey(rs, t))

@@ -1178,7 +1180,7 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
 	// ref, but has an owner ref pointing to other object. Expect a patch to
 	// take control of it.
 	unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
-	pod := newPod("pod", rs, v1.PodRunning, nil)
+	pod := newPod("pod", rs, v1.PodRunning, nil, false)
 	pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 

@@ -1200,7 +1202,7 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
 	// add to podLister a matching pod that has an ownerRef pointing to the rs,
 	// but ownerRef.Controller is false. Expect a patch to take control it.
 	rsOwnerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
-	pod := newPod("pod", rs, v1.PodRunning, nil)
+	pod := newPod("pod", rs, v1.PodRunning, nil, false)
 	pod.OwnerReferences = []metav1.OwnerReference{rsOwnerReference}
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 

@@ -1221,8 +1223,8 @@ func TestPatchPodFails(t *testing.T) {
 	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	// add to podLister two matching pods. Expect two patches to take control
 	// them.
-	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil))
-	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil))
+	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil, false))
+	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil, false))
 	// let both patches fail. The rs controller will assume it fails to take
 	// control of the pods and requeue to try again.
 	fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1249,9 +1251,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
 	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	// add to podLister three matching pods. Expect three patches to take control
 	// them, and later delete one of them.
-	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil))
-	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil))
-	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod3", rs, v1.PodRunning, nil))
+	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil, false))
+	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil, false))
+	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod3", rs, v1.PodRunning, nil, false))
 	err := manager.syncReplicaSet(getKey(rs, t))
 	if err != nil {
 		t.Fatal(err)

@@ -1268,7 +1270,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
 	manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
 	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
 	// put one pod in the podLister
-	pod := newPod("pod", rs, v1.PodRunning, nil)
+	pod := newPod("pod", rs, v1.PodRunning, nil, false)
 	pod.ResourceVersion = "1"
 	var trueVar = true
 	rsOwnerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}

@@ -1346,7 +1348,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 	now := metav1.Now()
 	rs.DeletionTimestamp = &now
 	informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
-	pod1 := newPod("pod1", rs, v1.PodRunning, nil)
+	pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
 
 	// no patch, no create

@@ -1425,12 +1427,12 @@ func TestAvailableReplicas(t *testing.T) {
 
 	// First pod becomes ready 20s ago
 	moment := metav1.Time{Time: time.Now().Add(-2e10)}
-	pod := newPod("pod", rs, v1.PodRunning, &moment)
+	pod := newPod("pod", rs, v1.PodRunning, &moment, true)
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
 
 	// Second pod becomes ready now
 	otherMoment := metav1.Now()
-	otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment)
+	otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment, true)
 	informers.Core().V1().Pods().Informer().GetIndexer().Add(otherPod)
 
 	// This response body is just so we don't err out decoding the http response
@@ -89,14 +89,10 @@ type ReplicationManager struct {
 
 	// Controllers that need to be synced
 	queue workqueue.RateLimitingInterface
-
-	// garbageCollectorEnabled denotes if the garbage collector is enabled. RC
-	// manager behaves differently if GC is enabled.
-	garbageCollectorEnabled bool
 }
 
 // NewReplicationManager configures a replication manager with the specified event recorder
-func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
+func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int) *ReplicationManager {
 	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter())
 	}

@@ -114,7 +110,6 @@ func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer cor
 		burstReplicas: burstReplicas,
 		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
 		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicationmanager"),
-		garbageCollectorEnabled: garbageCollectorEnabled,
 	}
 
 	rcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -484,7 +479,6 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*v1.Pod, rc *v1.Repl
 			go func() {
 				defer wg.Done()
 				var err error
-				if rm.garbageCollectorEnabled {
 				var trueVar = true
 				controllerRef := &metav1.OwnerReference{
 					APIVersion: getRCKind().GroupVersion().String(),

@@ -494,9 +488,6 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*v1.Pod, rc *v1.Repl
 					Controller: &trueVar,
 				}
 				err = rm.podControl.CreatePodsWithControllerRef(rc.Namespace, rc.Spec.Template, rc, controllerRef)
-				} else {
-					err = rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc)
-				}
 				if err != nil {
 					// Decrement the expected number of creates because the informer won't observe this pod
 					glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
@@ -610,7 +601,6 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
 	// modify them, you need to copy it first.
 	// TODO: Do the List and Filter in a single pass, or use an index.
 	var filteredPods []*v1.Pod
-	if rm.garbageCollectorEnabled {
 	// list all pods to include the pods that don't match the rc's selector
 	// anymore but has the stale controller ref.
 	pods, err := rm.podLister.Pods(rc.Namespace).List(labels.Everything())

@@ -627,15 +617,6 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
 		rm.queue.Add(key)
 		return err
 	}
-	} else {
-		pods, err := rm.podLister.Pods(rc.Namespace).List(labels.Set(rc.Spec.Selector).AsSelectorPreValidated())
-		if err != nil {
-			utilruntime.HandleError(fmt.Errorf("Error getting pods for rc %q: %v", key, err))
-			rm.queue.Add(key)
-			return err
-		}
-		filteredPods = controller.FilterActivePods(pods)
-	}
 
 	var manageReplicasErr error
 	if rcNeedsSync && rc.DeletionTimestamp == nil {
@@ -102,7 +102,7 @@ func newReplicationController(replicas int) *v1.ReplicationController {
 }
 
 // create a pod with the given phase for the given rc (same selectors and namespace).
-func newPod(name string, rc *v1.ReplicationController, status v1.PodPhase, lastTransitionTime *metav1.Time) *v1.Pod {
+func newPod(name string, rc *v1.ReplicationController, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
 	var conditions []v1.PodCondition
 	if status == v1.PodRunning {
 		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}

@@ -111,11 +111,18 @@ func newPod(name string, rc *v1.ReplicationController, status v1.PodPhase, lastT
 		}
 		conditions = append(conditions, condition)
 	}
+	var controllerReference metav1.OwnerReference
+	if properlyOwned {
+		var trueVar = true
+		controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rc.Name, Controller: &trueVar}
+	}
+
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:            name,
 			Labels:          rc.Spec.Selector,
 			Namespace:       rc.Namespace,
+			OwnerReferences: []metav1.OwnerReference{controllerReference},
 		},
 		Status: v1.PodStatus{Phase: status, Conditions: conditions},
 	}

@@ -127,7 +134,7 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica
 	var trueVar = true
 	controllerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
 	for i := 0; i < count; i++ {
-		pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status, nil)
+		pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status, nil, false)
 		pod.OwnerReferences = []metav1.OwnerReference{controllerReference}
 		if store != nil {
 			store.Add(pod)
@@ -164,7 +171,7 @@ func NewReplicationManagerFromClient(kubeClient clientset.Interface, burstReplic
 	informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
 	podInformer := informerFactory.Core().V1().Pods()
 	rcInformer := informerFactory.Core().V1().ReplicationControllers()
-	rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize, false)
+	rm := NewReplicationManager(podInformer, rcInformer, kubeClient, burstReplicas, lookupCacheSize)
 	rm.podListerSynced = alwaysReady
 	rm.rcListerSynced = alwaysReady
 	return rm, podInformer, rcInformer

@@ -459,7 +466,7 @@ func TestWatchControllers(t *testing.T) {
 	informers := informers.NewSharedInformerFactory(c, controller.NoResyncPeriodFunc())
 	podInformer := informers.Core().V1().Pods()
 	rcInformer := informers.Core().V1().ReplicationControllers()
-	manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0, false)
+	manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0)
 	informers.Start(stopCh)
 
 	var testControllerSpec v1.ReplicationController

@@ -1114,12 +1121,11 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
 	}
 }
 
-// setupManagerWithGCEnabled creates a RC manager with a fakePodControl and with garbageCollectorEnabled set to true
+// setupManagerWithGCEnabled creates a RC manager with a fakePodControl
 func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationManager, fakePodControl *controller.FakePodControl, podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer) {
 	c := fakeclientset.NewSimpleClientset(objs...)
 	fakePodControl = &controller.FakePodControl{}
 	manager, podInformer, rcInformer = NewReplicationManagerFromClient(c, BurstReplicas, 0)
-	manager.garbageCollectorEnabled = true
 	manager.podControl = fakePodControl
 	return manager, fakePodControl, podInformer, rcInformer
 }
@@ -1131,7 +1137,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 	var trueVar = true
 	otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
 	// add to podLister a matching Pod controlled by another controller. Expect no patch.
-	pod := newPod("pod", rc, v1.PodRunning, nil)
+	pod := newPod("pod", rc, v1.PodRunning, nil, false)
 	pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
 	podInformer.Informer().GetIndexer().Add(pod)
 	err := manager.syncReplicationController(getKey(rc, t))

@@ -1150,7 +1156,7 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
 	// ref, but has an owner ref pointing to other object. Expect a patch to
 	// take control of it.
 	unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
-	pod := newPod("pod", rc, v1.PodRunning, nil)
+	pod := newPod("pod", rc, v1.PodRunning, nil, false)
 	pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
 	podInformer.Informer().GetIndexer().Add(pod)
 

@@ -1169,7 +1175,7 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
 	// add to podLister a matching pod that has an ownerRef pointing to the rc,
 	// but ownerRef.Controller is false. Expect a patch to take control it.
 	rcOwnerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
-	pod := newPod("pod", rc, v1.PodRunning, nil)
+	pod := newPod("pod", rc, v1.PodRunning, nil, false)
 	pod.OwnerReferences = []metav1.OwnerReference{rcOwnerReference}
 	podInformer.Informer().GetIndexer().Add(pod)
 

@@ -1187,8 +1193,8 @@ func TestPatchPodFails(t *testing.T) {
 	rcInformer.Informer().GetIndexer().Add(rc)
 	// add to podLister two matching pods. Expect two patches to take control
 	// them.
-	podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil))
-	podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil))
+	podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil, false))
+	podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil, false))
 	// let both patches fail. The rc manager will assume it fails to take
 	// control of the pods and requeue to try again.
 	fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1212,9 +1218,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
 	rcInformer.Informer().GetIndexer().Add(rc)
 	// add to podLister three matching pods. Expect three patches to take control
 	// them, and later delete one of them.
-	podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil))
-	podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil))
-	podInformer.Informer().GetIndexer().Add(newPod("pod3", rc, v1.PodRunning, nil))
+	podInformer.Informer().GetIndexer().Add(newPod("pod1", rc, v1.PodRunning, nil, false))
+	podInformer.Informer().GetIndexer().Add(newPod("pod2", rc, v1.PodRunning, nil, false))
+	podInformer.Informer().GetIndexer().Add(newPod("pod3", rc, v1.PodRunning, nil, false))
 	err := manager.syncReplicationController(getKey(rc, t))
 	if err != nil {
 		t.Fatal(err)

@@ -1228,7 +1234,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
 	rc := newReplicationController(2)
 	rcInformer.Informer().GetIndexer().Add(rc)
 	// put one pod in the podLister
-	pod := newPod("pod", rc, v1.PodRunning, nil)
+	pod := newPod("pod", rc, v1.PodRunning, nil, false)
 	pod.ResourceVersion = "1"
 	var trueVar = true
 	rcOwnerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}

@@ -1300,7 +1306,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
 	now := metav1.Now()
 	rc.DeletionTimestamp = &now
 	rcInformer.Informer().GetIndexer().Add(rc)
-	pod1 := newPod("pod1", rc, v1.PodRunning, nil)
+	pod1 := newPod("pod1", rc, v1.PodRunning, nil, false)
 	podInformer.Informer().GetIndexer().Add(pod1)
 
 	// no patch, no create

@@ -1371,12 +1377,12 @@ func TestAvailableReplicas(t *testing.T) {
 
 	// First pod becomes ready 20s ago
 	moment := metav1.Time{Time: time.Now().Add(-2e10)}
-	pod := newPod("pod", rc, v1.PodRunning, &moment)
+	pod := newPod("pod", rc, v1.PodRunning, &moment, true)
 	podInformer.Informer().GetIndexer().Add(pod)
 
 	// Second pod becomes ready now
 	otherMoment := metav1.Now()
-	otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment)
+	otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment, true)
 	podInformer.Informer().GetIndexer().Add(otherPod)
 
 	// This response body is just so we don't err out decoding the http response
@@ -124,7 +124,7 @@ func NewMasterComponents(c *Config) *MasterComponents {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}, QPS: c.QPS, Burst: c.Burst})
 	rcStopCh := make(chan struct{})
 	informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
-	controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst, 4096, false)
+	controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst, 4096)
 
 	// TODO: Support events once we can cleanly shutdown an event recorder.
 	controllerManager.SetEventRecorder(&record.FakeRecorder{})

@@ -97,7 +97,6 @@ func TestQuota(t *testing.T) {
 		clientset,
 		replicationcontroller.BurstReplicas,
 		4096,
-		false,
 	)
 	rm.SetEventRecorder(&record.FakeRecorder{})
 	go rm.Run(3, controllerCh)

@@ -282,7 +281,6 @@ func TestQuotaLimitedResourceDenial(t *testing.T) {
 		clientset,
 		replicationcontroller.BurstReplicas,
 		4096,
-		false,
 	)
 	rm.SetEventRecorder(&record.FakeRecorder{})
 	go rm.Run(3, controllerCh)
@@ -126,7 +126,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
 	return ret, nil
 }
 
-func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
+func rmSetup(t *testing.T) (*httptest.Server, *replicaset.ReplicaSetController, informers.SharedInformerFactory, clientset.Interface) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	_, s := framework.RunAMaster(masterConfig)
 

@@ -144,7 +144,6 @@ func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *repl
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
 		replicaset.BurstReplicas,
 		4096,
-		enableGarbageCollector,
 	)
 
 	if err != nil {

@@ -220,7 +219,7 @@ func TestAdoption(t *testing.T) {
 		},
 	}
 	for i, tc := range testCases {
-		s, rm, informers, clientSet := rmSetup(t, true)
+		s, rm, informers, clientSet := rmSetup(t)
 		podInformer := informers.Core().V1().Pods().Informer()
 		ns := framework.CreateTestingNamespace(fmt.Sprintf("rs-adoption-%d", i), s, t)
 		defer framework.DeleteTestingNamespace(ns, s, t)

@@ -298,7 +297,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 	// We have pod1, pod2 and rs. rs.spec.replicas=1. At first rs.Selector
 	// matches pod1 only; change the selector to match pod2 as well. Verify
 	// there is only one pod left.
-	s, rm, informers, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t)
 	ns := framework.CreateTestingNamespace("rs-update-selector-to-adopt", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 1)

@@ -338,7 +337,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	// matches pod1 and pod2; change the selector to match only pod1. Verify
 	// that rs creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
-	s, rm, informers, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t)
 	podInformer := informers.Core().V1().Pods().Informer()
 	ns := framework.CreateTestingNamespace("rs-update-selector-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)

@@ -385,7 +384,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	// matches pod1 and pod2; change pod2's labels to non-matching. Verify
 	// that rs creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
-	s, rm, informers, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t)
 	ns := framework.CreateTestingNamespace("rs-update-label-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 2)

@@ -427,7 +426,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 	// matches pod1 only; change pod2's labels to be matching. Verify the RS
 	// controller adopts pod2 and delete one of them, so there is only 1 pod
 	// left.
-	s, rm, informers, clientSet := rmSetup(t, true)
+	s, rm, informers, clientSet := rmSetup(t)
 	ns := framework.CreateTestingNamespace("rs-update-label-to-be-adopted", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rs := newRS("rs", ns.Name, 1)
@@ -123,7 +123,7 @@ func verifyRemainingObjects(t *testing.T, clientSet clientset.Interface, namespa
 	return ret, nil
 }
 
-func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
+func rmSetup(t *testing.T, stopCh chan struct{}) (*httptest.Server, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	_, s := framework.RunAMaster(masterConfig)
 

@@ -135,7 +135,7 @@ func rmSetup(t *testing.T, stopCh chan struct{}, enableGarbageCollector bool) (*
 	resyncPeriod := 12 * time.Hour
 
 	informers := informers.NewSharedInformerFactory(clientSet, resyncPeriod)
-	rm := replication.NewReplicationManager(informers.Core().V1().Pods(), informers.Core().V1().ReplicationControllers(), clientSet, replication.BurstReplicas, 4096, enableGarbageCollector)
+	rm := replication.NewReplicationManager(informers.Core().V1().Pods(), informers.Core().V1().ReplicationControllers(), clientSet, replication.BurstReplicas, 4096)
 	informers.Start(stopCh)
 
 	return s, rm, informers, clientSet

@@ -209,7 +209,7 @@ func TestAdoption(t *testing.T) {
 	}
 	for i, tc := range testCases {
 		stopCh := make(chan struct{})
-		s, rm, informers, clientSet := rmSetup(t, stopCh, true)
+		s, rm, informers, clientSet := rmSetup(t, stopCh)
 		ns := framework.CreateTestingNamespace(fmt.Sprintf("adoption-%d", i), s, t)
 		defer framework.DeleteTestingNamespace(ns, s, t)
 

@@ -286,7 +286,7 @@ func TestUpdateSelectorToAdopt(t *testing.T) {
 	// matches pod1 only; change the selector to match pod2 as well. Verify
 	// there is only one pod left.
 	stopCh := make(chan struct{})
-	s, rm, _, clientSet := rmSetup(t, stopCh, true)
+	s, rm, _, clientSet := rmSetup(t, stopCh)
 	ns := framework.CreateTestingNamespace("update-selector-to-adopt", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rc := newRC("rc", ns.Name, 1)

@@ -325,7 +325,7 @@ func TestUpdateSelectorToRemoveControllerRef(t *testing.T) {
 	// that rc creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
 	stopCh := make(chan struct{})
-	s, rm, informers, clientSet := rmSetup(t, stopCh, true)
+	s, rm, informers, clientSet := rmSetup(t, stopCh)
 	ns := framework.CreateTestingNamespace("update-selector-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rc := newRC("rc", ns.Name, 2)

@@ -370,7 +370,7 @@ func TestUpdateLabelToRemoveControllerRef(t *testing.T) {
 	// that rc creates one more pod, so there are 3 pods. Also verify that
 	// pod2's controllerRef is cleared.
 	stopCh := make(chan struct{})
-	s, rm, _, clientSet := rmSetup(t, stopCh, true)
+	s, rm, _, clientSet := rmSetup(t, stopCh)
 	ns := framework.CreateTestingNamespace("update-label-to-remove-controllerref", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rc := newRC("rc", ns.Name, 2)

@@ -411,7 +411,7 @@ func TestUpdateLabelToBeAdopted(t *testing.T) {
 	// controller adopts pod2 and delete one of them, so there is only 1 pod
 	// left.
 	stopCh := make(chan struct{})
-	s, rm, _, clientSet := rmSetup(t, stopCh, true)
+	s, rm, _, clientSet := rmSetup(t, stopCh)
 	ns := framework.CreateTestingNamespace("update-label-to-be-adopted", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 	rc := newRC("rc", ns.Name, 1)