Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-02 19:28:16 +00:00)
Share rc cache from the rc manager
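All of the hunks below apply one mechanical change to the rc manager's tests: the manager's private caches and ready flags (podStore, rcStore, podStoreSynced) are replaced by the shared ones (podLister, rcLister, podListerSynced), and TestWatchControllers is rewired to build the manager from a shared informer factory instead of a bare client. A minimal Go sketch of that new wiring, assembled only from lines that appear in this diff (c, alwaysReady, and BurstReplicas are helpers defined elsewhere in the test file, so this is a sketch rather than a self-contained program):

    // Build the manager from shared informers instead of a private store.
    stopCh := make(chan struct{})
    defer close(stopCh)
    informers := informers.NewSharedInformerFactory(c, nil, controller.NoResyncPeriodFunc())
    podInformer := informers.Pods().Informer()
    rcInformer := informers.ReplicationControllers().Informer()
    manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0, false)
    informers.Start(stopCh)
    // The old podStoreSynced flag becomes podListerSynced.
    manager.podListerSynced = alwaysReady

Tests that still construct the manager with NewReplicationManagerFromClient only pick up the rename; the informer-based construction above is exercised in TestWatchControllers.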
@@ -39,6 +39,7 @@ import (
     "k8s.io/kubernetes/pkg/client/restclient"
     "k8s.io/kubernetes/pkg/client/testing/core"
     "k8s.io/kubernetes/pkg/controller"
+    "k8s.io/kubernetes/pkg/controller/informers"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/securitycontext"
     "k8s.io/kubernetes/pkg/util/sets"
@@ -162,12 +163,12 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     fakePodControl := controller.FakePodControl{}
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // 2 running pods, a controller with 2 replicas, sync is a no-op
     controllerSpec := newReplicationController(2)
-    manager.rcStore.Indexer.Add(controllerSpec)
-    newPodList(manager.podStore.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
+    manager.rcLister.Indexer.Add(controllerSpec)
+    newPodList(manager.podLister.Indexer, 2, v1.PodRunning, controllerSpec, "pod")

     manager.podControl = &fakePodControl
     manager.syncReplicationController(getKey(controllerSpec, t))
@@ -178,13 +179,13 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     fakePodControl := controller.FakePodControl{}
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     manager.podControl = &fakePodControl

     // 2 running pods and a controller with 1 replica, one pod delete expected
     controllerSpec := newReplicationController(1)
-    manager.rcStore.Indexer.Add(controllerSpec)
-    newPodList(manager.podStore.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
+    manager.rcLister.Indexer.Add(controllerSpec)
+    newPodList(manager.podLister.Indexer, 2, v1.PodRunning, controllerSpec, "pod")

     manager.syncReplicationController(getKey(controllerSpec, t))
     validateSyncReplication(t, &fakePodControl, 0, 1, 0)
@@ -194,7 +195,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     fakePodControl := controller.FakePodControl{}
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     manager.podControl = &fakePodControl

     received := make(chan string)
@@ -206,7 +207,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
     // The DeletedFinalStateUnknown object should cause the rc manager to insert
     // the controller matching the selectors of the deleted pod into the work queue.
     controllerSpec := newReplicationController(1)
-    manager.rcStore.Indexer.Add(controllerSpec)
+    manager.rcLister.Indexer.Add(controllerSpec)
     pods := newPodList(nil, 1, v1.PodRunning, controllerSpec, "pod")
     manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})

@@ -226,11 +227,11 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 func TestSyncReplicationControllerCreates(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // A controller with 2 replicas and no pods in the store, 2 creates expected
     rc := newReplicationController(2)
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)

     fakePodControl := controller.FakePodControl{}
     manager.podControl = &fakePodControl
@@ -248,14 +249,14 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
     defer testServer.Close()
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // Steady state for the replication controller, no Status.Replicas updates expected
     activePods := 5
     rc := newReplicationController(activePods)
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)
     rc.Status = v1.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
-    newPodList(manager.podStore.Indexer, activePods, v1.PodRunning, rc, "pod")
+    newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, rc, "pod")

     fakePodControl := controller.FakePodControl{}
     manager.podControl = &fakePodControl
@@ -289,19 +290,19 @@ func TestControllerUpdateReplicas(t *testing.T) {
     defer testServer.Close()
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // Insufficient number of pods in the system, and Status.Replicas is wrong;
     // Status.Replica should update to match number of pods in system, 1 new pod should be created.
     rc := newReplicationController(5)
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)
     rc.Status = v1.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
     rc.Generation = 1
-    newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
+    newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod")
     rcCopy := *rc
     extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
     rcCopy.Spec.Selector = extraLabelMap
-    newPodList(manager.podStore.Indexer, 2, v1.PodRunning, &rcCopy, "podWithExtraLabel")
+    newPodList(manager.podLister.Indexer, 2, v1.PodRunning, &rcCopy, "podWithExtraLabel")

     // This response body is just so we don't err out decoding the http response
     response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
@@ -335,12 +336,12 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     fakePodControl := controller.FakePodControl{}
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     manager.podControl = &fakePodControl

     controllerSpec := newReplicationController(2)
-    manager.rcStore.Indexer.Add(controllerSpec)
-    newPodList(manager.podStore.Indexer, 1, v1.PodRunning, controllerSpec, "pod")
+    manager.rcLister.Indexer.Add(controllerSpec)
+    newPodList(manager.podLister.Indexer, 1, v1.PodRunning, controllerSpec, "pod")

     // Creates a replica and sets expectations
     controllerSpec.Status.Replicas = 1
@@ -388,7 +389,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {

 func TestPodControllerLookup(t *testing.T) {
     manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     testCases := []struct {
         inRCs []*v1.ReplicationController
         pod *v1.Pod
@@ -434,7 +435,7 @@ func TestPodControllerLookup(t *testing.T) {
     }
     for _, c := range testCases {
         for _, r := range c.inRCs {
-            manager.rcStore.Indexer.Add(r)
+            manager.rcLister.Indexer.Add(r)
         }
         if rc := manager.getPodController(c.pod); rc != nil {
             if c.outRCName != rc.Name {
@@ -449,9 +450,15 @@ func TestPodControllerLookup(t *testing.T) {
 func TestWatchControllers(t *testing.T) {
     fakeWatch := watch.NewFake()
     c := &fake.Clientset{}
-    c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
-    manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    c.AddWatchReactor("replicationcontrollers", core.DefaultWatchReactor(fakeWatch, nil))
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    informers := informers.NewSharedInformerFactory(c, nil, controller.NoResyncPeriodFunc())
+    podInformer := informers.Pods().Informer()
+    rcInformer := informers.ReplicationControllers().Informer()
+    manager := NewReplicationManager(podInformer, rcInformer, c, BurstReplicas, 0, false)
+    informers.Start(stopCh)
+    manager.podListerSynced = alwaysReady

     var testControllerSpec v1.ReplicationController
     received := make(chan string)
@@ -460,8 +467,7 @@ func TestWatchControllers(t *testing.T) {
     // and eventually into the syncHandler. The handler validates the received controller
     // and closes the received channel to indicate that the test can finish.
     manager.syncHandler = func(key string) error {
-
-        obj, exists, err := manager.rcStore.Indexer.GetByKey(key)
+        obj, exists, err := manager.rcLister.Indexer.GetByKey(key)
         if !exists || err != nil {
             t.Errorf("Expected to find controller under key %v", key)
         }
@@ -472,11 +478,9 @@ func TestWatchControllers(t *testing.T) {
         close(received)
         return nil
     }
+
     // Start only the rc watcher and the workqueue, send a watch event,
     // and make sure it hits the sync method.
-    stopCh := make(chan struct{})
-    defer close(stopCh)
-    go manager.rcController.Run(stopCh)
     go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

     testControllerSpec.Name = "foo"
@@ -494,17 +498,17 @@ func TestWatchPods(t *testing.T) {
     c := &fake.Clientset{}
     c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // Put one rc and one pod into the controller's stores
     testControllerSpec := newReplicationController(1)
-    manager.rcStore.Indexer.Add(testControllerSpec)
+    manager.rcLister.Indexer.Add(testControllerSpec)
     received := make(chan string)
     // The pod update sent through the fakeWatcher should figure out the managing rc and
     // send it into the syncHandler.
     manager.syncHandler = func(key string) error {

-        obj, exists, err := manager.rcStore.Indexer.GetByKey(key)
+        obj, exists, err := manager.rcLister.Indexer.GetByKey(key)
         if !exists || err != nil {
             t.Errorf("Expected to find controller under key %v", key)
         }
@@ -519,7 +523,6 @@ func TestWatchPods(t *testing.T) {
     // and make sure it hits the sync method for the right rc.
     stopCh := make(chan struct{})
     defer close(stopCh)
-    go manager.podController.Run(stopCh)
     go manager.internalPodInformer.Run(stopCh)
     go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

@@ -537,12 +540,12 @@ func TestWatchPods(t *testing.T) {

 func TestUpdatePods(t *testing.T) {
     manager := NewReplicationManagerFromClient(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     received := make(chan string)

     manager.syncHandler = func(key string) error {
-        obj, exists, err := manager.rcStore.Indexer.GetByKey(key)
+        obj, exists, err := manager.rcLister.Indexer.GetByKey(key)
         if !exists || err != nil {
             t.Errorf("Expected to find controller under key %v", key)
         }
@@ -556,16 +559,16 @@ func TestUpdatePods(t *testing.T) {

     // Put 2 rcs and one pod into the controller's stores
     testControllerSpec1 := newReplicationController(1)
-    manager.rcStore.Indexer.Add(testControllerSpec1)
+    manager.rcLister.Indexer.Add(testControllerSpec1)
     testControllerSpec2 := *testControllerSpec1
     testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
     testControllerSpec2.Name = "barfoo"
-    manager.rcStore.Indexer.Add(&testControllerSpec2)
+    manager.rcLister.Indexer.Add(&testControllerSpec2)

-    // case 1: We put in the podStore a pod with labels matching
+    // case 1: We put in the podLister a pod with labels matching
     // testControllerSpec1, then update its labels to match testControllerSpec2.
     // We expect to receive a sync request for both controllers.
-    pod1 := newPodList(manager.podStore.Indexer, 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
+    pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
     pod1.ResourceVersion = "1"
     pod2 := pod1
     pod2.Labels = testControllerSpec2.Spec.Selector
@@ -584,7 +587,7 @@ func TestUpdatePods(t *testing.T) {
         }
     }

-    // case 2: pod1 in the podStore has labels matching testControllerSpec1.
+    // case 2: pod1 in the podLister has labels matching testControllerSpec1.
     // We update its labels to match no replication controller. We expect to
     // receive a sync request for testControllerSpec1.
     pod2.Labels = make(map[string]string)
@@ -615,12 +618,12 @@ func TestControllerUpdateRequeue(t *testing.T) {

     c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     rc := newReplicationController(1)
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)
     rc.Status = v1.ReplicationControllerStatus{Replicas: 2}
-    newPodList(manager.podStore.Indexer, 1, v1.PodRunning, rc, "pod")
+    newPodList(manager.podLister.Indexer, 1, v1.PodRunning, rc, "pod")

     fakePodControl := controller.FakePodControl{}
     manager.podControl = &fakePodControl
@@ -686,11 +689,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     fakePodControl := controller.FakePodControl{}
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     manager.podControl = &fakePodControl

     controllerSpec := newReplicationController(numReplicas)
-    manager.rcStore.Indexer.Add(controllerSpec)
+    manager.rcLister.Indexer.Add(controllerSpec)

     expectedPods := 0
     pods := newPodList(nil, numReplicas, v1.PodPending, controllerSpec, "pod")
@@ -704,14 +707,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
     for _, replicas := range []int{numReplicas, 0} {

         *(controllerSpec.Spec.Replicas) = int32(replicas)
-        manager.rcStore.Indexer.Add(controllerSpec)
+        manager.rcLister.Indexer.Add(controllerSpec)

         for i := 0; i < numReplicas; i += burstReplicas {
             manager.syncReplicationController(getKey(controllerSpec, t))

             // The store accrues active pods. It's also used by the rc to determine how many
             // replicas to create.
-            activePods := len(manager.podStore.Indexer.List())
+            activePods := len(manager.podLister.Indexer.List())
             if replicas != 0 {
                 // This is the number of pods currently "in flight". They were created by the rc manager above,
                 // which then puts the rc to sleep till all of them have been observed.
@@ -725,7 +728,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
                 // This simulates the watch events for all but 1 of the expected pods.
                 // None of these should wake the controller because it has expectations==BurstReplicas.
                 for i := 0; i < expectedPods-1; i++ {
-                    manager.podStore.Indexer.Add(&pods.Items[i])
+                    manager.podLister.Indexer.Add(&pods.Items[i])
                     manager.addPod(&pods.Items[i])
                 }

@@ -761,7 +764,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
                 // has exactly one expectation at the end, to verify that we
                 // don't double delete.
                 for i := range podsToDelete[1:] {
-                    manager.podStore.Indexer.Delete(podsToDelete[i])
+                    manager.podLister.Indexer.Delete(podsToDelete[i])
                     manager.deletePod(podsToDelete[i])
                 }
                 podExp, exists, err := manager.expectations.GetExpectations(rcKey)
@@ -782,7 +785,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
             // The last add pod will decrease the expectation of the rc to 0,
             // which will cause it to create/delete the remaining replicas up to burstReplicas.
             if replicas != 0 {
-                manager.podStore.Indexer.Add(&pods.Items[expectedPods-1])
+                manager.podLister.Indexer.Add(&pods.Items[expectedPods-1])
                 manager.addPod(&pods.Items[expectedPods-1])
             } else {
                 expectedDel := manager.expectations.GetUIDs(getKey(controllerSpec, t))
@@ -797,14 +800,14 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
                         Labels: controllerSpec.Spec.Selector,
                     },
                 }
-                manager.podStore.Indexer.Delete(lastPod)
+                manager.podLister.Indexer.Delete(lastPod)
                 manager.deletePod(lastPod)
             }
             pods.Items = pods.Items[expectedPods:]
         }

         // Confirm that we've created the right number of replicas
-        activePods := int32(len(manager.podStore.Indexer.List()))
+        activePods := int32(len(manager.podLister.Indexer.List()))
         if activePods != *(controllerSpec.Spec.Replicas) {
             t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(controllerSpec.Spec.Replicas), activePods)
         }
@@ -836,13 +839,13 @@ func TestRCSyncExpectations(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     fakePodControl := controller.FakePodControl{}
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     manager.podControl = &fakePodControl

     controllerSpec := newReplicationController(2)
-    manager.rcStore.Indexer.Add(controllerSpec)
+    manager.rcLister.Indexer.Add(controllerSpec)
     pods := newPodList(nil, 2, v1.PodPending, controllerSpec, "pod")
-    manager.podStore.Indexer.Add(&pods.Items[0])
+    manager.podLister.Indexer.Add(&pods.Items[0])
     postExpectationsPod := pods.Items[1]

     manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRCExpectations{
@@ -850,7 +853,7 @@ func TestRCSyncExpectations(t *testing.T) {
             // If we check active pods before checking expectataions, the rc
             // will create a new replica because it doesn't see this pod, but
             // has fulfilled its expectations.
-            manager.podStore.Indexer.Add(&postExpectationsPod)
+            manager.podLister.Indexer.Add(&postExpectationsPod)
         },
     })
     manager.syncReplicationController(getKey(controllerSpec, t))
@@ -860,10 +863,10 @@ func TestRCSyncExpectations(t *testing.T) {
 func TestDeleteControllerAndExpectations(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     rc := newReplicationController(1)
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)

     fakePodControl := controller.FakePodControl{}
     manager.podControl = &fakePodControl
@@ -885,7 +888,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
     if !exists || err != nil {
         t.Errorf("No expectations found for rc")
     }
-    manager.rcStore.Indexer.Delete(rc)
+    manager.rcLister.Indexer.Delete(rc)
     manager.syncReplicationController(getKey(rc, t))

     if _, exists, err = manager.expectations.GetExpectations(rcKey); exists {
@@ -894,37 +897,11 @@ func TestDeleteControllerAndExpectations(t *testing.T) {

     // This should have no effect, since we've deleted the rc.
     podExp.Add(-1, 0)
-    manager.podStore.Indexer.Replace(make([]interface{}, 0), "0")
+    manager.podLister.Indexer.Replace(make([]interface{}, 0), "0")
     manager.syncReplicationController(getKey(rc, t))
     validateSyncReplication(t, &fakePodControl, 0, 0, 0)
 }
-
-func TestRCManagerNotReady(t *testing.T) {
-    c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
-    fakePodControl := controller.FakePodControl{}
-    manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
-    manager.podControl = &fakePodControl
-    manager.podStoreSynced = func() bool { return false }
-
-    // Simulates the rc reflector running before the pod reflector. We don't
-    // want to end up creating replicas in this case until the pod reflector
-    // has synced, so the rc manager should just requeue the rc.
-    controllerSpec := newReplicationController(1)
-    manager.rcStore.Indexer.Add(controllerSpec)
-
-    rcKey := getKey(controllerSpec, t)
-    manager.syncReplicationController(rcKey)
-    validateSyncReplication(t, &fakePodControl, 0, 0, 0)
-    queueRC, _ := manager.queue.Get()
-    if queueRC != rcKey {
-        t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
-    }
-
-    manager.podStoreSynced = alwaysReady
-    manager.syncReplicationController(rcKey)
-    validateSyncReplication(t, &fakePodControl, 1, 0, 0)
-}

 // shuffle returns a new shuffled list of container controllers.
 func shuffle(controllers []*v1.ReplicationController) []*v1.ReplicationController {
     numControllers := len(controllers)
@@ -941,7 +918,7 @@ func TestOverlappingRCs(t *testing.T) {

     for i := 0; i < 5; i++ {
         manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-        manager.podStoreSynced = alwaysReady
+        manager.podListerSynced = alwaysReady

         // Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
         var controllers []*v1.ReplicationController
@@ -953,7 +930,7 @@ func TestOverlappingRCs(t *testing.T) {
         }
         shuffledControllers := shuffle(controllers)
         for j := range shuffledControllers {
-            manager.rcStore.Indexer.Add(shuffledControllers[j])
+            manager.rcLister.Indexer.Add(shuffledControllers[j])
         }
         // Add a pod and make sure only the oldest rc is synced
         pods := newPodList(nil, 1, v1.PodPending, controllers[0], "pod")
@@ -970,10 +947,10 @@ func TestOverlappingRCs(t *testing.T) {
 func TestDeletionTimestamp(t *testing.T) {
     c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     controllerSpec := newReplicationController(1)
-    manager.rcStore.Indexer.Add(controllerSpec)
+    manager.rcLister.Indexer.Add(controllerSpec)
     rcKey, err := controller.KeyFunc(controllerSpec)
     if err != nil {
         t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
@@ -1085,7 +1062,7 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
         ns := fmt.Sprintf("ns-%d", i)
         for j := 0; j < 10; j++ {
             rcName := fmt.Sprintf("rc-%d", j)
-            manager.rcStore.Indexer.Add(&v1.ReplicationController{
+            manager.rcLister.Indexer.Add(&v1.ReplicationController{
                 ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: ns},
                 Spec: v1.ReplicationControllerSpec{
                     Selector: map[string]string{"rcName": rcName},
@@ -1127,7 +1104,7 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {

     for i := 0; i < rcNum; i++ {
         rcName := fmt.Sprintf("rc-%d", i)
-        manager.rcStore.Indexer.Add(&v1.ReplicationController{
+        manager.rcLister.Indexer.Add(&v1.ReplicationController{
             ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: "foo"},
             Spec: v1.ReplicationControllerSpec{
                 Selector: map[string]string{"rcName": rcName},
@@ -1149,7 +1126,7 @@ func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationMana
     fakePodControl = &controller.FakePodControl{}
     manager = NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
     manager.garbageCollectorEnabled = true
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady
     manager.podControl = fakePodControl
     return manager, fakePodControl
 }
@@ -1157,13 +1134,13 @@ func setupManagerWithGCEnabled(objs ...runtime.Object) (manager *ReplicationMana
 func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
     manager, fakePodControl := setupManagerWithGCEnabled()
     rc := newReplicationController(2)
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)
     var trueVar = true
     otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
-    // add to podStore a matching Pod controlled by another controller. Expect no patch.
+    // add to podLister a matching Pod controlled by another controller. Expect no patch.
     pod := newPod("pod", rc, v1.PodRunning, nil)
     pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
-    manager.podStore.Indexer.Add(pod)
+    manager.podLister.Indexer.Add(pod)
     err := manager.syncReplicationController(getKey(rc, t))
     if err != nil {
         t.Fatal(err)
@@ -1175,14 +1152,14 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
 func TestPatchPodWithOtherOwnerRef(t *testing.T) {
     rc := newReplicationController(2)
     manager, fakePodControl := setupManagerWithGCEnabled(rc)
-    manager.rcStore.Indexer.Add(rc)
-    // add to podStore one more matching pod that doesn't have a controller
+    manager.rcLister.Indexer.Add(rc)
+    // add to podLister one more matching pod that doesn't have a controller
     // ref, but has an owner ref pointing to other object. Expect a patch to
     // take control of it.
     unrelatedOwnerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
     pod := newPod("pod", rc, v1.PodRunning, nil)
     pod.OwnerReferences = []metav1.OwnerReference{unrelatedOwnerReference}
-    manager.podStore.Indexer.Add(pod)
+    manager.podLister.Indexer.Add(pod)

     err := manager.syncReplicationController(getKey(rc, t))
     if err != nil {
@@ -1195,13 +1172,13 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
 func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
     rc := newReplicationController(2)
     manager, fakePodControl := setupManagerWithGCEnabled(rc)
-    manager.rcStore.Indexer.Add(rc)
-    // add to podStore a matching pod that has an ownerRef pointing to the rc,
+    manager.rcLister.Indexer.Add(rc)
+    // add to podLister a matching pod that has an ownerRef pointing to the rc,
     // but ownerRef.Controller is false. Expect a patch to take control it.
     rcOwnerReference := metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
     pod := newPod("pod", rc, v1.PodRunning, nil)
     pod.OwnerReferences = []metav1.OwnerReference{rcOwnerReference}
-    manager.podStore.Indexer.Add(pod)
+    manager.podLister.Indexer.Add(pod)

     err := manager.syncReplicationController(getKey(rc, t))
     if err != nil {
@@ -1214,11 +1191,11 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
 func TestPatchPodFails(t *testing.T) {
     rc := newReplicationController(2)
     manager, fakePodControl := setupManagerWithGCEnabled(rc)
-    manager.rcStore.Indexer.Add(rc)
-    // add to podStore two matching pods. Expect two patches to take control
+    manager.rcLister.Indexer.Add(rc)
+    // add to podLister two matching pods. Expect two patches to take control
     // them.
-    manager.podStore.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
-    manager.podStore.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
+    manager.podLister.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
+    manager.podLister.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
     // let both patches fail. The rc manager will assume it fails to take
     // control of the pods and create new ones.
     fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1233,12 +1210,12 @@ func TestPatchPodFails(t *testing.T) {
 func TestPatchExtraPodsThenDelete(t *testing.T) {
     rc := newReplicationController(2)
     manager, fakePodControl := setupManagerWithGCEnabled(rc)
-    manager.rcStore.Indexer.Add(rc)
-    // add to podStore three matching pods. Expect three patches to take control
+    manager.rcLister.Indexer.Add(rc)
+    // add to podLister three matching pods. Expect three patches to take control
     // them, and later delete one of them.
-    manager.podStore.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
-    manager.podStore.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
-    manager.podStore.Indexer.Add(newPod("pod3", rc, v1.PodRunning, nil))
+    manager.podLister.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
+    manager.podLister.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
+    manager.podLister.Indexer.Add(newPod("pod3", rc, v1.PodRunning, nil))
     err := manager.syncReplicationController(getKey(rc, t))
     if err != nil {
         t.Fatal(err)
@@ -1250,8 +1227,8 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
 func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
     manager, fakePodControl := setupManagerWithGCEnabled()
     rc := newReplicationController(2)
-    manager.rcStore.Indexer.Add(rc)
-    // put one pod in the podStore
+    manager.rcLister.Indexer.Add(rc)
+    // put one pod in the podLister
     pod := newPod("pod", rc, v1.PodRunning, nil)
     pod.ResourceVersion = "1"
     var trueVar = true
@@ -1264,7 +1241,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
     // add the updatedPod to the store. This is consistent with the behavior of
     // the Informer: Informer updates the store before call the handler
     // (updatePod() in this case).
-    manager.podStore.Indexer.Add(&updatedPod)
+    manager.podLister.Indexer.Add(&updatedPod)
     // send a update of the same pod with modified labels
     manager.updatePod(pod, &updatedPod)
     // verifies that rc is added to the queue
@@ -1288,15 +1265,15 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
 func TestUpdateSelectorControllerRef(t *testing.T) {
     manager, fakePodControl := setupManagerWithGCEnabled()
     rc := newReplicationController(2)
-    // put 2 pods in the podStore
-    newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
+    // put 2 pods in the podLister
+    newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod")
     // update the RC so that its selector no longer matches the pods
     updatedRC := *rc
     updatedRC.Spec.Selector = map[string]string{"foo": "baz"}
     // put the updatedRC into the store. This is consistent with the behavior of
     // the Informer: Informer updates the store before call the handler
     // (updateRC() in this case).
-    manager.rcStore.Indexer.Add(&updatedRC)
+    manager.rcLister.Indexer.Add(&updatedRC)
     manager.updateRC(rc, &updatedRC)
     // verifies that the rc is added to the queue
     rcKey := getKey(rc, t)
@@ -1323,9 +1300,9 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
     rc := newReplicationController(2)
     now := metav1.Now()
     rc.DeletionTimestamp = &now
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)
     pod1 := newPod("pod1", rc, v1.PodRunning, nil)
-    manager.podStore.Indexer.Add(pod1)
+    manager.podLister.Indexer.Add(pod1)

     // no patch, no create
     err := manager.syncReplicationController(getKey(rc, t))
@@ -1346,16 +1323,16 @@ func TestReadyReplicas(t *testing.T) {

     c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // Status.Replica should update to match number of pods in system, 1 new pod should be created.
     rc := newReplicationController(2)
     rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
     rc.Generation = 1
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)

-    newPodList(manager.podStore.Indexer, 2, v1.PodPending, rc, "pod")
-    newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
+    newPodList(manager.podLister.Indexer, 2, v1.PodPending, rc, "pod")
+    newPodList(manager.podLister.Indexer, 2, v1.PodRunning, rc, "pod")

     // This response body is just so we don't err out decoding the http response
     response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
@@ -1385,7 +1362,7 @@ func TestAvailableReplicas(t *testing.T) {

     c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
     manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
-    manager.podStoreSynced = alwaysReady
+    manager.podListerSynced = alwaysReady

     // Status.Replica should update to match number of pods in system, 1 new pod should be created.
     rc := newReplicationController(2)
@@ -1393,17 +1370,17 @@ func TestAvailableReplicas(t *testing.T) {
     rc.Generation = 1
     // minReadySeconds set to 15s
     rc.Spec.MinReadySeconds = 15
-    manager.rcStore.Indexer.Add(rc)
+    manager.rcLister.Indexer.Add(rc)

     // First pod becomes ready 20s ago
     moment := metav1.Time{Time: time.Now().Add(-2e10)}
     pod := newPod("pod", rc, v1.PodRunning, &moment)
-    manager.podStore.Indexer.Add(pod)
+    manager.podLister.Indexer.Add(pod)

     // Second pod becomes ready now
     otherMoment := metav1.Now()
     otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment)
-    manager.podStore.Indexer.Add(otherPod)
+    manager.podLister.Indexer.Add(otherPod)

     // This response body is just so we don't err out decoding the http response
     response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})