adding pods lister

Krzysztof Siedlecki
2019-08-01 18:59:22 +02:00
parent 08410cbf06
commit 9406e5bf2a
4 changed files with 286 additions and 71 deletions

@@ -91,27 +91,25 @@ type nodeLifecycleController struct {
 
 // doEviction does the fake eviction and returns the status of eviction operation.
 func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool {
-	var podEvicted bool
+	nc.evictorLock.Lock()
+	defer nc.evictorLock.Unlock()
 	zones := testutil.GetZones(fakeNodeHandler)
 	for _, zone := range zones {
 		nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
 			uid, _ := value.UID.(string)
-			pods, err := listPodsFromNode(fakeNodeHandler, value.Value)
-			if err != nil {
-				return false, 0
-			}
+			pods, _ := nc.getPodsAssignedToNode(value.Value)
 			nodeutil.DeletePods(fakeNodeHandler, pods, nc.recorder, value.Value, uid, nc.daemonSetStore)
+			_ = nc.nodeEvictionMap.setStatus(value.Value, evicted)
 			return true, 0
 		})
 	}
 
 	for _, action := range fakeNodeHandler.Actions() {
 		if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
-			podEvicted = true
-			return podEvicted
+			return true
 		}
 	}
-	return podEvicted
+	return false
 }
 
 func createNodeLease(nodeName string, renewTime metav1.MicroTime) *coordv1.Lease {
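
The hunks in this file only show call sites switching from listPodsFromNode, which issued a fresh pod List on every pass, to the controller's getPodsAssignedToNode. The lister itself is not visible in this view; the sketch below shows one way such a function can be backed by a shared-informer index keyed on spec.nodeName. The identifiers podInformer and nodeNameKeyIndex and the exact wiring are illustrative assumptions, not necessarily the committed code.

// Illustrative sketch: index pods by spec.nodeName so the controller can
// answer "which pods run on this node?" from the informer cache.
// cache is k8s.io/client-go/tools/cache; podInformer is assumed to be a
// core/v1 pod informer available to the controller's constructor.
const nodeNameKeyIndex = "spec.nodeName" // assumed index name

podInformer.Informer().AddIndexers(cache.Indexers{
	nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
		pod, ok := obj.(*v1.Pod)
		if !ok || len(pod.Spec.NodeName) == 0 {
			return []string{}, nil
		}
		return []string{pod.Spec.NodeName}, nil
	},
})

podIndexer := podInformer.Informer().GetIndexer()
nc.getPodsAssignedToNode = func(nodeName string) ([]*v1.Pod, error) {
	// ByIndex returns the cached objects whose index value matches nodeName.
	objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
	if err != nil {
		return nil, err
	}
	pods := make([]*v1.Pod, 0, len(objs))
	for _, obj := range objs {
		if pod, ok := obj.(*v1.Pod); ok {
			pods = append(pods, pod)
		}
	}
	return pods, nil
}

The trade-off is the usual informer one: no per-call round trip to the API server, in exchange for possibly slightly stale results from the cache.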
@@ -701,10 +699,11 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) {
 		if _, ok := nodeController.zonePodEvictor[zone]; ok {
 			nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
 				nodeUID, _ := value.UID.(string)
-				pods, err := listPodsFromNode(item.fakeNodeHandler, value.Value)
+				pods, err := nodeController.getPodsAssignedToNode(value.Value)
 				if err != nil {
 					t.Errorf("unexpected error: %v", err)
 				}
+				t.Logf("listed pods %d for node %v", len(pods), value.Value)
 				nodeutil.DeletePods(item.fakeNodeHandler, pods, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
 				return true, 0
 			})
@@ -858,7 +857,7 @@ func TestPodStatusChange(t *testing.T) {
 	for _, zone := range zones {
 		nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
 			nodeUID, _ := value.UID.(string)
-			pods, err := listPodsFromNode(item.fakeNodeHandler, value.Value)
+			pods, err := nodeController.getPodsAssignedToNode(value.Value)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
@@ -879,7 +878,7 @@ func TestPodStatusChange(t *testing.T) {
 		}
 
 		if podReasonUpdate != item.expectedPodUpdate {
-			t.Errorf("expected pod update: %+v, got %+v for %+v", podReasonUpdate, item.expectedPodUpdate, item.description)
+			t.Errorf("expected pod update: %+v, got %+v for %+v", item.expectedPodUpdate, podReasonUpdate, item.description)
 		}
 	}
 }
@@ -2418,11 +2417,12 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 		}
 	}
 	table := []struct {
-		desc                     string
-		fakeNodeHandler          *testutil.FakeNodeHandler
-		updateReactor            func(action testcore.Action) (bool, runtime.Object, error)
-		nodeIterations           []nodeIteration
-		expectedPodStatusUpdates int
+		desc                      string
+		fakeNodeHandler           *testutil.FakeNodeHandler
+		updateReactor             func(action testcore.Action) (bool, runtime.Object, error)
+		fakeGetPodsAssignedToNode func(c *fake.Clientset) func(string) ([]*v1.Pod, error)
+		nodeIterations            []nodeIteration
+		expectedPodStatusUpdates  int
 	}{
 		// Node created long time ago, with status updated by kubelet exceeds grace period.
 		// First monitorNodeHealth check will update pod status to NotReady.
@@ -2432,6 +2432,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 			fakeNodeHandler: &testutil.FakeNodeHandler{
 				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
 			},
+			fakeGetPodsAssignedToNode: fakeGetPodsAssignedToNode,
 			nodeIterations: []nodeIteration{
 				{
 					timeToPass: 0,
@@ -2472,6 +2473,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 					return true, nil, fmt.Errorf("unsupported action")
 				}
 			}(),
+			fakeGetPodsAssignedToNode: fakeGetPodsAssignedToNode,
 			nodeIterations: []nodeIteration{
 				{
 					timeToPass: 0,
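
Both cases above pass a package-level fakeGetPodsAssignedToNode factory that is not part of this view (the unchanged line in the last hunk shows the old code already calling it). A plausible sketch, assuming it emulates the node-name index with a field-selector List against the fake clientset; the name matches the diff, but the body here is an assumption:

// fake is k8s.io/client-go/kubernetes/fake; fields is
// k8s.io/apimachinery/pkg/fields. List has no context argument in the
// client-go of this era (pre-0.18).
func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]*v1.Pod, error) {
	return func(nodeName string) ([]*v1.Pod, error) {
		// Stand in for the informer index by listing pods whose
		// spec.nodeName matches the requested node.
		selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
		pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
			FieldSelector: selector.String(),
		})
		if err != nil {
			return nil, fmt.Errorf("failed to get pods assigned to node %v: %v", nodeName, err)
		}
		rPods := make([]*v1.Pod, 0, len(pods.Items))
		for i := range pods.Items {
			rPods = append(rPods, &pods.Items[i])
		}
		return rPods, nil
	}
}

Returning a closure over the clientset is what lets a test case wrap it: the "unsuccessful pod list" entry in the next hunk decorates this function to fail on the first call and delegate afterwards.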
@@ -2488,6 +2490,41 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 				},
 			},
 			expectedPodStatusUpdates: 2, // One failed and one retry.
 		},
+		// Node created long time ago, with status updated by kubelet exceeds grace period.
+		// First monitorNodeHealth check will fail to list pods.
+		// Second monitorNodeHealth check will update pod status to NotReady (retry).
+		{
+			desc: "unsuccessful pod list, retry required",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			fakeGetPodsAssignedToNode: func(c *fake.Clientset) func(string) ([]*v1.Pod, error) {
+				i := 0
+				f := fakeGetPodsAssignedToNode(c)
+				return func(nodeName string) ([]*v1.Pod, error) {
+					i++
+					if i == 1 {
+						return nil, fmt.Errorf("fake error")
+					}
+					return f(nodeName)
+				}
+			},
+			nodeIterations: []nodeIteration{
+				{
+					timeToPass: 0,
+					newNodes:   makeNodes(v1.ConditionTrue, timeNow, timeNow),
+				},
+				{
+					timeToPass: 1 * time.Minute,
+					newNodes:   makeNodes(v1.ConditionTrue, timeNow, timeNow),
+				},
+				{
+					timeToPass: 1 * time.Minute,
+					newNodes:   makeNodes(v1.ConditionFalse, timePlusTwoMinutes, timePlusTwoMinutes),
+				},
+			},
+			expectedPodStatusUpdates: 1,
+		},
 	}
 	for _, item := range table {
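
The loop that follows wires each case into the controller. This view does not show how the optional updateReactor is applied; with client-go's testing fakes that is presumably done by prepending a reactor for pod updates, roughly:

// Assumed wiring inside the loop body: a prepended reactor sees
// "update pods" actions before the fake clientset's default reactor;
// returning handled == false would fall through to the next reactor.
if item.updateReactor != nil {
	item.fakeNodeHandler.Clientset.PrependReactor("update", "pods", item.updateReactor)
}

That ordering is what lets a case fail the first status update and count the successful retry, matching the expectedPodStatusUpdates: 2 comment above.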
@@ -2508,7 +2545,7 @@ func TestMonitorNodeHealthMarkPodsNotReadyRetry(t *testing.T) {
 		}
 		nodeController.now = func() metav1.Time { return timeNow }
 		nodeController.recorder = testutil.NewFakeRecorder()
-		nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
+		nodeController.getPodsAssignedToNode = item.fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
 		for _, itertion := range item.nodeIterations {
 			nodeController.now = func() metav1.Time { return metav1.Time{Time: timeNow.Add(itertion.timeToPass)} }
 			item.fakeNodeHandler.Existing = itertion.newNodes