test/e2e: move funcs from test/e2e/pod to other folders
@@ -1237,7 +1237,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 		timeout = timeout + 3*time.Minute
 	}
 
-	err = e2epod.WaitForPodsInactive(ps, interval, timeout)
+	err = waitForPodsInactive(ps, interval, timeout)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
@@ -1247,13 +1247,56 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	// In gce, at any point, small percentage of nodes can disappear for
 	// ~10 minutes due to hostError. 20 minutes should be long enough to
 	// restart VM in that case and delete the pod.
-	err = e2epod.WaitForPodsGone(ps, interval, 20*time.Minute)
+	err = waitForPodsGone(ps, interval, 20*time.Minute)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 	}
 	return nil
 }
 
+// waitForPodsGone waits until there are no pods left in the PodStore.
+func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
+	var pods []*v1.Pod
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		if pods = ps.List(); len(pods) == 0 {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		for _, pod := range pods {
+			Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
+		}
+		return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
+	}
+	return err
+}
+
+// waitForPodsInactive waits until there are no active pods left in the PodStore.
+// This is to make a fair comparison of deletion time between DeleteRCAndPods
+// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
+// when the pod is inactive.
+func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
+	var activePods []*v1.Pod
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		pods := ps.List()
+		activePods = controller.FilterActivePods(pods)
+		if len(activePods) != 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		for _, pod := range activePods {
+			Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
+		}
+		return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
+	}
+	return err
+}
+
 // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
 // inside of a shell.
 func RunHostCmd(ns, name, cmd string) (string, error) {
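
Both moved helpers are built on the same k8s.io/apimachinery wait.PollImmediate pattern: poll once right away, then on every interval, until the condition reports done or the timeout elapses, keeping the last observation around so the timeout path can report leftovers. A minimal, self-contained sketch of that pattern follows; the fakeStore type is hypothetical, standing in for testutils.PodStore, and it assumes the apimachinery module is available.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fakeStore is a hypothetical stand-in for testutils.PodStore: each List()
// call "deletes" one pod, so the store drains over a few polls.
type fakeStore struct{ remaining int }

func (s *fakeStore) List() []string {
	if s.remaining > 0 {
		s.remaining--
	}
	names := make([]string, s.remaining)
	for i := range names {
		names[i] = fmt.Sprintf("pod-%d", i)
	}
	return names
}

func main() {
	ps := &fakeStore{remaining: 3}
	var last []string
	// Same shape as waitForPodsGone: the condition func records the latest
	// listing so the caller can report what is left if the timeout fires.
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		last = ps.List()
		return len(last) == 0, nil
	})
	if err == wait.ErrWaitTimeout {
		fmt.Printf("timed out; %d pods left\n", len(last))
		return
	}
	fmt.Println("all pods gone") // store drained before the timeout
}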
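waitForPodsInactive delegates the "active" check to controller.FilterActivePods from k8s.io/kubernetes/pkg/controller. As a rough sketch of those semantics, a pod counts as active while its phase is neither Succeeded nor Failed and it carries no deletion timestamp; the helper below is a local reimplementation for illustration, not the canonical one.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isPodActive approximates the upstream "active pod" check: not yet
// terminated (Succeeded/Failed) and not marked for deletion.
func isPodActive(p *v1.Pod) bool {
	return p.Status.Phase != v1.PodSucceeded &&
		p.Status.Phase != v1.PodFailed &&
		p.DeletionTimestamp == nil
}

// filterActive keeps only the pods isPodActive accepts, mirroring the
// shape of controller.FilterActivePods.
func filterActive(pods []*v1.Pod) []*v1.Pod {
	var active []*v1.Pod
	for _, p := range pods {
		if isPodActive(p) {
			active = append(active, p)
		}
	}
	return active
}

func main() {
	now := metav1.Now()
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "running"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
		{ObjectMeta: metav1.ObjectMeta{Name: "done"}, Status: v1.PodStatus{Phase: v1.PodSucceeded}},
		{ObjectMeta: metav1.ObjectMeta{Name: "terminating", DeletionTimestamp: &now}, Status: v1.PodStatus{Phase: v1.PodRunning}},
	}
	for _, p := range filterActive(pods) {
		fmt.Println("still active:", p.Name) // only "running" prints
	}
}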