Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-03 19:58:17 +00:00
	Merge pull request #86199 from hwdef/clean-e2e-framework
test/e2e: move funcs from test/e2e/pod to other folders
@@ -14,7 +14,6 @@ go_library(
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/client/conditions:go_default_library",
-        "//pkg/controller:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -23,7 +22,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",

@@ -24,7 +24,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -33,40 +32,6 @@ var (
 	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
 )
 
-// CreateWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
-// Note: need named return value so that the err assignment in the defer sets the returned error.
-//       Has been shown to be necessary using Go 1.7.
-func CreateWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
-	e2elog.Logf("Creating nfs test pod")
-	pod := MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
-	runPod, err := c.CoreV1().Pods(ns).Create(pod)
-	if err != nil {
-		return fmt.Errorf("pod Create API error: %v", err)
-	}
-	defer func() {
-		delErr := DeletePodWithWait(c, runPod)
-		if err == nil { // don't override previous err value
-			err = delErr // assign to returned err, can be nil
-		}
-	}()
-
-	err = testPodSuccessOrFail(c, ns, runPod)
-	if err != nil {
-		return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
-	}
-	return // note: named return value
-}
-
-// testPodSuccessOrFail tests whether the pod's exit code is zero.
-func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
-	e2elog.Logf("Pod should terminate with exitcode 0 (success)")
-	if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
-		return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
-	}
-	e2elog.Logf("Pod %v succeeded ", pod.Name)
-	return nil
-}
-
 // CreateUnschedulablePod with given claims based on node selector
 func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
 	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)

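The helpers deleted above reappear later in this same commit as package-private createWaitAndDeletePod and testPodSuccessOrFail in the storage tests. The comment about the named return value is worth unpacking: only a named return can be reassigned from a deferred closure, which is how a failure in the deferred pod deletion still reaches the caller. A minimal standalone sketch of that pattern, with illustrative names that are not from the Kubernetes tree:

package main

import (
	"errors"
	"fmt"
)

// cleanup stands in for DeletePodWithWait and fails on purpose.
func cleanup() error { return errors.New("delete failed") }

// run names its return value so the deferred closure below can
// overwrite it after the function body has already returned.
func run() (err error) {
	defer func() {
		delErr := cleanup()
		if err == nil { // don't override an earlier error
			err = delErr
		}
	}()
	return nil // body succeeds, yet the caller still sees "delete failed"
}

func main() {
	fmt.Println(run()) // prints: delete failed
}

With an unnamed return value, the deferred assignment would have nothing to write to and the cleanup error would be silently lost.
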
@@ -30,7 +30,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -147,33 +146,6 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
 	return true, nil
 }
 
-// CountRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
-func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
-	// check for remaining pods
-	pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
-	if err != nil {
-		return 0, 0, err
-	}
-
-	// nothing remains!
-	if len(pods.Items) == 0 {
-		return 0, 0, nil
-	}
-
-	// stuff remains, log about it
-	LogPodStates(pods.Items)
-
-	// check if there were any pods with missing deletion timestamp
-	numPods := len(pods.Items)
-	missingTimestamp := 0
-	for _, pod := range pods.Items {
-		if pod.DeletionTimestamp == nil {
-			missingTimestamp++
-		}
-	}
-	return numPods, missingTimestamp, nil
-}
-
 func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
 	return func() (bool, error) {
 		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
@@ -321,15 +293,6 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
 	return e
 }
 
-// DumpAllPodInfo logs basic info for all pods.
-func DumpAllPodInfo(c clientset.Interface) {
-	pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
-	if err != nil {
-		e2elog.Logf("unable to fetch pod debug info: %v", err)
-	}
-	LogPodStates(pods.Items)
-}
-
 // LogPodStates logs basic info of provided pods for debugging.
 func LogPodStates(pods []v1.Pod) {
 	// Find maximum widths for pod, node, and phase strings for column printing.
@@ -578,40 +541,3 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
 	}
 	return filtered, nil
 }
-
-// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
-func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
-	for _, pod := range pods.Items {
-		if !masterNodes.Has(pod.Spec.NodeName) {
-			if pod.Spec.NodeName != "" {
-				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-				gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
-				gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue))
-				scheduledPods = append(scheduledPods, pod)
-			} else {
-				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
-				gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
-				gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse))
-				if scheduledCondition.Reason == "Unschedulable" {
-
-					notScheduledPods = append(notScheduledPods, pod)
-				}
-			}
-		}
-	}
-	return
-}
-
-// PatchContainerImages replaces the specified Container Registry with a custom
-// one provided via the KUBE_TEST_REPO_LIST env variable
-func PatchContainerImages(containers []v1.Container) error {
-	var err error
-	for _, c := range containers {
-		c.Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}

@@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@@ -536,49 +535,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 	return pods, err
 }
 
-// WaitForPodsInactive waits until there are no active pods left in the PodStore.
-// This is to make a fair comparison of deletion time between DeleteRCAndPods
-// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
-// when the pod is inactvie.
-func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
-	var activePods []*v1.Pod
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		pods := ps.List()
-		activePods = controller.FilterActivePods(pods)
-		if len(activePods) != 0 {
-			return false, nil
-		}
-		return true, nil
-	})
-
-	if err == wait.ErrWaitTimeout {
-		for _, pod := range activePods {
-			e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
-		}
-		return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
-	}
-	return err
-}
-
-// WaitForPodsGone waits until there are no pods left in the PodStore.
-func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
-	var pods []*v1.Pod
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-		if pods = ps.List(); len(pods) == 0 {
-			return true, nil
-		}
-		return false, nil
-	})
-
-	if err == wait.ErrWaitTimeout {
-		for _, pod := range pods {
-			e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
-		}
-		return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
-	}
-	return err
-}
-
 // WaitForPodsReady waits for the pods to become ready.
 func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

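WaitForPodsInactive, removed here and re-added below as a private helper in the framework package, decides "activity" via controller.FilterActivePods. That filter reduces to a phase-and-deletion-timestamp check; the sketch below is my reconstruction of the predicate for readers without the tree handy, not code from this commit:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isPodActive mirrors the test behind controller.FilterActivePods:
// a pod counts as active until it has terminated or has been
// marked for deletion.
func isPodActive(p *v1.Pod) bool {
	return p.Status.Phase != v1.PodSucceeded &&
		p.Status.Phase != v1.PodFailed &&
		p.DeletionTimestamp == nil
}

func main() {
	running := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
	done := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodSucceeded}}
	fmt.Println(isPodActive(running), isPodActive(done)) // true false
}
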
@@ -1237,7 +1237,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 		timeout = timeout + 3*time.Minute
 	}
 
-	err = e2epod.WaitForPodsInactive(ps, interval, timeout)
+	err = waitForPodsInactive(ps, interval, timeout)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
@@ -1247,13 +1247,56 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	// In gce, at any point, small percentage of nodes can disappear for
 	// ~10 minutes due to hostError. 20 minutes should be long enough to
 	// restart VM in that case and delete the pod.
-	err = e2epod.WaitForPodsGone(ps, interval, 20*time.Minute)
+	err = waitForPodsGone(ps, interval, 20*time.Minute)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 	}
 	return nil
 }
 
+// waitForPodsGone waits until there are no pods left in the PodStore.
+func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
+	var pods []*v1.Pod
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		if pods = ps.List(); len(pods) == 0 {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		for _, pod := range pods {
+			Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
+		}
+		return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
+	}
+	return err
+}
+
+// waitForPodsInactive waits until there are no active pods left in the PodStore.
+// This is to make a fair comparison of deletion time between DeleteRCAndPods
+// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
+// when the pod is inactvie.
+func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
+	var activePods []*v1.Pod
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		pods := ps.List()
+		activePods = controller.FilterActivePods(pods)
+		if len(activePods) != 0 {
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err == wait.ErrWaitTimeout {
+		for _, pod := range activePods {
+			Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
+		}
+		return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
+	}
+	return err
+}
+
 // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
 // inside of a shell.
 func RunHostCmd(ns, name, cmd string) (string, error) {

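Both re-added helpers follow the same wait.PollImmediate contract: the condition runs once immediately, then once per interval, until it returns true or an error, or the timeout lapses and PollImmediate returns wait.ErrWaitTimeout. The closures deliberately write the last-seen pod list into a captured variable so the timeout branch can name an offender. A self-contained sketch of that contract, with durations chosen arbitrarily for illustration:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ready := time.Now().Add(250 * time.Millisecond)

	// The condition func stands in for "no pods left in the PodStore".
	err := wait.PollImmediate(50*time.Millisecond, time.Second, func() (bool, error) {
		return time.Now().After(ready), nil
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("still not done at the deadline")
		return
	}
	fmt.Println("condition met:", err == nil) // condition met: true
}
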
@@ -17,6 +17,7 @@ go_library(
     importpath = "k8s.io/kubernetes/test/e2e/scheduling",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core/v1/helper/qos:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/apis/scheduling:go_default_library",

@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 var (
@@ -90,7 +89,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri
 		}
 	}
 	pods.Items = filteredPods
-	return e2epod.GetPodsScheduled(masterNodes, pods)
+	return GetPodsScheduled(masterNodes, pods)
 }
 
 // getDeletingPods returns whether there are any pods marked for deletion.

@@ -18,6 +18,7 @@ package scheduling
 
 import (
 	"fmt"
+	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -730,7 +731,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
 func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
 	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
 	framework.ExpectNoError(err)
-	scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
+	scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)
 
 	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
@@ -817,3 +818,26 @@ func translateIPv4ToIPv6(ip string) string {
 	}
 	return ip
 }
+
+// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
+func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
+	for _, pod := range pods.Items {
+		if !masterNodes.Has(pod.Spec.NodeName) {
+			if pod.Spec.NodeName != "" {
+				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
+				framework.ExpectEqual(scheduledCondition != nil, true)
+				framework.ExpectEqual(scheduledCondition.Status, v1.ConditionTrue)
+				scheduledPods = append(scheduledPods, pod)
+			} else {
+				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
+				framework.ExpectEqual(scheduledCondition != nil, true)
+				framework.ExpectEqual(scheduledCondition.Status, v1.ConditionFalse)
+				if scheduledCondition.Reason == "Unschedulable" {
+
+					notScheduledPods = append(notScheduledPods, pod)
+				}
+			}
+		}
+	}
+	return
+}

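The relocated GetPodsScheduled classifies pods by their PodScheduled condition, fetched through podutil.GetPodCondition. That helper amounts to a scan over pod.Status.Conditions; the sketch below reimplements the lookup locally for illustration (the real helper lives in pkg/api/v1/pod):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// getPodCondition mirrors the scan performed by podutil.GetPodCondition:
// find the condition of the given type, or return -1, nil.
func getPodCondition(status *v1.PodStatus, condType v1.PodConditionType) (int, *v1.PodCondition) {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}

func main() {
	status := v1.PodStatus{Conditions: []v1.PodCondition{
		{Type: v1.PodScheduled, Status: v1.ConditionFalse, Reason: "Unschedulable"},
	}}
	if _, cond := getPodCondition(&status, v1.PodScheduled); cond != nil {
		fmt.Printf("scheduled=%s reason=%s\n", cond.Status, cond.Reason)
	}
}
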
@@ -47,7 +47,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
 	// 2. create the nfs writer pod, test if the write was successful,
 	//    then delete the pod and verify that it was deleted
 	ginkgo.By("Checking pod has write access to PersistentVolume")
-	framework.ExpectNoError(e2epod.CreateWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
+	framework.ExpectNoError(createWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
 
 	// 3. delete the PVC, wait for PV to become "Released"
 	ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
@@ -78,7 +78,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
 			return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
 		}
 		// TODO: currently a serialized test of each PV
-		if err = e2epod.CreateWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
+		if err = createWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
 			return err
 		}
 	}
@@ -426,3 +426,37 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
 		},
 	}
 }
+
+// createWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
+// Note: need named return value so that the err assignment in the defer sets the returned error.
+//       Has been shown to be necessary using Go 1.7.
+func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
+	framework.Logf("Creating nfs test pod")
+	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
+	runPod, err := c.CoreV1().Pods(ns).Create(pod)
+	if err != nil {
+		return fmt.Errorf("pod Create API error: %v", err)
+	}
+	defer func() {
+		delErr := e2epod.DeletePodWithWait(c, runPod)
+		if err == nil { // don't override previous err value
+			err = delErr // assign to returned err, can be nil
+		}
+	}()
+
+	err = testPodSuccessOrFail(c, ns, runPod)
+	if err != nil {
+		return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
+	}
+	return // note: named return value
+}
+
+// testPodSuccessOrFail tests whether the pod's exit code is zero.
+func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
+	framework.Logf("Pod should terminate with exitcode 0 (success)")
+	if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
+		return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
+	}
+	framework.Logf("Pod %v succeeded ", pod.Name)
+	return nil
+}

@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/pkg/errors"
 
@@ -35,7 +36,6 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
 
@@ -360,18 +360,18 @@ func patchItemRecursively(f *framework.Framework, item interface{}) error {
 		PatchNamespace(f, &item.ObjectMeta.Namespace)
 	case *appsv1.StatefulSet:
 		PatchNamespace(f, &item.ObjectMeta.Namespace)
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
 			return err
 		}
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
 			return err
 		}
 	case *appsv1.DaemonSet:
 		PatchNamespace(f, &item.ObjectMeta.Namespace)
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
 			return err
 		}
-		if err := e2epod.PatchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
+		if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
 			return err
 		}
 	default:
@@ -624,3 +624,17 @@ func PrettyPrint(item interface{}) string {
 	}
 	return fmt.Sprintf("%+v", item)
 }
+
+// patchContainerImages replaces the specified Container Registry with a custom
+// one provided via the KUBE_TEST_REPO_LIST env variable
+func patchContainerImages(containers []v1.Container) error {
+	var err error
+	for _, c := range containers {
+		c.Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
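One caveat worth noting in patchContainerImages, carried over unchanged from the old PatchContainerImages: ranging with "for _, c := range containers" copies each v1.Container, so the assignment to c.Image updates the per-iteration copy and is then discarded; only the error result reaches the caller. An index-based loop would write the rewritten URL back to the caller's slice, as in this illustrative sketch (not part of the commit; the package name is assumed):

package framework

import (
	v1 "k8s.io/api/core/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// patchContainerImagesByIndex indexes into the slice so the rewritten
// image URL lands in the caller's containers, not in a loop-local copy.
func patchContainerImagesByIndex(containers []v1.Container) error {
	for i := range containers {
		img, err := imageutils.ReplaceRegistryInImageURL(containers[i].Image)
		if err != nil {
			return err
		}
		containers[i].Image = img
	}
	return nil
}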