Direct reference to the packages

Signed-off-by: kerthcet <kerthcet@gmail.com>
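This commit drops the file-local aliases that re-exported helpers from the shared scheduler integration-test utility package and names the `testutils` package directly at every call site. A minimal sketch of the before/after shape; the import path and helper signatures are inferred from the diff below rather than quoted from the tree, so treat them as assumptions:

```go
package plugins_test

import (
	"testing"

	// Assumed import: the diff consistently refers to the helpers as testutils.*.
	testutils "k8s.io/kubernetes/test/integration/util"
)

// Before: a package-level alias hid where the helper really lived.
//
//	var createPausePod = testutils.CreatePausePod
//	pod, err := createPausePod(testCtx.ClientSet, ...)
//
// After: the call site names the package explicitly.
func createTestPod(t *testing.T, testCtx *testutils.TestContext) {
	pod, err := testutils.CreatePausePod(testCtx.ClientSet,
		testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
	if err != nil {
		t.Fatalf("Error while creating a test pod: %v", err)
	}
	_ = pod
}
```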
@@ -55,21 +55,24 @@ import (
 
 // imported from testutils
 var (
-	createPausePod                  = testutils.CreatePausePod
-	initPausePod                    = testutils.InitPausePod
-	getPod                          = testutils.GetPod
-	deletePod                       = testutils.DeletePod
-	podUnschedulable                = testutils.PodUnschedulable
-	podSchedulingError              = testutils.PodSchedulingError
-	createAndWaitForNodesInCache    = testutils.CreateAndWaitForNodesInCache
-	waitForPodUnschedulable         = testutils.WaitForPodUnschedulable
-	waitForPodSchedulingGated       = testutils.WaitForPodSchedulingGated
-	waitForPodToScheduleWithTimeout = testutils.WaitForPodToScheduleWithTimeout
 	initRegistryAndConfig = func(t *testing.T, plugins ...framework.Plugin) (frameworkruntime.Registry, schedulerconfig.KubeSchedulerProfile) {
 		return schedulerutils.InitRegistryAndConfig(t, newPlugin, plugins...)
 	}
 )
+
+// newPlugin returns a plugin factory with specified Plugin.
+func newPlugin(plugin framework.Plugin) frameworkruntime.PluginFactory {
+	return func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
+		switch pl := plugin.(type) {
+		case *PermitPlugin:
+			pl.fh = fh
+		case *PostFilterPlugin:
+			pl.fh = fh
+		}
+		return plugin, nil
+	}
+}
 
 type PreEnqueuePlugin struct {
 	called int
 	admit  bool
@@ -303,19 +306,6 @@ var _ framework.BindPlugin = &BindPlugin{}
 var _ framework.PostBindPlugin = &PostBindPlugin{}
 var _ framework.PermitPlugin = &PermitPlugin{}
-
-// newPlugin returns a plugin factory with specified Plugin.
-func newPlugin(plugin framework.Plugin) frameworkruntime.PluginFactory {
-	return func(_ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
-		switch pl := plugin.(type) {
-		case *PermitPlugin:
-			pl.fh = fh
-		case *PostFilterPlugin:
-			pl.fh = fh
-		}
-		return plugin, nil
-	}
-}
 
 func (ep *PreEnqueuePlugin) Name() string {
 	return enqueuePluginName
 }
@@ -671,18 +661,18 @@ func TestPreFilterPlugin(t *testing.T) {
 			preFilterPlugin.failPreFilter = test.fail
 			preFilterPlugin.rejectPreFilter = test.reject
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.reject {
-				if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
 			} else if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -848,13 +838,13 @@ func TestPostFilterPlugin(t *testing.T) {
 			defer teardown()
 
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet, testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if tt.rejectFilter {
-				if err = wait.Poll(10*time.Millisecond, 10*time.Second, podUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 10*time.Second, testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled.")
 				}
 
@@ -915,21 +905,21 @@ func TestScorePlugin(t *testing.T) {
 
 			scorePlugin.failScore = test.fail
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Fatalf("Error while creating a test pod: %v", err)
 			}
 
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
 				if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
 					t.Errorf("Expected the pod to be scheduled. error: %v", err)
 				} else {
-					p, err := getPod(testCtx.ClientSet, pod.Name, pod.Namespace)
+					p, err := testutils.GetPod(testCtx.ClientSet, pod.Name, pod.Namespace)
 					if err != nil {
 						t.Errorf("Failed to retrieve the pod. error: %v", err)
 					} else if p.Spec.NodeName != scorePlugin.highScoreNode {
@@ -956,8 +946,8 @@ func TestNormalizeScorePlugin(t *testing.T) {
 		scheduler.WithFrameworkOutOfTreeRegistry(registry))
 
 	// Create a best effort pod.
-	pod, err := createPausePod(testCtx.ClientSet,
-		initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+	pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+		testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 	if err != nil {
 		t.Fatalf("Error while creating a test pod: %v", err)
 	}
@@ -1006,15 +996,15 @@ func TestReservePluginReserve(t *testing.T) {
 
 			reservePlugin.failReserve = test.fail
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.fail {
 				if err = wait.Poll(10*time.Millisecond, 30*time.Second,
-					podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+					testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
 			} else {
@@ -1122,7 +1112,7 @@ func TestPrebindPlugin(t *testing.T) {
 			if p := test.unschedulablePod; p != nil {
 				p.Spec.SchedulerName = "2nd-scheduler"
 				filterPlugin.rejectFilter = true
-				if _, err := createPausePod(testCtx.ClientSet, p); err != nil {
+				if _, err := testutils.CreatePausePod(testCtx.ClientSet, p); err != nil {
 					t.Fatalf("Error while creating an unschedulable pod: %v", err)
 				}
 			}
@@ -1130,8 +1120,8 @@ func TestPrebindPlugin(t *testing.T) {
 			preBindPlugin.set(test.fail, test.reject, test.succeedOnRetry)
 
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
@@ -1141,7 +1131,7 @@ func TestPrebindPlugin(t *testing.T) {
 				if err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod, 10*time.Second); err != nil {
 					t.Errorf("Expected the pod to be schedulable on retry, but got an error: %v", err)
 				}
-			} else if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+			} else if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 				t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 			}
 			} else if test.reject {
@@ -1276,14 +1266,14 @@ func TestUnReserveReservePlugins(t *testing.T) {
 
 			// Create a best effort pod.
 			podName := "test-pod"
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
 				}
 
@@ -1369,14 +1359,14 @@ func TestUnReservePermitPlugins(t *testing.T) {
 
 			// Create a best effort pod.
 			podName := "test-pod"
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.reject {
-				if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
 
@@ -1441,14 +1431,14 @@ func TestUnReservePreBindPlugins(t *testing.T) {
 
 			// Create a pause pod.
 			podName := "test-pod"
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.wantReject {
-				if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
 					t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
 				}
 
@@ -1512,14 +1502,14 @@ func TestUnReserveBindPlugins(t *testing.T) {
 
 			// Create a pause pod.
 			podName := "test-pod"
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
 				}
 
@@ -1655,8 +1645,8 @@ func TestBindPlugin(t *testing.T) {
 			postBindPlugin.pluginInvokeEventChan = pluginInvokeEventChan
 
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
@@ -1702,7 +1692,7 @@ func TestBindPlugin(t *testing.T) {
 				}
 			} else {
 				// bind plugin fails to bind the pod
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 				}
 				p := postBindPlugin.deepCopy()
@@ -1765,14 +1755,14 @@ func TestPostBindPlugin(t *testing.T) {
 			defer teardown()
 
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.preBindFail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 				}
 				if postBindPlugin.numPostBindCalled > 0 {
@@ -1862,18 +1852,18 @@ func TestPermitPlugin(t *testing.T) {
 			perPlugin.waitAndAllowPermit = false
 
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
 				}
 			} else {
 				if test.reject || test.timeout {
-					if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+					if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
 						t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 					}
 				} else {
@@ -1909,8 +1899,8 @@ func TestMultiplePermitPlugins(t *testing.T) {
 
 	// Create a test pod.
 	podName := "test-pod"
-	pod, err := createPausePod(testCtx.ClientSet,
-		initPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
+	pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+		testutils.InitPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
 	if err != nil {
 		t.Errorf("Error while creating a test pod: %v", err)
 	}
@@ -1961,8 +1951,8 @@ func TestPermitPluginsCancelled(t *testing.T) {
 
 	// Create a test pod.
 	podName := "test-pod"
-	pod, err := createPausePod(testCtx.ClientSet,
-		initPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
+	pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+		testutils.InitPausePod(&testutils.PausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
 	if err != nil {
 		t.Errorf("Error while creating a test pod: %v", err)
 	}
@@ -2027,22 +2017,22 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
 
 			// Create two pods. First pod to enter Permit() will wait and a second one will either
 			// reject or allow first one.
-			podA, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "pod-a", Namespace: testCtx.NS.Name}))
+			podA, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "pod-a", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating the first pod: %v", err)
 			}
-			podB, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "pod-b", Namespace: testCtx.NS.Name}))
+			podB, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "pod-b", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating the second pod: %v", err)
 			}
 
 			if test.waitReject {
-				if err = waitForPodUnschedulable(testCtx.ClientSet, podA); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, podA); err != nil {
 					t.Errorf("Didn't expect the first pod to be scheduled. error: %v", err)
 				}
-				if err = waitForPodUnschedulable(testCtx.ClientSet, podB); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, podB); err != nil {
 					t.Errorf("Didn't expect the second pod to be scheduled. error: %v", err)
 				}
 				if !((permitPlugin.waitingPod == podA.Name && permitPlugin.rejectingPod == podB.Name) ||
@@ -2103,14 +2093,14 @@ func TestFilterPlugin(t *testing.T) {
 
 			filterPlugin.failFilter = test.fail
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 				if filterPlugin.numFilterCalled < 1 {
@@ -2159,14 +2149,14 @@ func TestPreScorePlugin(t *testing.T) {
 
 			preScorePlugin.failPreScore = test.fail
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if test.fail {
-				if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+				if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
@@ -2220,13 +2210,13 @@ func TestPreEnqueuePlugin(t *testing.T) {
 
 			enqueuePlugin.admit = tt.admitEnqueue
 			// Create a best effort pod.
-			pod, err := createPausePod(testCtx.ClientSet, tt.pod)
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet, tt.pod)
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
 
 			if tt.admitEnqueue {
-				if err := waitForPodToScheduleWithTimeout(testCtx.ClientSet, pod, 10*time.Second); err != nil {
+				if err := testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod, 10*time.Second); err != nil {
 					t.Errorf("Expected the pod to be schedulable, but got: %v", err)
 				}
 				// Also verify enqueuePlugin is called.
@@ -2234,7 +2224,7 @@ func TestPreEnqueuePlugin(t *testing.T) {
 					t.Errorf("Expected the enqueuePlugin plugin to be called at least once, but got 0")
 				}
 			} else {
-				if err := waitForPodSchedulingGated(testCtx.ClientSet, pod, 10*time.Second); err != nil {
+				if err := testutils.WaitForPodSchedulingGated(testCtx.ClientSet, pod, 10*time.Second); err != nil {
 					t.Errorf("Expected the pod to be scheduling waiting, but got: %v", err)
 				}
 				// Also verify preFilterPlugin is not called.
@@ -2348,7 +2338,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 			)
 			defer teardown()
 
-			_, err := createAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode().Capacity(nodeRes), 1)
+			_, err := testutils.CreateAndWaitForNodesInCache(testCtx, "test-node", st.MakeNode().Capacity(nodeRes), 1)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -2357,7 +2347,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 			permitPlugin.waitingPod = "waiting-pod"
 
 			if r := tt.runningPod; r != nil {
-				if _, err := createPausePod(testCtx.ClientSet, r); err != nil {
+				if _, err := testutils.CreatePausePod(testCtx.ClientSet, r); err != nil {
 					t.Fatalf("Error while creating the running pod: %v", err)
 				}
 				// Wait until the pod to be scheduled.
@@ -2367,7 +2357,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 			}
 
 			if w := tt.waitingPod; w != nil {
-				if _, err := createPausePod(testCtx.ClientSet, w); err != nil {
+				if _, err := testutils.CreatePausePod(testCtx.ClientSet, w); err != nil {
 					t.Fatalf("Error while creating the waiting pod: %v", err)
 				}
 				// Wait until the waiting-pod is actually waiting.
@@ -2381,12 +2371,12 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 			}
 
 			if p := tt.preemptor; p != nil {
-				if _, err := createPausePod(testCtx.ClientSet, p); err != nil {
+				if _, err := testutils.CreatePausePod(testCtx.ClientSet, p); err != nil {
 					t.Fatalf("Error while creating the preemptor pod: %v", err)
 				}
 				// Delete the waiting pod if specified.
 				if w := tt.waitingPod; w != nil && tt.deleteWaitingPod {
-					if err := deletePod(testCtx.ClientSet, w.Name, w.Namespace); err != nil {
+					if err := testutils.DeletePod(testCtx.ClientSet, w.Name, w.Namespace); err != nil {
 						t.Fatalf("Error while deleting the waiting pod: %v", err)
 					}
 				}
@@ -2412,7 +2402,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 
 				if !tt.deleteWaitingPod {
 					// Expect the waitingPod to be still present.
-					if _, err := getPod(testCtx.ClientSet, w.Name, w.Namespace); err != nil {
+					if _, err := testutils.GetPod(testCtx.ClientSet, w.Name, w.Namespace); err != nil {
 						t.Error("Get waiting pod in waiting pod failed.")
 					}
 				}
@@ -2424,7 +2414,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 
 			if r := tt.runningPod; r != nil {
 				// Expect the runningPod to be deleted physically.
-				if _, err = getPod(testCtx.ClientSet, r.Name, r.Namespace); err == nil {
+				if _, err = testutils.GetPod(testCtx.ClientSet, r.Name, r.Namespace); err == nil {
 					t.Error("The running pod still exists.")
 				} else if !errors.IsNotFound(err) {
 					t.Errorf("Get running pod failed: %v", err)
@@ -2543,7 +2533,7 @@ func TestActivatePods(t *testing.T) {
 
 	// Wait for the 2 executor pods to be unschedulable.
 	for _, pod := range pods {
-		if err := waitForPodUnschedulable(cs, pod); err != nil {
+		if err := testutils.WaitForPodUnschedulable(cs, pod); err != nil {
 			t.Errorf("Failed to wait for Pod %v to be unschedulable: %v", pod.Name, err)
 		}
 	}
@@ -2557,7 +2547,7 @@ func TestActivatePods(t *testing.T) {
 
 	// Verify all pods to be scheduled.
 	for _, pod := range pods {
-		if err := waitForPodToScheduleWithTimeout(cs, pod, wait.ForeverTestTimeout); err != nil {
+		if err := testutils.WaitForPodToScheduleWithTimeout(cs, pod, wait.ForeverTestTimeout); err != nil {
 			t.Fatalf("Failed to wait for Pod %v to be schedulable: %v", pod.Name, err)
 		}
 	}

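Review note: the rewritten call sites above keep the tests' existing polling idiom. Helpers such as `testutils.PodSchedulingError` and `testutils.PodUnschedulable` build the condition that `wait.Poll` re-evaluates until it returns true or the timeout expires. A hedged sketch of that idiom, assuming (as the call sites suggest) that the helpers return a `wait.ConditionFunc` and that the import path below is correct:

```go
package plugins_test

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	testutils "k8s.io/kubernetes/test/integration/util" // assumed import path
)

func expectSchedulingError(t *testing.T, testCtx *testutils.TestContext, ns, name string) {
	// Poll every 10ms, for up to 30s, until the pod reports a scheduling error.
	cond := testutils.PodSchedulingError(testCtx.ClientSet, ns, name)
	if err := wait.Poll(10*time.Millisecond, 30*time.Second, cond); err != nil {
		t.Errorf("Expected a scheduling error, but got: %v", err)
	}
}
```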
@@ -145,7 +145,7 @@ func TestSchedulingGates(t *testing.T) {
 
 			// Pop the expected pods out. They should be de-queueable.
 			for _, wantPod := range tt.want {
-				podInfo := nextPodOrDie(t, testCtx)
+				podInfo := testutils.NextPodOrDie(t, testCtx)
 				if got := podInfo.Pod.Name; got != wantPod {
 					t.Errorf("Want %v to be popped out, but got %v", wantPod, got)
 				}
@@ -164,7 +164,7 @@ func TestSchedulingGates(t *testing.T) {
 			}
 			// Pop the expected pods out. They should be de-queueable.
 			for _, wantPod := range tt.wantPostGatesRemoval {
-				podInfo := nextPodOrDie(t, testCtx)
+				podInfo := testutils.NextPodOrDie(t, testCtx)
 				if got := podInfo.Pod.Name; got != wantPod {
 					t.Errorf("Want %v to be popped out, but got %v", wantPod, got)
 				}
@@ -222,7 +222,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 
 	// Pop the three pods out. They should be unschedulable.
 	for i := 0; i < 3; i++ {
-		podInfo := nextPodOrDie(t, testCtx)
+		podInfo := testutils.NextPodOrDie(t, testCtx)
 		fwk, ok := testCtx.Scheduler.Profiles[podInfo.Pod.Spec.SchedulerName]
 		if !ok {
 			t.Fatalf("Cannot find the profile for Pod %v", podInfo.Pod.Name)
@@ -243,7 +243,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	}
 
 	// Now we should be able to pop the Pod from activeQ again.
-	podInfo := nextPodOrDie(t, testCtx)
+	podInfo := testutils.NextPodOrDie(t, testCtx)
 	if podInfo.Attempts != 2 {
 		t.Fatalf("Expected the Pod to be attempted 2 times, but got %v", podInfo.Attempts)
 	}
@@ -255,7 +255,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	// - Although the failure reason has been lifted, Pod2 still won't be moved to active due to
 	//   the node event's preCheckForNode().
 	// - Regarding Pod3, the NodeTaintChange event is irrelevant with its scheduling failure.
-	podInfo = nextPod(t, testCtx)
+	podInfo = testutils.NextPod(t, testCtx)
 	if podInfo != nil {
 		t.Fatalf("Unexpected pod %v get popped out", podInfo.Pod.Name)
 	}
@@ -402,7 +402,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
 	}
 
 	// Pop fake-pod out. It should be unschedulable.
-	podInfo := nextPodOrDie(t, testCtx)
+	podInfo := testutils.NextPodOrDie(t, testCtx)
 	fwk, ok := testCtx.Scheduler.Profiles[podInfo.Pod.Spec.SchedulerName]
 	if !ok {
 		t.Fatalf("Cannot find the profile for Pod %v", podInfo.Pod.Name)
@@ -434,7 +434,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
 	}
 
 	// Now we should be able to pop the Pod from activeQ again.
-	podInfo = nextPodOrDie(t, testCtx)
+	podInfo = testutils.NextPodOrDie(t, testCtx)
 	if podInfo.Attempts != 2 {
 		t.Errorf("Expected the Pod to be attempted 2 times, but got %v", podInfo.Attempts)
 	}

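Review note: the queue-test hunks swap the `nextPodOrDie`/`nextPod` aliases for `testutils.NextPodOrDie`/`testutils.NextPod`, which pop the next pod the scheduler would attempt from the active queue. Sketch of the assertion pattern used above; the returned value's `Pod` field (and the `Attempts` counter checked elsewhere) are inferred from the call sites, not quoted from the helper's source:

```go
package queue_test

import (
	"testing"

	testutils "k8s.io/kubernetes/test/integration/util" // assumed import path
)

// assertPopped pops the next pod from activeQ (failing the test if nothing
// is poppable) and checks it is the pod we expected to be de-queued.
func assertPopped(t *testing.T, testCtx *testutils.TestContext, want string) {
	podInfo := testutils.NextPodOrDie(t, testCtx)
	if got := podInfo.Pod.Name; got != want {
		t.Errorf("Want %v to be popped out, but got %v", want, got)
	}
}
```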
@@ -133,7 +133,7 @@ func TestReScheduling(t *testing.T) {
 				&PermitPlugin{name: "permit", statusCode: framework.Unschedulable},
 			},
 			action: func() error {
-				_, err := createNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
+				_, err := testutils.CreateNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
 				return err
 			},
 			wantScheduled: true,
@@ -144,8 +144,8 @@ func TestReScheduling(t *testing.T) {
 				&PermitPlugin{name: "permit", statusCode: framework.Unschedulable},
 			},
 			action: func() error {
-				_, err := createPausePod(testContext.ClientSet,
-					initPausePod(&testutils.PausePodConfig{Name: "test-pod-2", Namespace: testContext.NS.Name}))
+				_, err := testutils.CreatePausePod(testContext.ClientSet,
+					testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod-2", Namespace: testContext.NS.Name}))
 				return err
 			},
 			wantScheduled: false,
@@ -156,7 +156,7 @@ func TestReScheduling(t *testing.T) {
 				&PermitPlugin{name: "permit", statusCode: framework.Error},
 			},
 			action: func() error {
-				_, err := createNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
+				_, err := testutils.CreateNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
 				return err
 			},
 			wantFirstSchedulingError: true,
@@ -168,7 +168,7 @@ func TestReScheduling(t *testing.T) {
 				&ReservePlugin{name: "reserve", statusCode: framework.Unschedulable},
 			},
 			action: func() error {
-				_, err := createNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
+				_, err := testutils.CreateNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
 				return err
 			},
 			wantScheduled: true,
@@ -179,8 +179,8 @@ func TestReScheduling(t *testing.T) {
 				&ReservePlugin{name: "reserve", statusCode: framework.Unschedulable},
 			},
 			action: func() error {
-				_, err := createPausePod(testContext.ClientSet,
-					initPausePod(&testutils.PausePodConfig{Name: "test-pod-2", Namespace: testContext.NS.Name}))
+				_, err := testutils.CreatePausePod(testContext.ClientSet,
+					testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod-2", Namespace: testContext.NS.Name}))
 				return err
 			},
 			wantScheduled: false,
@@ -191,7 +191,7 @@ func TestReScheduling(t *testing.T) {
 				&ReservePlugin{name: "reserve", statusCode: framework.Error},
 			},
 			action: func() error {
-				_, err := createNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
+				_, err := testutils.CreateNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
 				return err
 			},
 			wantFirstSchedulingError: true,
@@ -209,8 +209,8 @@ func TestReScheduling(t *testing.T) {
 				scheduler.WithFrameworkOutOfTreeRegistry(registry))
 			defer teardown()
 
-			pod, err := createPausePod(testCtx.ClientSet,
-				initPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
+			pod, err := testutils.CreatePausePod(testCtx.ClientSet,
+				testutils.InitPausePod(&testutils.PausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
 			if err != nil {
 				t.Errorf("Error while creating a test pod: %v", err)
 			}
@@ -221,7 +221,7 @@ func TestReScheduling(t *testing.T) {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
-				if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
 			}
@@ -241,7 +241,7 @@ func TestReScheduling(t *testing.T) {
 					t.Errorf("Expected a scheduling error, but got: %v", err)
 				}
 			} else {
-				if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
+				if err = testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
 					t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
 				}
 			}

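Review note: each `TestReScheduling` case carries an `action func() error` that fires after the first failed attempt and should (or should not) re-trigger scheduling; the hunks above only change which package the action calls. A sketch of one such action, under assumed import paths (`st` standing for the scheduler testing wrapper the diff already uses via `st.MakeNode`):

```go
package rescheduling_test

import (
	st "k8s.io/kubernetes/pkg/scheduler/testing" // assumed import path for st.MakeNode
	testutils "k8s.io/kubernetes/test/integration/util"
)

// makeNodeAction returns an action in the style of the table entries above:
// creating a node is a cluster event that can move an unschedulable pod back
// to the active queue for another scheduling attempt.
func makeNodeAction(testContext *testutils.TestContext) func() error {
	return func() error {
		_, err := testutils.CreateNode(testContext.ClientSet, st.MakeNode().Name("fake-node").Obj())
		return err
	}
}
```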
@@ -50,7 +50,7 @@ type nodeStateManager struct {
 }
 
 func TestUnschedulableNodes(t *testing.T) {
-	testCtx := initTest(t, "unschedulable-nodes")
+	testCtx := testutils.InitTestSchedulerWithNS(t, "unschedulable-nodes")
 
 	nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister()
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
@@ -97,7 +97,7 @@ func TestUnschedulableNodes(t *testing.T) {
 			if _, err := c.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil {
 				t.Fatalf("Failed to update node with unschedulable=true: %v", err)
 			}
-			err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
+			err = testutils.WaitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
 				// An unschedulable node should still be present in the store
 				// Nodes that are unschedulable or that are not ready or
 				// have their disk full (Node.Spec.Conditions) are excluded
@@ -113,7 +113,7 @@ func TestUnschedulableNodes(t *testing.T) {
 			if _, err := c.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil {
 				t.Fatalf("Failed to update node with unschedulable=false: %v", err)
 			}
-			err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
+			err = testutils.WaitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
 				return node != nil && node.(*v1.Node).Spec.Unschedulable == false
 			})
 			if err != nil {
@@ -124,7 +124,7 @@ func TestUnschedulableNodes(t *testing.T) {
 	}
 
 	for i, mod := range nodeModifications {
-		unSchedNode, err := createNode(testCtx.ClientSet, node)
+		unSchedNode, err := testutils.CreateNode(testCtx.ClientSet, node)
 		if err != nil {
 			t.Fatalf("Failed to create node: %v", err)
 		}
@@ -134,7 +134,7 @@ func TestUnschedulableNodes(t *testing.T) {
 
 		// Create the new pod, note that this needs to happen post unschedulable
 		// modification or we have a race in the test.
-		myPod, err := createPausePodWithResource(testCtx.ClientSet, "node-scheduling-test-pod", testCtx.NS.Name, nil)
+		myPod, err := testutils.CreatePausePodWithResource(testCtx.ClientSet, "node-scheduling-test-pod", testCtx.NS.Name, nil)
 		if err != nil {
 			t.Fatalf("Failed to create pod: %v", err)
 		}
@@ -164,7 +164,7 @@ func TestUnschedulableNodes(t *testing.T) {
 			t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
 		}
 		// Clean up.
-		if err := deletePod(testCtx.ClientSet, myPod.Name, myPod.Namespace); err != nil {
+		if err := testutils.DeletePod(testCtx.ClientSet, myPod.Name, myPod.Namespace); err != nil {
 			t.Errorf("Failed to delete pod: %v", err)
 		}
 		err = testCtx.ClientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, metav1.DeleteOptions{})
@@ -189,7 +189,7 @@ func TestMultipleSchedulers(t *testing.T) {
 	//     - testPodWithAnnotationFitsFoo should be scheduled
 
 	// 1. create and start default-scheduler
-	testCtx := initTest(t, "multi-scheduler")
+	testCtx := testutils.InitTestSchedulerWithNS(t, "multi-scheduler")
 
 	// 2. create a node
 	node := &v1.Node{
@@ -201,23 +201,23 @@ func TestMultipleSchedulers(t *testing.T) {
 			},
 		},
 	}
-	createNode(testCtx.ClientSet, node)
+	testutils.CreateNode(testCtx.ClientSet, node)
 
 	// 3. create 3 pods for testing
 	t.Logf("create 3 pods for testing")
-	testPod, err := createPausePodWithResource(testCtx.ClientSet, "pod-without-scheduler-name", testCtx.NS.Name, nil)
+	testPod, err := testutils.CreatePausePodWithResource(testCtx.ClientSet, "pod-without-scheduler-name", testCtx.NS.Name, nil)
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
 
 	defaultScheduler := "default-scheduler"
-	testPodFitsDefault, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "pod-fits-default", Namespace: testCtx.NS.Name, SchedulerName: defaultScheduler}))
+	testPodFitsDefault, err := testutils.CreatePausePod(testCtx.ClientSet, testutils.InitPausePod(&testutils.PausePodConfig{Name: "pod-fits-default", Namespace: testCtx.NS.Name, SchedulerName: defaultScheduler}))
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
 
 	fooScheduler := "foo-scheduler"
-	testPodFitsFoo, err := createPausePod(testCtx.ClientSet, initPausePod(&testutils.PausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.NS.Name, SchedulerName: fooScheduler}))
+	testPodFitsFoo, err := testutils.CreatePausePod(testCtx.ClientSet, testutils.InitPausePod(&testutils.PausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.NS.Name, SchedulerName: fooScheduler}))
 	if err != nil {
 		t.Fatalf("Failed to create pod: %v", err)
 	}
@@ -282,7 +282,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 		},
 	})
 
-	testCtx := initTest(t, "multi-scheduler", scheduler.WithProfiles(cfg.Profiles...))
+	testCtx := testutils.InitTestSchedulerWithNS(t, "multi-scheduler", scheduler.WithProfiles(cfg.Profiles...))
 
 	node := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
@@ -293,7 +293,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 			},
 		},
 	}
-	if _, err := createNode(testCtx.ClientSet, node); err != nil {
+	if _, err := testutils.CreateNode(testCtx.ClientSet, node); err != nil {
 		t.Fatal(err)
 	}
 
@@ -309,7 +309,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 		{Name: "baz", Namespace: testCtx.NS.Name, SchedulerName: "default-scheduler"},
 		{Name: "zet", Namespace: testCtx.NS.Name, SchedulerName: "custom-scheduler"},
 	} {
-		if _, err := createPausePod(testCtx.ClientSet, initPausePod(pc)); err != nil {
+		if _, err := testutils.CreatePausePod(testCtx.ClientSet, testutils.InitPausePod(pc)); err != nil {
 			t.Fatal(err)
 		}
 	}
@@ -345,7 +345,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
 
 // This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
 func TestAllocatable(t *testing.T) {
-	testCtx := initTest(t, "allocatable")
+	testCtx := testutils.InitTestSchedulerWithNS(t, "allocatable")
 
 	// 2. create a node without allocatable awareness
 	nodeRes := map[v1.ResourceName]string{
@@ -353,7 +353,7 @@ func TestAllocatable(t *testing.T) {
 		v1.ResourceCPU:    "30m",
 		v1.ResourceMemory: "30",
 	}
-	allocNode, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node-allocatable-scheduler-test-node").Capacity(nodeRes).Obj())
+	allocNode, err := testutils.CreateNode(testCtx.ClientSet, st.MakeNode().Name("node-allocatable-scheduler-test-node").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Failed to create node: %v", err)
 	}
@@ -364,7 +364,7 @@ func TestAllocatable(t *testing.T) {
 		v1.ResourceCPU:    *resource.NewMilliQuantity(20, resource.DecimalSI),
 		v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
 	}
-	testAllocPod, err := createPausePodWithResource(testCtx.ClientSet, podName, testCtx.NS.Name, podRes)
+	testAllocPod, err := testutils.CreatePausePodWithResource(testCtx.ClientSet, podName, testCtx.NS.Name, podRes)
 	if err != nil {
 		t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
 	}
@@ -395,13 +395,13 @@ func TestAllocatable(t *testing.T) {
 		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
 	}
 
-	if err := deletePod(testCtx.ClientSet, testAllocPod.Name, testCtx.NS.Name); err != nil {
+	if err := testutils.DeletePod(testCtx.ClientSet, testAllocPod.Name, testCtx.NS.Name); err != nil {
 		t.Fatalf("Failed to remove the first pod: %v", err)
 	}
 
 	// 6. Make another pod with different name, same resource request
 	podName2 := "pod-test-allocatable2"
-	testAllocPod2, err := createPausePodWithResource(testCtx.ClientSet, podName2, testCtx.NS.Name, podRes)
+	testAllocPod2, err := testutils.CreatePausePodWithResource(testCtx.ClientSet, podName2, testCtx.NS.Name, podRes)
 	if err != nil {
 		t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
 	}
@@ -418,7 +418,7 @@ func TestAllocatable(t *testing.T) {
 // pods are scheduled by other schedulers.
 func TestSchedulerInformers(t *testing.T) {
 	// Initialize scheduler.
-	testCtx := initTest(t, "scheduler-informer")
+	testCtx := testutils.InitTestSchedulerWithNS(t, "scheduler-informer")
 	cs := testCtx.ClientSet
 
 	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
@@ -447,7 +447,7 @@ func TestSchedulerInformers(t *testing.T) {
 			name:  "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
 			nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
 			existingPods: []*v1.Pod{
-				initPausePod(&testutils.PausePodConfig{
+				testutils.InitPausePod(&testutils.PausePodConfig{
 					Name:          "pod1",
 					Namespace:     testCtx.NS.Name,
 					Resources:     defaultPodRes,
@@ -455,7 +455,7 @@ func TestSchedulerInformers(t *testing.T) {
 					NodeName:      "node-1",
 					SchedulerName: "foo-scheduler",
 				}),
-				initPausePod(&testutils.PausePodConfig{
+				testutils.InitPausePod(&testutils.PausePodConfig{
 					Name:          "pod2",
 					Namespace:     testCtx.NS.Name,
 					Resources:     defaultPodRes,
@@ -464,7 +464,7 @@ func TestSchedulerInformers(t *testing.T) {
 					SchedulerName: "bar-scheduler",
 				}),
 			},
-			pod: initPausePod(&testutils.PausePodConfig{
+			pod: testutils.InitPausePod(&testutils.PausePodConfig{
 				Name:      "unschedulable-pod",
 				Namespace: testCtx.NS.Name,
 				Resources: defaultPodRes,
@@ -476,13 +476,13 @@ func TestSchedulerInformers(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
-				_, err := createNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
+				_, err := testutils.CreateNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
 			}
 			// Ensure nodes are present in scheduler cache.
-			if err := waitForNodesInCache(testCtx.Scheduler, len(test.nodes)); err != nil {
+			if err := testutils.WaitForNodesInCache(testCtx.Scheduler, len(test.nodes)); err != nil {
 				t.Fatal(err)
 			}
 
 | 
			
		||||
			var err error
 | 
			
		||||
			// Create and run existingPods.
 | 
			
		||||
			for i, p := range test.existingPods {
 | 
			
		||||
				if pods[i], err = runPausePod(cs, p); err != nil {
 | 
			
		||||
				if pods[i], err = testutils.RunPausePod(cs, p); err != nil {
 | 
			
		||||
					t.Fatalf("Error running pause pod: %v", err)
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
			// Create the new "pod".
 | 
			
		||||
			unschedulable, err := createPausePod(cs, test.pod)
 | 
			
		||||
			unschedulable, err := testutils.CreatePausePod(cs, test.pod)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				t.Errorf("Error while creating new pod: %v", err)
 | 
			
		||||
			}
 | 
			
		||||
			if err := waitForPodUnschedulable(cs, unschedulable); err != nil {
 | 
			
		||||
			if err := testutils.WaitForPodUnschedulable(cs, unschedulable); err != nil {
 | 
			
		||||
				t.Errorf("Pod %v got scheduled: %v", unschedulable.Name, err)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
@@ -524,11 +524,11 @@ func TestNodeEvents(t *testing.T) {
 | 
			
		||||
	// 3. Create node2 with a taint, pod2 should still not schedule
 | 
			
		||||
	// 4. Remove the taint from node2; pod2 should now schedule on node2
 | 
			
		||||
 | 
			
		||||
	testCtx := initTest(t, "node-events")
 | 
			
		||||
	testCtx := testutils.InitTestSchedulerWithNS(t, "node-events")
 | 
			
		||||
	defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 | 
			
		||||
 | 
			
		||||
	// 1.1 create pod1
 | 
			
		||||
	pod1, err := createPausePodWithResource(testCtx.ClientSet, "pod1", testCtx.NS.Name, &v1.ResourceList{
 | 
			
		||||
	pod1, err := testutils.CreatePausePodWithResource(testCtx.ClientSet, "pod1", testCtx.NS.Name, &v1.ResourceList{
 | 
			
		||||
		v1.ResourceCPU: *resource.NewMilliQuantity(80, resource.DecimalSI),
 | 
			
		||||
	})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
@@ -536,7 +536,7 @@ func TestNodeEvents(t *testing.T) {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 1.2 Create node1
 | 
			
		||||
	node1, err := createNode(testCtx.ClientSet, st.MakeNode().
 | 
			
		||||
	node1, err := testutils.CreateNode(testCtx.ClientSet, st.MakeNode().
 | 
			
		||||
		Name("node-events-test-node1").
 | 
			
		||||
		Capacity(map[v1.ResourceName]string{
 | 
			
		||||
			v1.ResourcePods:   "32",
 | 
			
		||||
@@ -554,14 +554,14 @@ func TestNodeEvents(t *testing.T) {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 2. create pod2
 | 
			
		||||
	pod2, err := createPausePodWithResource(testCtx.ClientSet, "pod2", testCtx.NS.Name, &v1.ResourceList{
 | 
			
		||||
	pod2, err := testutils.CreatePausePodWithResource(testCtx.ClientSet, "pod2", testCtx.NS.Name, &v1.ResourceList{
 | 
			
		||||
		v1.ResourceCPU: *resource.NewMilliQuantity(40, resource.DecimalSI),
 | 
			
		||||
	})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatalf("Failed to create pod %v: %v", pod2.Name, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if err := waitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
 | 
			
		||||
	if err := testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
 | 
			
		||||
		t.Errorf("Pod %v got scheduled: %v", pod2.Name, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@@ -575,7 +575,7 @@ func TestNodeEvents(t *testing.T) {
 | 
			
		||||
		}).
 | 
			
		||||
		Label("affinity-key", "affinity-value").
 | 
			
		||||
		Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj()
 | 
			
		||||
	node2, err = createNode(testCtx.ClientSet, node2)
 | 
			
		||||
	node2, err = testutils.CreateNode(testCtx.ClientSet, node2)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatalf("Failed to create %s: %v", node2.Name, err)
 | 
			
		||||
	}
 | 
			
		||||
@@ -594,13 +594,13 @@ func TestNodeEvents(t *testing.T) {
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 3.2 pod2 still unschedulable
 | 
			
		||||
	if err := waitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
 | 
			
		||||
	if err := testutils.WaitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
 | 
			
		||||
		t.Errorf("Pod %v got scheduled: %v", pod2.Name, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 4. Remove node taint, pod2 should schedule
 | 
			
		||||
	node2.Spec.Taints = nil
 | 
			
		||||
	node2, err = updateNode(testCtx.ClientSet, node2)
 | 
			
		||||
	node2, err = testutils.UpdateNode(testCtx.ClientSet, node2)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Fatalf("Failed to update %s: %v", node2.Name, err)
 | 
			
		||||
	}
 | 
			
		||||
 
 | 
			
		||||
@@ -36,22 +36,6 @@ import (
 	"k8s.io/utils/pointer"
 )
 
-var (
-	createNode                 = testutils.CreateNode
-	createPausePod             = testutils.CreatePausePod
-	createPausePodWithResource = testutils.CreatePausePodWithResource
-	deletePod                  = testutils.DeletePod
-	initPausePod               = testutils.InitPausePod
-	initTest                   = testutils.InitTestSchedulerWithNS
-	nextPod                    = testutils.NextPod
-	nextPodOrDie               = testutils.NextPodOrDie
-	runPausePod                = testutils.RunPausePod
-	updateNode                 = testutils.UpdateNode
-	waitForNodesInCache        = testutils.WaitForNodesInCache
-	waitForPodUnschedulable    = testutils.WaitForPodUnschedulable
-	waitForReflection          = testutils.WaitForReflection
-)
-
 // The returned shutdown func will delete created resources and scheduler, resources should be those
 // that will affect the scheduling result, like nodes, pods, etc.. Namespaces should not be
 // deleted here because it's created together with the apiserver, they should be deleted
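Review note on the final hunk: deleting the shared alias block has to land together with the call-site rewrites above, yet nothing in the compiler forces it, because Go only rejects unused imports and unused local variables; an unused package-level `var` compiles silently, which is how these aliases could have lingered indefinitely. A tiny self-contained illustration:

```go
package main

import "strings"

// Legal Go even though nothing references it: unused package-level variables
// are allowed, unlike unused imports or unused locals inside a function.
var toUpper = strings.ToUpper

func main() {}
```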