	add integration-test for NonPreemption
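This commit adds an integration test, TestNonPreemption, which enables the NonPreemptingPriority feature gate and checks that a high-priority preemptor pod with PreemptionPolicy set to Never does not preempt a lower-priority victim, while a preemptor with a nil policy does get nominated. To support it, a PreemptionPolicy field is plumbed through pausePodConfig and initPausePod, and the waitForPodToScheduleWithTimeout and podScheduled helpers are added. The sketch below is not part of the commit; it only illustrates, with core/v1 types and made-up values (pod name, namespace, priority, pause image), what a pod spec carrying PreemptionPolicy: Never looks like:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Illustrative values only; the integration test sets the same fields via
	// pausePodConfig/initPausePod rather than building the Pod by hand.
	preemptNever := v1.PreemptNever
	highPriority := int32(1000)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "preemptor-pod", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "k8s.gcr.io/pause:3.2"},
			},
			// Priority is normally resolved from PriorityClassName by the
			// priority admission plugin; the integration test sets it directly.
			Priority: &highPriority,
			// PreemptionPolicy: Never means this pod will not preempt
			// lower-priority pods even if it cannot be scheduled.
			PreemptionPolicy: &preemptNever,
		},
	}
	fmt.Printf("%s preemptionPolicy=%v\n", pod.Name, *pod.Spec.PreemptionPolicy)
}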
@@ -33,13 +33,16 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler"
 	schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -434,6 +437,87 @@ func TestPreemption(t *testing.T) {
 	}
 }
 
+// TestNonPreemption tests that the NonPreempt option of PriorityClass of the scheduler works as expected.
+func TestNonPreemption(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NonPreemptingPriority, true)()
+
+	var preemptNever = v1.PreemptNever
+	// Initialize scheduler.
+	testCtx := initTest(t, "non-preemption")
+	defer testutils.CleanupTest(t, testCtx)
+	cs := testCtx.ClientSet
+	tests := []struct {
+		name             string
+		PreemptionPolicy *v1.PreemptionPolicy
+	}{
+		{
+			name:             "pod preemption will happen",
+			PreemptionPolicy: nil,
+		},
+		{
+			name:             "pod preemption will not happen",
+			PreemptionPolicy: &preemptNever,
+		},
+	}
+	victim := initPausePod(&pausePodConfig{
+		Name:      "victim-pod",
+		Namespace: testCtx.NS.Name,
+		Priority:  &lowPriority,
+		Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
+			v1.ResourceCPU:    *resource.NewMilliQuantity(400, resource.DecimalSI),
+			v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
+		},
+	})
+
+	preemptor := initPausePod(&pausePodConfig{
+		Name:      "preemptor-pod",
+		Namespace: testCtx.NS.Name,
+		Priority:  &highPriority,
+		Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
+			v1.ResourceCPU:    *resource.NewMilliQuantity(300, resource.DecimalSI),
+			v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
+		},
+	})
+
+	// Create a node with some resources.
+	nodeRes := &v1.ResourceList{
+		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
+		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	}
+	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	if err != nil {
+		t.Fatalf("Error creating nodes: %v", err)
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			defer testutils.CleanupPods(cs, t, []*v1.Pod{preemptor, victim})
+			preemptor.Spec.PreemptionPolicy = test.PreemptionPolicy
+			victimPod, err := createPausePod(cs, victim)
+			if err != nil {
+				t.Fatalf("Error while creating victim: %v", err)
+			}
+			if err := waitForPodToScheduleWithTimeout(cs, victimPod, 5*time.Second); err != nil {
+				t.Fatalf("victim %v should become scheduled", victimPod.Name)
+			}
+
+			preemptorPod, err := createPausePod(cs, preemptor)
+			if err != nil {
+				t.Fatalf("Error while creating preemptor: %v", err)
+			}
+
+			err = waitForNominatedNodeNameWithTimeout(cs, preemptorPod, 5*time.Second)
+			// test.PreemptionPolicy == nil means we expect the preemptor to be nominated.
+			expect := test.PreemptionPolicy == nil
+			// err == nil indicates the preemptor is indeed nominated.
+			got := err == nil
+			if got != expect {
+				t.Errorf("Expect preemptor to be nominated=%v, but got=%v", expect, got)
+			}
+		})
+	}
+}
+
 // TestDisablePreemption tests disable pod preemption of scheduler works as expected.
 func TestDisablePreemption(t *testing.T) {
 	// Initialize scheduler, and disable preemption.
@@ -204,6 +204,7 @@ type pausePodConfig struct {
 	NodeName                          string
 	SchedulerName                     string
 	Priority                          *int32
+	PreemptionPolicy                  *v1.PreemptionPolicy
 	PriorityClassName                 string
 }
 
@@ -230,6 +231,7 @@ func initPausePod(conf *pausePodConfig) *v1.Pod {
 			NodeName:          conf.NodeName,
 			SchedulerName:     conf.SchedulerName,
 			Priority:          conf.Priority,
+			PreemptionPolicy:  conf.PreemptionPolicy,
 			PriorityClassName: conf.PriorityClassName,
 		},
 	}
@@ -398,6 +400,12 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
 	return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second)
 }
 
+// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
+// an error if it does not get scheduled within the given timeout.
+func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
+	return wait.Poll(100*time.Millisecond, timeout, podScheduled(cs, pod.Namespace, pod.Name))
+}
+
 // waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
 // the expected values.
 func waitForPDBsStable(testCtx *testutils.TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
@@ -485,3 +493,18 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
 		t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
 	}
 }
+
+// podScheduled returns true if a node is assigned to the given pod.
+func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
+	return func() (bool, error) {
+		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+		if err != nil {
+			// This could be a connection error so we want to retry.
+			return false, nil
+		}
+		if pod.Spec.NodeName == "" {
+			return false, nil
+		}
+		return true, nil
+	}
+}