Integration tests for KEP Pod Scheduling Readiness
- test generic integration in plugins_test.go
- test integration with SchedulingGates plugin in queue_test.go
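For context: while a Pod's `spec.schedulingGates` list is non-empty, the PodSchedulingReadiness feature keeps it out of scheduling, and the SchedulingGates plugin enforces this at the scheduler's PreEnqueue extension point. Below is a minimal sketch, not part of this commit, of a gated Pod built directly against the core/v1 API (the pod name, gate name, and image are illustrative). An external controller later ungates the Pod by clearing the list, which is exactly what the strategic-merge patch in the test does.

```go
import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// A gated Pod: the scheduler keeps it pending until every entry in
// Spec.SchedulingGates has been removed (e.g. by an external controller).
var gatedPod = &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "p1"},
	Spec: v1.PodSpec{
		SchedulingGates: []v1.PodSchedulingGate{{Name: "example.com/foo"}},
		Containers: []v1.Container{
			{Name: "pause", Image: "registry.k8s.io/pause:3.9"},
		},
	},
}
```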
@@ -30,12 +30,16 @@ import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	configv1 "k8s.io/kube-scheduler/config/v1"
	apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler"
	configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
	"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -47,6 +51,128 @@ import (
	"k8s.io/utils/pointer"
)

func TestSchedulingGates(t *testing.T) {
	tests := []struct {
		name                  string
		pods                  []*v1.Pod
		featureEnabled        bool
		want                  []string
		rmPodsSchedulingGates []int
		wantPostGatesRemoval  []string
	}{
		{
			name: "feature disabled, regular pods",
			pods: []*v1.Pod{
				st.MakePod().Name("p1").Container("pause").Obj(),
				st.MakePod().Name("p2").Container("pause").Obj(),
			},
			featureEnabled: false,
			want:           []string{"p1", "p2"},
		},
		{
			name: "feature enabled, regular pods",
			pods: []*v1.Pod{
				st.MakePod().Name("p1").Container("pause").Obj(),
				st.MakePod().Name("p2").Container("pause").Obj(),
			},
			featureEnabled: true,
			want:           []string{"p1", "p2"},
		},
		{
			name: "feature disabled, one pod carrying scheduling gates",
			pods: []*v1.Pod{
				st.MakePod().Name("p1").SchedulingGates([]string{"foo"}).Container("pause").Obj(),
				st.MakePod().Name("p2").Container("pause").Obj(),
			},
			featureEnabled: false,
			want:           []string{"p1", "p2"},
		},
		{
			name: "feature enabled, one pod carrying scheduling gates",
			pods: []*v1.Pod{
				st.MakePod().Name("p1").SchedulingGates([]string{"foo"}).Container("pause").Obj(),
				st.MakePod().Name("p2").Container("pause").Obj(),
			},
			featureEnabled: true,
			want:           []string{"p2"},
		},
		{
			name: "feature enabled, two pods carrying scheduling gates, and remove gates of one pod",
			pods: []*v1.Pod{
				st.MakePod().Name("p1").SchedulingGates([]string{"foo"}).Container("pause").Obj(),
				st.MakePod().Name("p2").SchedulingGates([]string{"bar"}).Container("pause").Obj(),
				st.MakePod().Name("p3").Container("pause").Obj(),
			},
			featureEnabled:        true,
			want:                  []string{"p3"},
			rmPodsSchedulingGates: []int{1}, // remove gates of 'p2'
			wantPostGatesRemoval:  []string{"p2"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodSchedulingReadiness, tt.featureEnabled)()

			// Use zero backoff seconds to bypass backoffQ.
			// It's intended to not start the scheduler's queue, and hence to
			// not start any flushing logic. We will pop and schedule the Pods manually later.
			testCtx := testutils.InitTestSchedulerWithOptions(
				t,
				testutils.InitTestAPIServer(t, "pod-scheduling-gates", nil),
				0,
				scheduler.WithPodInitialBackoffSeconds(0),
				scheduler.WithPodMaxBackoffSeconds(0),
			)
			testutils.SyncInformerFactory(testCtx)
			defer testutils.CleanupTest(t, testCtx)

			cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
			for _, p := range tt.pods {
				p.Namespace = ns
				if _, err := cs.CoreV1().Pods(ns).Create(ctx, p, metav1.CreateOptions{}); err != nil {
					t.Fatalf("Failed to create Pod %q: %v", p.Name, err)
				}
			}

			// Wait for the pods to be present in the scheduling queue.
			if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
				pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
				return len(pendingPods) == len(tt.pods), nil
			}); err != nil {
				t.Fatal(err)
			}

			// Pop the expected pods out. They should be de-queueable.
			for _, wantPod := range tt.want {
				podInfo := nextPodOrDie(t, testCtx)
				if got := podInfo.Pod.Name; got != wantPod {
					t.Errorf("Want %v to be popped out, but got %v", wantPod, got)
				}
			}

			if len(tt.rmPodsSchedulingGates) == 0 {
				return
			}
			// Remove scheduling gates from the pod spec.
			for _, idx := range tt.rmPodsSchedulingGates {
				patch := `{"spec": {"schedulingGates": null}}`
				podName := tt.pods[idx].Name
				if _, err := cs.CoreV1().Pods(ns).Patch(ctx, podName, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}); err != nil {
					t.Fatalf("Failed to patch pod %v: %v", podName, err)
				}
			}
			// Pop the expected pods out. They should be de-queueable.
			for _, wantPod := range tt.wantPostGatesRemoval {
				podInfo := nextPodOrDie(t, testCtx)
				if got := podInfo.Pod.Name; got != wantPod {
					t.Errorf("Want %v to be popped out, but got %v", wantPod, got)
				}
			}
		})
	}
}

// TestCoreResourceEnqueue verifies that Pods failed by in-tree default plugins can be
// moved properly upon their registered events.
func TestCoreResourceEnqueue(t *testing.T) {
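The "registered events" mentioned in that comment come from the scheduler framework's enqueue extensions. A hedged sketch, assuming the framework API at the time this test was introduced; the plugin type and the particular event set are illustrative, not taken from this diff:

```go
// SamplePlugin is a hypothetical plugin. In-tree plugins implement
// EventsToRegister to declare which cluster events may make a Pod they
// previously rejected schedulable again; the scheduler uses these
// registrations to move such Pods out of the unschedulable pool.
func (pl *SamplePlugin) EventsToRegister() []framework.ClusterEvent {
	return []framework.ClusterEvent{
		// Re-check rejected Pods when a Pod is deleted (capacity may free up)...
		{Resource: framework.Pod, ActionType: framework.Delete},
		// ...or when a Node is added or updated.
		{Resource: framework.Node, ActionType: framework.Add | framework.Update},
	}
}
```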