noderesourcefit: scheduler queueing hints

Author: carlory
Co-authored-by: Kensei Nakada <handbomusic@gmail.com>
Date: 2023-07-08 21:13:46 +08:00
commit 9e1adced5d (parent 46f4248d56)
2 changed files with 355 additions and 2 deletions
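The diff below attaches QueueingHintFn callbacks to the events the NodeResourcesFit plugin registers and adds tests that call the two new hint functions directly. TestEventsToRegister nils the callbacks out before comparing, since function values cannot be compared by cmp.Diff. For orientation only, here is a minimal sketch of how the plugin side could wire those hints; it assumes the framework's ClusterEventWithHint type and the file's existing framework import, and the exact event set and action types belong to the plugin itself, so they may differ from this sketch:

// Sketch only: how queueing hints could be attached to the plugin's cluster
// events. Reuses the surrounding package's framework import; not the committed code.
func (f *Fit) EventsToRegister() []framework.ClusterEventWithHint {
	podActionType := framework.Delete
	if f.enableInPlacePodVerticalScaling {
		// With in-place pod vertical scaling, resizing a running pod can free
		// resources, so pod updates become relevant to this plugin.
		podActionType |= framework.Update
	}
	return []framework.ClusterEventWithHint{
		{Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodChange},
		{Event: framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add | framework.Update}, QueueingHintFn: f.isSchedulableAfterNodeChange},
	}
}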

@@ -23,9 +23,11 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2/ktesting"
_ "k8s.io/klog/v2/ktesting/init"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -1109,9 +1111,246 @@ func TestEventsToRegister(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
fp := &Fit{enableInPlacePodVerticalScaling: test.inPlacePodVerticalScalingEnabled}
actualClusterEvents := fp.EventsToRegister()
for i := range actualClusterEvents {
actualClusterEvents[i].QueueingHintFn = nil
}
if diff := cmp.Diff(test.expectedClusterEvents, actualClusterEvents); diff != "" {
t.Error("Cluster Events doesn't match extected events (-expected +actual):\n", diff)
}
})
}
}
func Test_isSchedulableAfterPodChange(t *testing.T) {
testcases := map[string]struct {
pod *v1.Pod
oldObj, newObj interface{}
enableInPlacePodVerticalScaling bool
expectedHint framework.QueueingHint
expectedErr bool
}{
"backoff-wrong-old-object": {
pod: &v1.Pod{},
oldObj: "not-a-pod",
enableInPlacePodVerticalScaling: true,
expectedHint: framework.Queue,
expectedErr: true,
},
"backoff-wrong-new-object": {
pod: &v1.Pod{},
newObj: "not-a-pod",
enableInPlacePodVerticalScaling: true,
expectedHint: framework.Queue,
expectedErr: true,
},
"queue-on-deleted": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: framework.Queue,
},
"skip-queue-on-unscheduled-pod-deleted": {
pod: &v1.Pod{},
oldObj: &v1.Pod{},
enableInPlacePodVerticalScaling: true,
expectedHint: framework.QueueSkip,
},
"skip-queue-on-disable-inplace-pod-vertical-scaling": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
enableInPlacePodVerticalScaling: false,
expectedHint: framework.QueueSkip,
},
"skip-queue-on-unscheduled-pod": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: framework.QueueSkip,
},
"skip-queue-on-non-resource-changes": {
pod: &v1.Pod{},
oldObj: st.MakePod().Label("k", "v").Node("fake").Obj(),
newObj: st.MakePod().Label("foo", "bar").Node("fake").Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: framework.QueueSkip,
},
"skip-queue-on-unrelated-resource-changes": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: framework.QueueSkip,
},
"skip-queue-on-resource-scale-up": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: framework.QueueSkip,
},
"queue-on-some-resource-scale-down": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
enableInPlacePodVerticalScaling: true,
expectedHint: framework.Queue,
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
logger, ctx := ktesting.NewTestContext(t)
p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{
EnableInPlacePodVerticalScaling: tc.enableInPlacePodVerticalScaling,
})
if err != nil {
t.Fatal(err)
}
actualHint, err := p.(*Fit).isSchedulableAfterPodChange(logger, tc.pod, tc.oldObj, tc.newObj)
if tc.expectedErr {
require.Error(t, err)
return
}
require.NoError(t, err)
require.Equal(t, tc.expectedHint, actualHint)
})
}
}
func Test_isSchedulableAfterNodeChange(t *testing.T) {
testcases := map[string]struct {
pod *v1.Pod
oldObj, newObj interface{}
expectedHint framework.QueueingHint
expectedErr bool
}{
"backoff-wrong-new-object": {
pod: &v1.Pod{},
newObj: "not-a-node",
expectedHint: framework.Queue,
expectedErr: true,
},
"backoff-wrong-old-object": {
pod: &v1.Pod{},
oldObj: "not-a-node",
newObj: &v1.Node{},
expectedHint: framework.Queue,
expectedErr: true,
},
"skip-queue-on-node-add-without-sufficient-resources": {
pod: newResourcePod(framework.Resource{Memory: 2}),
newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
v1.ResourceMemory: "1",
}).Obj(),
expectedHint: framework.QueueSkip,
},
"skip-queue-on-node-add-without-required-resource-type": {
pod: newResourcePod(framework.Resource{
ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}},
),
newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
extendedResourceB: "1",
}).Obj(),
expectedHint: framework.QueueSkip,
},
"queue-on-node-add-with-sufficient-resources": {
pod: newResourcePod(framework.Resource{
Memory: 2,
ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1},
}),
newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
v1.ResourceMemory: "4",
extendedResourceA: "2",
}).Obj(),
expectedHint: framework.Queue,
},
// Uncomment this case once isSchedulableAfterNodeChange also checks the
// original node's resources.
// "skip-queue-on-node-unrelated-changes": {
// pod: &v1.Pod{},
// oldObj: st.MakeNode().Obj(),
// newObj: st.MakeNode().Label("foo", "bar").Obj(),
// expectedHint: framework.QueueSkip,
// },
"skip-queue-on-node-changes-from-suitable-to-unsuitable": {
pod: newResourcePod(framework.Resource{
Memory: 2,
ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1},
}),
oldObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
v1.ResourceMemory: "4",
extendedResourceA: "2",
}).Obj(),
newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
v1.ResourceMemory: "1",
extendedResourceA: "2",
}).Obj(),
expectedHint: framework.QueueSkip,
},
"queue-on-node-changes-from-unsuitable-to-suitable": {
pod: newResourcePod(framework.Resource{
Memory: 2,
ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1},
}),
oldObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
v1.ResourceMemory: "1",
extendedResourceA: "2",
}).Obj(),
newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
v1.ResourceMemory: "4",
extendedResourceA: "2",
}).Obj(),
expectedHint: framework.Queue,
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
logger, ctx := ktesting.NewTestContext(t)
p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
if err != nil {
t.Fatal(err)
}
actualHint, err := p.(*Fit).isSchedulableAfterNodeChange(logger, tc.pod, tc.oldObj, tc.newObj)
if tc.expectedErr {
require.Error(t, err)
return
}
require.NoError(t, err)
require.Equal(t, tc.expectedHint, actualHint)
})
}
}
func TestIsFit(t *testing.T) {
testCases := map[string]struct {
pod *v1.Pod
node *v1.Node
expected bool
}{
"nil node": {
pod: &v1.Pod{},
expected: false,
},
"insufficient resource": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
expected: false,
},
"sufficient resource": {
pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
expected: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
if got := isFit(tc.pod, tc.node); got != tc.expected {
t.Errorf("expected: %v, got: %v", tc.expected, got)
}
})
}
}
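
The Test_isSchedulableAfterNodeChange and TestIsFit tables above pin the node-side behavior down to one question: does the pod fit the added or updated node? A sketch consistent with those tables follows. It is hedged: it assumes the surrounding noderesources package (fmt, klog, and framework imports) and an exported Fits helper that returns the list of insufficient resources, and, as the commented-out case notes, it does not yet compare against the original node:

// Sketches, not the committed implementations.

// isFit reports whether the pod's requests can be satisfied by the node.
// A nil node never fits; otherwise the plugin's Fits computation decides
// (an empty list of insufficient resources means the pod fits).
func isFit(pod *v1.Pod, node *v1.Node) bool {
	if node == nil {
		return false
	}
	nodeInfo := framework.NewNodeInfo()
	nodeInfo.SetNode(node)
	return len(Fits(pod, nodeInfo)) == 0
}

// isSchedulableAfterNodeChange re-queues the pod only when the added or
// updated node now fits it; malformed event objects trigger a backoff.
func (f *Fit) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
	modifiedNode, ok := newObj.(*v1.Node)
	if !ok {
		return framework.Queue, fmt.Errorf("expected *v1.Node, got %T", newObj)
	}
	if oldObj != nil {
		if _, ok := oldObj.(*v1.Node); !ok {
			return framework.Queue, fmt.Errorf("expected *v1.Node, got %T", oldObj)
		}
	}
	if isFit(pod, modifiedNode) {
		logger.V(5).Info("node change may make the pod schedulable", "pod", klog.KObj(pod), "node", klog.KObj(modifiedNode))
		return framework.Queue, nil
	}
	return framework.QueueSkip, nil
}

With this shape, a pod with no requests fits even an unchanged node, so a label-only node update would return Queue rather than QueueSkip; that is why the "skip-queue-on-node-unrelated-changes" case stays commented out until the hint also inspects the original node's resources.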