kube-scheduler: pass the context parameter to the NewFramework function

Co-authored-by: Aldo Culquicondor <1299064+alculquicondor@users.noreply.github.com>
Mengjiao Liu
2023-05-15 18:36:17 +08:00
parent b31774d39b
commit 1c05cf1d51
31 changed files with 222 additions and 164 deletions
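Every file in this commit follows the same pattern: runtime.NewFramework no longer receives a stop channel, it takes a context.Context as its first argument, and the tests build that context with ktesting so that framework log output is attached to the test case. A minimal before/after sketch of the call, assuming (as the diffs below suggest) that the two nil arguments are the plugin registry and scheduler profile these scoring tests do not need, and that ctx and snapshot come from the surrounding test setup:

// Before: cancellation reached the framework only through a stop channel.
fh, _ := runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot))

// After: the context comes first and carries both cancellation and the
// per-test logger installed by ktesting.NewTestContext.
fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))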

View File

@@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -384,9 +385,10 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 snapshot := cache.NewSnapshot(test.pods, test.nodes)
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
-fh, _ := runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot))
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 p, _ := NewBalancedAllocation(&test.args, fh, feature.Features{})
 state := framework.NewCycleState()
 for i := range test.nodes {
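The ktesting idiom introduced in the hunk above repeats in every test below. A self-contained sketch of the new setup, where TestExample and the explicit error check are illustrative additions (the real tests ignore the NewFramework error):

package example

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)

func TestExample(t *testing.T) {
	// NewTestContext returns a logger bound to t and a context carrying that
	// logger, so anything the framework logs is attributed to this test case.
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// The context replaces the former ctx.Done() stop-channel argument.
	fh, err := runtime.NewFramework(ctx, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	_ = fh
}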

View File

@@ -25,6 +25,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -836,12 +837,13 @@ func TestFitScore(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 state := framework.NewCycleState()
 snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
-fh, _ := runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot))
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 args := test.nodeResourcesFitArgs
 p, err := NewFit(&args, fh, plfeature.Features{})
 if err != nil {
@@ -958,6 +960,9 @@ func BenchmarkTestFitScore(b *testing.B) {
 for _, test := range tests {
 b.Run(test.name, func(b *testing.B) {
+_, ctx := ktesting.NewTestContext(b)
+ctx, cancel := context.WithCancel(ctx)
+defer cancel()
 existingPods := []*v1.Pod{
 st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
 }
@@ -966,15 +971,14 @@ func BenchmarkTestFitScore(b *testing.B) {
 }
 state := framework.NewCycleState()
 var nodeResourcesFunc = runtime.FactoryAdapter(plfeature.Features{}, NewFit)
-pl := plugintesting.SetupPlugin(b, nodeResourcesFunc, &test.nodeResourcesFitArgs, cache.NewSnapshot(existingPods, nodes))
+pl := plugintesting.SetupPlugin(ctx, b, nodeResourcesFunc, &test.nodeResourcesFitArgs, cache.NewSnapshot(existingPods, nodes))
 p := pl.(*Fit)
 b.ResetTimer()
 requestedPod := st.MakePod().Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj()
 for i := 0; i < b.N; i++ {
-_, status := p.Score(context.Background(), state, requestedPod, nodes[0].Name)
+_, status := p.Score(ctx, state, requestedPod, nodes[0].Name)
 if !status.IsSuccess() {
 b.Errorf("unexpected status: %v", status)
 }
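The benchmark hunk above shows that the same helper works for benchmarks, since ktesting.NewTestContext also accepts a testing.B; the one context created per sub-benchmark is then reused for every Score call instead of constructing context.Background() inside the loop. A sketch of just that loop, assuming p, state, requestedPod and nodes are set up as in the hunk above:

_, ctx := ktesting.NewTestContext(b)
ctx, cancel := context.WithCancel(ctx)
defer cancel()

b.ResetTimer()
for i := 0; i < b.N; i++ {
	// Score now receives the logger-carrying test context.
	if _, status := p.Score(ctx, state, requestedPod, nodes[0].Name); !status.IsSuccess() {
		b.Errorf("unexpected status: %v", status)
	}
}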

View File

@@ -23,6 +23,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -385,12 +386,13 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 state := framework.NewCycleState()
 snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
-fh, _ := runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot))
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 p, err := NewFit(
 &config.NodeResourcesFitArgs{

View File

@@ -23,6 +23,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -342,12 +343,13 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 state := framework.NewCycleState()
 snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
-fh, _ := runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot))
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 p, err := NewFit(
 &config.NodeResourcesFitArgs{

View File

@@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -103,12 +103,13 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 state := framework.NewCycleState()
 snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
-fh, _ := runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot))
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 p, err := NewFit(&config.NodeResourcesFitArgs{
 ScoringStrategy: &config.ScoringStrategy{
@@ -303,7 +304,8 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
 snapshot := cache.NewSnapshot(test.pods, test.nodes)
-fh, _ := runtime.NewFramework(nil, nil, wait.NeverStop, runtime.WithSnapshotSharedLister(snapshot))
+_, ctx := ktesting.NewTestContext(t)
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 args := config.NodeResourcesFitArgs{
 ScoringStrategy: &config.ScoringStrategy{
 Type: config.RequestedToCapacityRatio,
@@ -527,7 +529,8 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
 snapshot := cache.NewSnapshot(test.pods, test.nodes)
-fh, _ := runtime.NewFramework(nil, nil, wait.NeverStop, runtime.WithSnapshotSharedLister(snapshot))
+_, ctx := ktesting.NewTestContext(t)
+fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 args := config.NodeResourcesFitArgs{
 ScoringStrategy: &config.ScoringStrategy{