Merge pull request #131887 from ania-borowiec/extract_cyclestate_interface

Moving Scheduler interfaces to staging: split CycleState into an interface and an implementation, and move the interface to the staging repo (k8s.io/kube-scheduler/framework)
Authored by Kubernetes Prow Robot on 2025-05-30 04:00:18 -07:00; committed by GitHub.
52 changed files with 518 additions and 358 deletions

View File

@@ -40,6 +40,7 @@ import (
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/featuregate"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/cmd/kube-scheduler/app/options"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
@@ -514,7 +515,7 @@ func newFoo(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.
return &foo{}, nil
}
func (*foo) PreFilter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (*foo) PreFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
return nil, nil
}
@@ -522,6 +523,6 @@ func (*foo) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
func (*foo) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (*foo) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return nil
}
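
This signature change is the pattern repeated across the PR: plugin callbacks now take the fwk.CycleState interface from the staged k8s.io/kube-scheduler/framework package instead of the concrete *framework.CycleState. A minimal migration sketch for a hypothetical out-of-tree plugin (myPlugin and its logic are illustrative, not part of this diff):

package myplugin

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type myPlugin struct{}

// Old signature took state *framework.CycleState (concrete pointer);
// the new one takes state fwk.CycleState (interface from the staging repo).
func (p *myPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
	return nil
}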

View File

@@ -29,14 +29,15 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
type frameworkContract interface {
RunPreFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status, sets.Set[string])
RunFilterPlugins(context.Context, *framework.CycleState, *v1.Pod, *framework.NodeInfo) *framework.Status
RunReservePluginsReserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status
RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status, sets.Set[string])
RunFilterPlugins(context.Context, fwk.CycleState, *v1.Pod, *framework.NodeInfo) *framework.Status
RunReservePluginsReserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status
}
func TestFrameworkContract(t *testing.T) {

View File

@@ -17,32 +17,12 @@ limitations under the License.
package framework
import (
"errors"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
)
var (
// ErrNotFound is the not found error message.
ErrNotFound = errors.New("not found")
)
// StateData is a generic type for arbitrary data stored in CycleState.
type StateData interface {
// Clone is an interface to make a copy of StateData. For performance reasons,
// clone should make shallow copies for members (e.g., slices or maps) that are not
// impacted by PreFilter's optional AddPod/RemovePod methods.
Clone() StateData
}
// StateKey is the type of keys stored in CycleState.
type StateKey string
// CycleState provides a mechanism for plugins to store and retrieve arbitrary data.
// StateData stored by one plugin can be read, altered, or deleted by another plugin.
// CycleState does not provide any data protection, as all plugins are assumed to be
// trusted.
// Note: CycleState uses a sync.Map as its backing storage because it is thread-safe and optimized for "write once and read many times" scenarios.
// This is the recommended pattern in all in-tree plugins: plugin-specific state is written once in PreFilter/PreScore and afterward read many times in Filter/Score.
type CycleState struct {
@@ -50,10 +30,10 @@ type CycleState struct {
storage sync.Map
// if recordPluginMetrics is true, metrics.PluginExecutionDuration will be recorded for this cycle.
recordPluginMetrics bool
// SkipFilterPlugins are plugins that will be skipped in the Filter extension point.
SkipFilterPlugins sets.Set[string]
// SkipScorePlugins are plugins that will be skipped in the Score extension point.
SkipScorePlugins sets.Set[string]
// skipFilterPlugins are plugins that will be skipped in the Filter extension point.
skipFilterPlugins sets.Set[string]
// skipScorePlugins are plugins that will be skipped in the Score extension point.
skipScorePlugins sets.Set[string]
}
// NewCycleState initializes a new CycleState and returns its pointer.
@@ -77,22 +57,38 @@ func (c *CycleState) SetRecordPluginMetrics(flag bool) {
c.recordPluginMetrics = flag
}
func (c *CycleState) SetSkipFilterPlugins(plugins sets.Set[string]) {
c.skipFilterPlugins = plugins
}
func (c *CycleState) GetSkipFilterPlugins() sets.Set[string] {
return c.skipFilterPlugins
}
func (c *CycleState) SetSkipScorePlugins(plugins sets.Set[string]) {
c.skipScorePlugins = plugins
}
func (c *CycleState) GetSkipScorePlugins() sets.Set[string] {
return c.skipScorePlugins
}
// Clone creates a copy of CycleState and returns it as an fwk.CycleState. Clone
// returns nil if the state being cloned is nil.
func (c *CycleState) Clone() *CycleState {
func (c *CycleState) Clone() fwk.CycleState {
if c == nil {
return nil
}
copy := NewCycleState()
// Safe copy storage in case of overwriting.
c.storage.Range(func(k, v interface{}) bool {
copy.storage.Store(k, v.(StateData).Clone())
copy.storage.Store(k, v.(fwk.StateData).Clone())
return true
})
// The fields below are not mutated, so we don't have to safe-copy them.
copy.recordPluginMetrics = c.recordPluginMetrics
copy.SkipFilterPlugins = c.SkipFilterPlugins
copy.SkipScorePlugins = c.SkipScorePlugins
copy.skipFilterPlugins = c.skipFilterPlugins
copy.skipScorePlugins = c.skipScorePlugins
return copy
}
@@ -101,23 +97,23 @@ func (c *CycleState) Clone() *CycleState {
// present, ErrNotFound is returned.
//
// See CycleState for notes on concurrency.
func (c *CycleState) Read(key StateKey) (StateData, error) {
func (c *CycleState) Read(key fwk.StateKey) (fwk.StateData, error) {
if v, ok := c.storage.Load(key); ok {
return v.(StateData), nil
return v.(fwk.StateData), nil
}
return nil, ErrNotFound
return nil, fwk.ErrNotFound
}
// Write stores the given "val" in CycleState with the given "key".
//
// See CycleState for notes on concurrency.
func (c *CycleState) Write(key StateKey, val StateData) {
func (c *CycleState) Write(key fwk.StateKey, val fwk.StateData) {
c.storage.Store(key, val)
}
// Delete deletes data with the given key from CycleState.
//
// See CycleState for notes on concurrency.
func (c *CycleState) Delete(key StateKey) {
func (c *CycleState) Delete(key fwk.StateKey) {
c.storage.Delete(key)
}
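
The Read/Write/Delete methods above back the write-once/read-many pattern described in the CycleState comment: state is written once in PreFilter/PreScore and read in later phases. A sketch under that assumption, using the same imports as the earlier myPlugin sketch (plugin, key, and state names are illustrative):

const exampleKey fwk.StateKey = "example.com/chosen-zone"

type zonePlugin struct{}

type zoneState struct{ zone string }

// The state is never mutated after PreFilter, so a shallow Clone is safe.
func (s *zoneState) Clone() fwk.StateData { return s }

func (p *zonePlugin) PreFilter(_ context.Context, cs fwk.CycleState, _ *v1.Pod, _ []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
	cs.Write(exampleKey, &zoneState{zone: "zone-a"}) // write once
	return nil, nil
}

func (p *zonePlugin) Filter(_ context.Context, cs fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
	v, err := cs.Read(exampleKey) // read many times, once per node
	if err != nil {
		return framework.AsStatus(err) // fwk.ErrNotFound if PreFilter didn't run
	}
	if s, ok := v.(*zoneState); ok && s.zone == "" {
		return framework.NewStatus(framework.Unschedulable, "no zone selected")
	}
	return nil
}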

View File

@@ -19,29 +19,39 @@ package framework
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/util/sets"
fwk "k8s.io/kube-scheduler/framework"
)
type fakeData struct {
data string
}
func (f *fakeData) Clone() StateData {
func (f *fakeData) Clone() fwk.StateData {
copy := &fakeData{
data: f.data,
}
return copy
}
var key StateKey = "fakedata_key"
var key fwk.StateKey = "fakedata_key"
// createCycleStateWithFakeData creates a *CycleState populated with fakeData.
// The given data is stored in the fakeData entry; the optional skipPlugins slices
// populate the skip-filter and skip-score plugin sets.
func createCycleStateWithFakeData(data string, recordPluginMetrics bool) *CycleState {
func createCycleStateWithFakeData(data string, recordPluginMetrics bool, skipPlugins ...[]string) *CycleState {
c := NewCycleState()
c.Write(key, &fakeData{
data: data,
})
c.SetRecordPluginMetrics(recordPluginMetrics)
if len(skipPlugins) > 0 {
c.SetSkipFilterPlugins(sets.New(skipPlugins[0]...))
}
if len(skipPlugins) > 1 {
c.SetSkipScorePlugins(sets.New(skipPlugins[1]...))
}
return c
}
@@ -58,6 +68,12 @@ func isCycleStateEqual(a, b *CycleState) (bool, string) {
if a.recordPluginMetrics != b.recordPluginMetrics {
return false, fmt.Sprintf("CycleState A and B have a different recordPluginMetrics. A: %v, B: %v", a.recordPluginMetrics, b.recordPluginMetrics)
}
if diff := cmp.Diff(a.skipFilterPlugins, b.skipFilterPlugins); diff != "" {
return false, fmt.Sprintf("CycleState A and B have different SkipFilterPlugin sets. -wanted,+got:\n%s", diff)
}
if diff := cmp.Diff(a.skipScorePlugins, b.skipScorePlugins); diff != "" {
return false, fmt.Sprintf("CycleState A and B have different SkipScorePlugins sets. -wanted,+got:\n%s", diff)
}
var msg string
isEqual := true
@@ -75,7 +91,7 @@ func isCycleStateEqual(a, b *CycleState) (bool, string) {
typed2, ok2 := v2.(*fakeData)
if !ok1 || !ok2 {
isEqual = false
msg = fmt.Sprintf("CycleState has the data which is not type *fakeData.")
msg = "CycleState has the data which is not type *fakeData."
return false
}
@@ -121,6 +137,21 @@ func TestCycleStateClone(t *testing.T) {
state: createCycleStateWithFakeData("data", false),
wantClonedState: createCycleStateWithFakeData("data", false),
},
{
name: "clone with SkipFilterPlugins",
state: createCycleStateWithFakeData("data", true, []string{"p1", "p2", "p3"}),
wantClonedState: createCycleStateWithFakeData("data", true, []string{"p1", "p2", "p3"}),
},
{
name: "clone with SkipScorePlugins",
state: createCycleStateWithFakeData("data", false, []string{}, []string{"p1", "p2", "p3"}),
wantClonedState: createCycleStateWithFakeData("data", false, []string{}, []string{"p1", "p2", "p3"}),
},
{
name: "clone with SkipScorePlugins and SkipFilterPlugins",
state: createCycleStateWithFakeData("data", true, []string{"p0"}, []string{"p1", "p2", "p3"}),
wantClonedState: createCycleStateWithFakeData("data", true, []string{"p0"}, []string{"p1", "p2", "p3"}),
},
{
name: "clone with nil CycleState",
state: nil,
@@ -131,7 +162,11 @@ func TestCycleStateClone(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
state := tt.state
stateCopy := state.Clone()
copy := state.Clone()
var stateCopy *CycleState
if copy != nil {
stateCopy = copy.(*CycleState)
}
if isEqual, msg := isCycleStateEqual(stateCopy, tt.wantClonedState); !isEqual {
t.Errorf("unexpected cloned state: %v", msg)

View File

@@ -37,6 +37,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
)
@@ -266,7 +267,7 @@ const (
// activated (i.e., moved to activeQ) in two phases:
// - end of a scheduling cycle if it succeeds (will be cleared from `PodsToActivate` if activated)
// - end of a binding cycle if it succeeds
var PodsToActivateKey StateKey = "kubernetes.io/pods-to-activate"
var PodsToActivateKey fwk.StateKey = "kubernetes.io/pods-to-activate"
// PodsToActivate stores pods to be activated.
type PodsToActivate struct {
@@ -276,7 +277,7 @@ type PodsToActivate struct {
}
// Clone just returns the same state.
func (s *PodsToActivate) Clone() StateData {
func (s *PodsToActivate) Clone() fwk.StateData {
return s
}
@@ -506,10 +507,10 @@ type EnqueueExtensions interface {
type PreFilterExtensions interface {
// AddPod is called by the framework while trying to evaluate the impact
// of adding podToAdd to the node while scheduling podToSchedule.
AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToAdd *PodInfo, nodeInfo *NodeInfo) *Status
AddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *PodInfo, nodeInfo *NodeInfo) *Status
// RemovePod is called by the framework while trying to evaluate the impact
// of removing podToRemove from the node while scheduling podToSchedule.
RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToRemove *PodInfo, nodeInfo *NodeInfo) *Status
RemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *PodInfo, nodeInfo *NodeInfo) *Status
}
// PreFilterPlugin is an interface that must be implemented by "PreFilter" plugins.
@@ -525,7 +526,7 @@ type PreFilterPlugin interface {
//
// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored,
// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle.
PreFilter(ctx context.Context, state *CycleState, p *v1.Pod, nodes []*NodeInfo) (*PreFilterResult, *Status)
PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []*NodeInfo) (*PreFilterResult, *Status)
// PreFilterExtensions returns a PreFilterExtensions interface if the plugin implements one,
// or nil if it does not. A Pre-filter plugin can provide extensions to incrementally
// modify its pre-processed info. The framework guarantees that the extensions
@@ -558,7 +559,7 @@ type FilterPlugin interface {
// For example, during preemption, we may pass a copy of the original
// nodeInfo object that has some pods removed from it to evaluate the
// possibility of preempting them to schedule the target pod.
Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status
Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status
}
// PostFilterPlugin is an interface for "PostFilter" plugins. These plugins are called
@@ -582,7 +583,7 @@ type PostFilterPlugin interface {
// Optionally, a non-nil PostFilterResult may be returned along with a Success status. For example,
// a preemption plugin may choose to return nominatedNodeName, so that framework can reuse that to update the
// preemptor pod's .status.nominatedNodeName field.
PostFilter(ctx context.Context, state *CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
}
// PreScorePlugin is an interface for "PreScore" plugin. PreScore is an
@@ -596,7 +597,7 @@ type PreScorePlugin interface {
// the pod will be rejected
// When it returns Skip status, other fields in status are just ignored,
// and coupled Score plugin will be skipped in this scheduling cycle.
PreScore(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*NodeInfo) *Status
PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*NodeInfo) *Status
}
// ScoreExtensions is an interface for Score extended functionality.
@@ -604,7 +605,7 @@ type ScoreExtensions interface {
// NormalizeScore is called for all node scores produced by the same plugin's "Score"
// method. A successful run of NormalizeScore will update the scores list and return
// a success status.
NormalizeScore(ctx context.Context, state *CycleState, p *v1.Pod, scores NodeScoreList) *Status
NormalizeScore(ctx context.Context, state fwk.CycleState, p *v1.Pod, scores NodeScoreList) *Status
}
// ScorePlugin is an interface that must be implemented by "Score" plugins to rank
@@ -614,7 +615,7 @@ type ScorePlugin interface {
// Score is called on each filtered node. It must return success and an integer
// indicating the rank of the node. All scoring plugins must return success or
// the pod will be rejected.
Score(ctx context.Context, state *CycleState, p *v1.Pod, nodeInfo *NodeInfo) (int64, *Status)
Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *NodeInfo) (int64, *Status)
// ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if it does not.
ScoreExtensions() ScoreExtensions
@@ -631,13 +632,13 @@ type ReservePlugin interface {
// Reserve is called by the scheduling framework when the scheduler cache is
// updated. If this method returns a failed Status, the scheduler will call
// the Unreserve method for all enabled ReservePlugins.
Reserve(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status
Reserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *Status
// Unreserve is called by the scheduling framework when a reserved pod was
// rejected, an error occurred during reservation of subsequent plugins, or
// in a later phase. The Unreserve method implementation must be idempotent
// and may be called by the scheduler even if the corresponding Reserve
// method for the same plugin was not called.
Unreserve(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string)
Unreserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string)
}
// PreBindPlugin is an interface that must be implemented by "PreBind" plugins.
@@ -646,7 +647,7 @@ type PreBindPlugin interface {
Plugin
// PreBind is called before binding a pod. All prebind plugins must return
// success or the pod will be rejected and won't be sent for binding.
PreBind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status
PreBind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *Status
}
// PostBindPlugin is an interface that must be implemented by "PostBind" plugins.
@@ -657,7 +658,7 @@ type PostBindPlugin interface {
// informational. A common application of this extension point is for cleaning
// up. If a plugin needs to clean-up its state after a pod is scheduled and
// bound, PostBind is the extension point that it should register.
PostBind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string)
PostBind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string)
}
// PermitPlugin is an interface that must be implemented by "Permit" plugins.
@@ -670,7 +671,7 @@ type PermitPlugin interface {
// The pod will also be rejected if the wait times out or the pod is rejected while
// waiting. Note that if the plugin returns "wait", the framework will wait only
// after running the remaining plugins given that no other plugin rejects the pod.
Permit(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) (*Status, time.Duration)
Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*Status, time.Duration)
}
// BindPlugin is an interface that must be implemented by "Bind" plugins. Bind
@@ -683,7 +684,7 @@ type BindPlugin interface {
// remaining bind plugins are skipped. When a bind plugin does not handle a pod,
// it must return Skip in its Status code. If a bind plugin returns an Error, the
// pod is rejected and will not be bound.
Bind(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) *Status
Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *Status
}
// Framework manages the set of plugins in use by the scheduling framework.
@@ -709,33 +710,33 @@ type Framework interface {
// The third return value contains the PreFilter plugins that rejected some or all Nodes with a PreFilterResult.
// Note that it doesn't contain a plugin when that plugin rejects this Pod with a non-success status,
// not with a PreFilterResult.
RunPreFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod) (*PreFilterResult, *Status, sets.Set[string])
RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (*PreFilterResult, *Status, sets.Set[string])
// RunPostFilterPlugins runs the set of configured PostFilter plugins.
// PostFilter plugins can either be informational, in which case they should be configured
// to execute first and return Unschedulable status, or ones that try to change the
// cluster state to make the pod potentially schedulable in a future scheduling cycle.
RunPostFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap NodeToStatusReader) (*PostFilterResult, *Status)
// RunPreBindPlugins runs the set of configured PreBind plugins. It returns
// *Status and its code is set to non-success if any of the plugins returns
// anything but Success. If the Status code is "Unschedulable", it is
// considered as a scheduling check failure, otherwise, it is considered as an
// internal error. In either case the pod is not going to be bound.
RunPreBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
RunPreBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *Status
// RunPostBindPlugins runs the set of configured PostBind plugins.
RunPostBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string)
RunPostBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string)
// RunReservePluginsReserve runs the Reserve method of the set of
// configured Reserve plugins. If any of these calls returns an error, it
// does not continue running the remaining ones and returns the error. In
// such case, pod will not be scheduled.
RunReservePluginsReserve(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
RunReservePluginsReserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *Status
// RunReservePluginsUnreserve runs the Unreserve method of the set of
// configured Reserve plugins.
RunReservePluginsUnreserve(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string)
RunReservePluginsUnreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string)
// RunPermitPlugins runs the set of configured Permit plugins. If any of these
// plugins returns a status other than "Success" or "Wait", it does not continue
@@ -743,7 +744,7 @@ type Framework interface {
// plugins returns "Wait", then this function will create and add a waiting pod
// to a map of currently waiting pods and return a status with "Wait" code.
// The pod will remain a waiting pod for the minimum duration returned by the Permit plugins.
RunPermitPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
RunPermitPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *Status
// WaitOnPermit will block, if the pod is a waiting pod, until the waiting pod is rejected or allowed.
WaitOnPermit(ctx context.Context, pod *v1.Pod) *Status
@@ -753,7 +754,7 @@ type Framework interface {
// binding, it should return code=5("skip") status. Otherwise, it should return "Error"
// or "Success". If none of the plugins handled binding, RunBindPlugins returns
// code=5("skip") status.
RunBindPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeName string) *Status
RunBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *Status
// HasFilterPlugins returns true if at least one Filter plugin is defined.
HasFilterPlugins() bool
@@ -833,7 +834,7 @@ type Handle interface {
SharedDRAManager() SharedDRAManager
// RunFilterPluginsWithNominatedPods runs the set of configured filter plugins for nominated pod on the given node.
RunFilterPluginsWithNominatedPods(ctx context.Context, state *CycleState, pod *v1.Pod, info *NodeInfo) *Status
RunFilterPluginsWithNominatedPods(ctx context.Context, state fwk.CycleState, pod *v1.Pod, info *NodeInfo) *Status
// Extenders returns registered scheduler extenders.
Extenders() []Extender
@@ -934,12 +935,12 @@ type PodNominator interface {
type PluginsRunner interface {
// RunPreScorePlugins runs the set of configured PreScore plugins. If any
// of these plugins returns any status other than "Success", the given pod is rejected.
RunPreScorePlugins(context.Context, *CycleState, *v1.Pod, []*NodeInfo) *Status
RunPreScorePlugins(context.Context, fwk.CycleState, *v1.Pod, []*NodeInfo) *Status
// RunScorePlugins runs the set of configured scoring plugins.
// It returns a list that stores scores from each plugin and total score for each Node.
// It also returns *Status, which is set to non-success if any of the plugins returns
// a non-success status.
RunScorePlugins(context.Context, *CycleState, *v1.Pod, []*NodeInfo) ([]NodePluginScores, *Status)
RunScorePlugins(context.Context, fwk.CycleState, *v1.Pod, []*NodeInfo) ([]NodePluginScores, *Status)
// RunFilterPlugins runs the set of configured Filter plugins for pod on
// the given node. Note that for the node being evaluated, the passed nodeInfo
// reference could be different from the one in NodeInfoSnapshot map (e.g., pods
@@ -947,13 +948,13 @@ type PluginsRunner interface {
// preemption, we may pass a copy of the original nodeInfo object that has some pods
// removed from it to evaluate the possibility of preempting them to
// schedule the target pod.
RunFilterPlugins(context.Context, *CycleState, *v1.Pod, *NodeInfo) *Status
RunFilterPlugins(context.Context, fwk.CycleState, *v1.Pod, *NodeInfo) *Status
// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToAdd *PodInfo, nodeInfo *NodeInfo) *Status
RunPreFilterExtensionAddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *PodInfo, nodeInfo *NodeInfo) *Status
// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podInfoToRemove *PodInfo, nodeInfo *NodeInfo) *Status
RunPreFilterExtensionRemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *PodInfo, nodeInfo *NodeInfo) *Status
}
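
All of the interfaces above now share the same shape: extension-point methods receive fwk.CycleState plus in-tree types such as *NodeInfo and *Status. As a compile-time sanity check, a hypothetical plugin can be asserted against the contract, assuming the same imports as the earlier sketches (names are illustrative):

type nodeLabelFilter struct{}

func (f *nodeLabelFilter) Name() string { return "NodeLabelFilter" }

// Filter satisfies FilterPlugin with the staged CycleState interface.
func (f *nodeLabelFilter) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
	if nodeInfo.Node().Labels["example.com/exclude"] == "true" {
		return framework.NewStatus(framework.Unschedulable, "node excluded by label")
	}
	return nil
}

// Fails to compile if the signatures drift from the interface.
var _ framework.FilterPlugin = &nodeLabelFilter{}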

View File

@@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@@ -48,7 +49,7 @@ func (b DefaultBinder) Name() string {
}
// Bind binds pods to nodes using the k8s client.
func (b DefaultBinder) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (b DefaultBinder) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
logger := klog.FromContext(ctx)
logger.V(3).Info("Attempting to bind pod to node", "pod", klog.KObj(p), "node", klog.KRef("", nodeName))
binding := &v1.Binding{

View File

@@ -22,6 +22,8 @@ import (
"math/rand"
"sort"
fwk "k8s.io/kube-scheduler/framework"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -128,7 +130,7 @@ func New(_ context.Context, dpArgs runtime.Object, fh framework.Handle, fts feat
}
// PostFilter invoked at the postFilter extension point.
func (pl *DefaultPreemption) PostFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
func (pl *DefaultPreemption) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
defer func() {
metrics.PreemptionAttempts.Inc()
}()
@@ -216,7 +218,7 @@ func (pl *DefaultPreemption) CandidatesToVictimsMap(candidates []preemption.Cand
// for "pod" to be scheduled.
func (pl *DefaultPreemption) SelectVictimsOnNode(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
pod *v1.Pod,
nodeInfo *framework.NodeInfo,
pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status) {

View File

@@ -48,6 +48,7 @@ import (
"k8s.io/klog/v2/ktesting"
kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
@@ -123,14 +124,14 @@ func newTestPlugin(_ context.Context, injArgs runtime.Object, f framework.Handle
return &TestPlugin{name: "test-plugin"}, nil
}
func (pl *TestPlugin) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) AddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo.Node().GetLabels()["error"] == "true" {
return framework.AsStatus(fmt.Errorf("failed to add pod: %v", podToSchedule.Name))
}
return nil
}
func (pl *TestPlugin) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) RemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo.Node().GetLabels()["error"] == "true" {
return framework.AsStatus(fmt.Errorf("failed to remove pod: %v", podToSchedule.Name))
}
@@ -145,11 +146,11 @@ func (pl *TestPlugin) PreFilterExtensions() framework.PreFilterExtensions {
return pl
}
func (pl *TestPlugin) PreFilter(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *TestPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
return nil, nil
}
func (pl *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return nil
}

View File

@@ -41,6 +41,7 @@ import (
"k8s.io/dynamic-resource-allocation/resourceclaim"
"k8s.io/dynamic-resource-allocation/structured"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@@ -51,11 +52,11 @@ const (
// Name is the name of the plugin used in Registry and configurations.
Name = names.DynamicResources
stateKey framework.StateKey = Name
stateKey fwk.StateKey = Name
)
// The state is initialized in PreFilter phase. Because we save the pointer in
// framework.CycleState, in the later phases we don't need to call Write method
// fwk.CycleState, in the later phases we don't need to call Write method
// to update the value
type stateData struct {
// A copy of all claims for the Pod (i.e. 1:1 match with
@@ -88,7 +89,7 @@ type stateData struct {
nodeAllocations map[string][]resourceapi.AllocationResult
}
func (d *stateData) Clone() framework.StateData {
func (d *stateData) Clone() fwk.StateData {
return d
}
@@ -348,7 +349,7 @@ func (pl *DynamicResources) foreachPodResourceClaim(pod *v1.Pod, cb func(podReso
// PreFilter invoked at the prefilter extension point to check if pod has all
// immediate claims bound. UnschedulableAndUnresolvable is returned if
// the pod cannot be scheduled at the moment on any node.
func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *DynamicResources) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
if !pl.enabled {
return nil, framework.NewStatus(framework.Skip)
}
@@ -496,7 +497,7 @@ func (pl *DynamicResources) PreFilterExtensions() framework.PreFilterExtensions
return nil
}
func getStateData(cs *framework.CycleState) (*stateData, error) {
func getStateData(cs fwk.CycleState) (*stateData, error) {
state, err := cs.Read(stateKey)
if err != nil {
return nil, err
@@ -517,7 +518,7 @@ func getStateData(cs *framework.CycleState) (*stateData, error) {
//
// For claims that are unbound, it checks whether the claim might get allocated
// for the node.
func (pl *DynamicResources) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *DynamicResources) Filter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if !pl.enabled {
return nil
}
@@ -604,7 +605,7 @@ func (pl *DynamicResources) Filter(ctx context.Context, cs *framework.CycleState
// deallocated to help get the Pod schedulable. If yes, it picks one and
// requests its deallocation. This only gets called when filtering found no
// suitable node.
func (pl *DynamicResources) PostFilter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
func (pl *DynamicResources) PostFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
if !pl.enabled {
return nil, framework.NewStatus(framework.Unschedulable, "plugin disabled")
}
@@ -642,7 +643,7 @@ func (pl *DynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS
}
// Reserve reserves claims for the pod.
func (pl *DynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
func (pl *DynamicResources) Reserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
if !pl.enabled {
return nil
}
@@ -721,7 +722,7 @@ func (pl *DynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
// Unreserve clears the ReservedFor field for all claims.
// It's idempotent, and does nothing if no state found for the given pod.
func (pl *DynamicResources) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) {
func (pl *DynamicResources) Unreserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) {
if !pl.enabled {
return
}
@@ -769,7 +770,7 @@ func (pl *DynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
// If anything fails, we return an error and
// the pod will have to go into the backoff queue. The scheduler will call
// Unreserve as part of the error handling.
func (pl *DynamicResources) PreBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (pl *DynamicResources) PreBind(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if !pl.enabled {
return nil
}
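
getStateData above is the standard typed accessor: Read once, assert the concrete type, and, because PreFilter stored a pointer, mutate through that pointer in later phases without another Write. A generic sketch of the same pattern inside a plugin package like this one (getTypedState is a hypothetical helper):

func getTypedState(cs fwk.CycleState) (*stateData, error) {
	v, err := cs.Read(stateKey)
	if err != nil {
		// Typically fwk.ErrNotFound when PreFilter was skipped or didn't run.
		return nil, err
	}
	s, ok := v.(*stateData)
	if !ok {
		return nil, fmt.Errorf("unexpected state type %T", v)
	}
	return s, nil
}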

View File

@@ -39,6 +39,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
cgotesting "k8s.io/client-go/testing"
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@@ -1109,7 +1110,7 @@ type testContext struct {
draManager *DefaultDRAManager
p *DynamicResources
nodeInfos []*framework.NodeInfo
state *framework.CycleState
state fwk.CycleState
}
func (tc *testContext) verify(t *testing.T, expected result, initialObjects []metav1.Object, result interface{}, status *framework.Status) {

View File

@@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -43,7 +44,7 @@ type stateData struct {
data string
}
func (s *stateData) Clone() framework.StateData {
func (s *stateData) Clone() fwk.StateData {
copy := &stateData{
data: s.data,
}
@@ -51,33 +52,33 @@ func (s *stateData) Clone() framework.StateData {
}
// Reserve is the function invoked by the framework at "reserve" extension point.
func (mc CommunicatingPlugin) Reserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (mc CommunicatingPlugin) Reserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if pod == nil {
return framework.NewStatus(framework.Error, "pod cannot be nil")
}
if pod.Name == "my-test-pod" {
state.Write(framework.StateKey(pod.Name), &stateData{data: "never bind"})
state.Write(fwk.StateKey(pod.Name), &stateData{data: "never bind"})
}
return nil
}
// Unreserve is the function invoked by the framework when any error happens
// during "reserve" extension point or later.
func (mc CommunicatingPlugin) Unreserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (mc CommunicatingPlugin) Unreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if pod.Name == "my-test-pod" {
// The pod is at the end of its lifecycle -- let's clean up the allocated
// resources. In this case, our clean up is simply deleting the key written
// in the Reserve operation.
state.Delete(framework.StateKey(pod.Name))
state.Delete(fwk.StateKey(pod.Name))
}
}
// PreBind is the function invoked by the framework at "prebind" extension point.
func (mc CommunicatingPlugin) PreBind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (mc CommunicatingPlugin) PreBind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if pod == nil {
return framework.NewStatus(framework.Error, "pod cannot be nil")
}
if v, e := state.Read(framework.StateKey(pod.Name)); e == nil {
if v, e := state.Read(fwk.StateKey(pod.Name)); e == nil {
if value, ok := v.(*stateData); ok && value.data == "never bind" {
return framework.NewStatus(framework.Unschedulable, "pod is not permitted")
}

View File

@@ -18,8 +18,10 @@ package prebind
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -38,7 +40,7 @@ func (sr StatelessPreBindExample) Name() string {
}
// PreBind is the function invoked by the framework at "prebind" extension point.
func (sr StatelessPreBindExample) PreBind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (sr StatelessPreBindExample) PreBind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if pod == nil {
return framework.NewStatus(framework.Error, "pod cannot be nil")
}

View File

@@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -49,7 +50,7 @@ func (mp *MultipointExample) Name() string {
// Reserve is the function invoked by the framework at "reserve" extension
// point. In this trivial example, the Reserve method allocates an array of
// strings.
func (mp *MultipointExample) Reserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (mp *MultipointExample) Reserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
// Reserve is not called concurrently, and so we don't need to lock.
mp.executionPoints = append(mp.executionPoints, "reserve")
return nil
@@ -59,7 +60,7 @@ func (mp *MultipointExample) Reserve(ctx context.Context, state *framework.Cycle
// during "reserve" extension point or later. In this example, the Unreserve
// method loses its reference to the string slice, allowing it to be garbage
// collected, and thereby "unallocating" the reserved resources.
func (mp *MultipointExample) Unreserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (mp *MultipointExample) Unreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
// Unlike Reserve, the Unreserve method may be called concurrently since
// there is no guarantee that there will be only one unreserve operation at any
// given point in time (for example, during the binding cycle).
@@ -70,7 +71,7 @@ func (mp *MultipointExample) Unreserve(ctx context.Context, state *framework.Cyc
// PreBind is the function invoked by the framework at "prebind" extension
// point.
func (mp *MultipointExample) PreBind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (mp *MultipointExample) PreBind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
// PreBind could be called concurrently for different pods.
mp.mu.Lock()
defer mp.mu.Unlock()

View File

@@ -22,6 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
@@ -50,7 +51,7 @@ func (pl *ImageLocality) Name() string {
}
// Score invoked at the score extension point.
func (pl *ImageLocality) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *ImageLocality) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
nodeInfos, err := pl.handle.SnapshotSharedLister().NodeInfos().List()
if err != nil {
return 0, framework.AsStatus(err)

View File

@@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -55,7 +56,7 @@ type preFilterState struct {
}
// Clone the prefilter state.
func (s *preFilterState) Clone() framework.StateData {
func (s *preFilterState) Clone() fwk.StateData {
if s == nil {
return nil
}
@@ -270,7 +271,7 @@ func (pl *InterPodAffinity) getIncomingAffinityAntiAffinityCounts(ctx context.Co
}
// PreFilter invoked at the prefilter extension point.
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, allNodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, allNodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
var nodesWithRequiredAntiAffinityPods []*framework.NodeInfo
var err error
if nodesWithRequiredAntiAffinityPods, err = pl.sharedLister.NodeInfos().HavePodsWithRequiredAntiAffinityList(); err != nil {
@@ -313,7 +314,7 @@ func (pl *InterPodAffinity) PreFilterExtensions() framework.PreFilterExtensions
}
// AddPod from pre-computed data in cycleState.
func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -323,7 +324,7 @@ func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.Cy
}
// RemovePod from pre-computed data in cycleState.
func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -332,7 +333,7 @@ func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework
return nil
}
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
@@ -408,7 +409,7 @@ func satisfyPodAffinity(state *preFilterState, nodeInfo *framework.NodeInfo) boo
// Filter invoked at the filter extension point.
// It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {

View File

@@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/backend/cache"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -1005,7 +1006,7 @@ func TestPreFilterDisabled(t *testing.T) {
p := plugintesting.SetupPluginWithInformers(ctx, t, schedruntime.FactoryAdapter(feature.Features{}, New), &config.InterPodAffinityArgs{}, cache.NewEmptySnapshot(), nil)
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := framework.AsStatus(framework.ErrNotFound)
wantStatus := framework.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(gotStatus, wantStatus); diff != "" {
t.Errorf("Status does not match (-want,+got):\n%s", diff)
}
@@ -1242,7 +1243,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// getMeta creates predicate meta data given the list of pods.
getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) {
getState := func(pods []*v1.Pod) (*InterPodAffinity, fwk.CycleState, *preFilterState, *cache.Snapshot) {
snapshot := cache.NewSnapshot(pods, test.nodes)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)

View File

@@ -25,6 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -43,7 +44,7 @@ type preScoreState struct {
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
func (s *preScoreState) Clone() fwk.StateData {
return s
}
@@ -126,7 +127,7 @@ func (pl *InterPodAffinity) processExistingPod(
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *InterPodAffinity) PreScore(
pCtx context.Context,
cycleState *framework.CycleState,
cycleState fwk.CycleState,
pod *v1.Pod,
nodes []*framework.NodeInfo,
) *framework.Status {
@@ -219,7 +220,7 @@ func (pl *InterPodAffinity) PreScore(
return nil
}
func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) {
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("failed to read %q from cycleState: %w", preScoreStateKey, err)
@@ -236,7 +237,7 @@ func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error)
// The "score" returned in this function is the sum of weights got from cycleState which have its topologyKey matching with the node's labels.
// it is normalized later.
// Note: the returned "score" is positive for pod-affinity, and negative for pod-antiaffinity.
func (pl *InterPodAffinity) Score(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *InterPodAffinity) Score(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
s, err := getPreScoreState(cycleState)
@@ -254,7 +255,7 @@ func (pl *InterPodAffinity) Score(ctx context.Context, cycleState *framework.Cyc
}
// NormalizeScore normalizes the score for each filteredNode.
func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
s, err := getPreScoreState(cycleState)
if err != nil {
return framework.AsStatus(err)

View File

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -79,7 +80,7 @@ type preFilterState struct {
}
// Clone just returns the same state because it is not affected by pod additions or deletions.
func (s *preFilterState) Clone() framework.StateData {
func (s *preFilterState) Clone() fwk.StateData {
return s
}
@@ -143,7 +144,7 @@ func (pl *NodeAffinity) isSchedulableAfterNodeChange(logger klog.Logger, pod *v1
}
// PreFilter builds and writes cycle state used by Filter.
func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
affinity := pod.Spec.Affinity
noNodeAffinity := (affinity == nil ||
affinity.NodeAffinity == nil ||
@@ -202,7 +203,7 @@ func (pl *NodeAffinity) PreFilterExtensions() framework.PreFilterExtensions {
// Filter checks if the Node matches the Pod .spec.affinity.nodeAffinity and
// the plugin's added affinity.
func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *NodeAffinity) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if pl.addedNodeSelector != nil && !pl.addedNodeSelector.Match(node) {
@@ -232,12 +233,12 @@ type preScoreState struct {
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
func (s *preScoreState) Clone() fwk.StateData {
return s
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
preferredNodeAffinity, err := getPodPreferredNodeAffinity(pod)
if err != nil {
return framework.AsStatus(err)
@@ -256,7 +257,7 @@ func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState *framework.Cycl
// Score returns the sum of the weights of the terms that match the Node.
// Terms came from the Pod .spec.affinity.nodeAffinity and from the plugin's
// default affinity.
func (pl *NodeAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *NodeAffinity) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
var count int64
@@ -284,7 +285,7 @@ func (pl *NodeAffinity) Score(ctx context.Context, state *framework.CycleState,
}
// NormalizeScore invoked after scoring all nodes.
func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
func (pl *NodeAffinity) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
return helper.DefaultNormalizeScore(framework.MaxNodeScore, false, scores)
}
@@ -337,7 +338,7 @@ func getPodPreferredNodeAffinity(pod *v1.Pod) (*nodeaffinity.PreferredScheduling
return nil, nil
}
func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) {
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err)
@@ -350,7 +351,7 @@ func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error)
return s, nil
}
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %v", preFilterStateKey, err)

View File

@@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@@ -68,7 +69,7 @@ func (pl *NodeName) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *NodeName) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if !Fits(pod, nodeInfo) {
return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason)

View File

@@ -23,6 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@@ -53,7 +54,7 @@ const (
type preFilterState []*v1.ContainerPort
// Clone the prefilter state.
func (s preFilterState) Clone() framework.StateData {
func (s preFilterState) Clone() fwk.StateData {
// The state is not impacted by adding/removing existing pods, hence we don't need to make a deep copy.
return s
}
@@ -83,7 +84,7 @@ func getContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort {
}
// PreFilter invoked at the prefilter extension point.
func (pl *NodePorts) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *NodePorts) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
s := getContainerPorts(pod)
// Skip if a pod has no ports.
if len(s) == 0 {
@@ -98,7 +99,7 @@ func (pl *NodePorts) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error) {
func getPreFilterState(cycleState fwk.CycleState) (preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
@@ -177,7 +178,7 @@ func (pl *NodePorts) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Po
}
// Filter invoked at the filter extension point.
func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *NodePorts) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
wantPorts, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
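
The shallow Clone above is safe only because AddPod/RemovePod never mutate preFilterState, per the StateData contract. When extensions do mutate the state, Clone must deep-copy the affected members; a sketch of both variants (types are illustrative):

type sharedState struct{ ports []*v1.ContainerPort }

// Never mutated after PreFilter, so the clone may share the slice.
func (s *sharedState) Clone() fwk.StateData { return s }

type countedState struct{ counts map[string]int }

// AddPod/RemovePod update counts, so each clone needs its own map.
func (s *countedState) Clone() fwk.StateData {
	c := &countedState{counts: make(map[string]int, len(s.counts))}
	for k, v := range s.counts {
		c.counts[k] = v
	}
	return c
}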

View File

@@ -28,6 +28,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2/ktesting"
_ "k8s.io/klog/v2/ktesting/init"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
st "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -182,7 +183,7 @@ func TestPreFilterDisabled(t *testing.T) {
}
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := framework.AsStatus(framework.ErrNotFound)
wantStatus := framework.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}

View File

@@ -23,6 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -57,12 +58,12 @@ type balancedAllocationPreScoreState struct {
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *balancedAllocationPreScoreState) Clone() framework.StateData {
func (s *balancedAllocationPreScoreState) Clone() fwk.StateData {
return s
}
// PreScore calculates incoming pod's resource requests and writes them to the cycle state used.
func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
podRequests := ba.calculatePodResourceRequestList(pod, ba.resources)
if ba.isBestEffortPod(podRequests) {
// Skip BalancedAllocation scoring for best-effort pods to
@@ -77,7 +78,7 @@ func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState *framewor
return nil
}
func getBalancedAllocationPreScoreState(cycleState *framework.CycleState) (*balancedAllocationPreScoreState, error) {
func getBalancedAllocationPreScoreState(cycleState fwk.CycleState) (*balancedAllocationPreScoreState, error) {
c, err := cycleState.Read(balancedAllocationPreScoreStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %w", balancedAllocationPreScoreStateKey, err)
@@ -96,7 +97,7 @@ func (ba *BalancedAllocation) Name() string {
}
// Score invoked at the score extension point.
func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (ba *BalancedAllocation) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
s, err := getBalancedAllocationPreScoreState(state)
if err != nil {
s = &balancedAllocationPreScoreState{podRequests: ba.calculatePodResourceRequestList(pod, ba.resources)}
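One behavioral detail worth keeping in view in the Score hunk above: a failed state read is treated as "PreScore was skipped" and the state is recomputed on the fly rather than aborting the cycle. A hedged sketch of that fallback shape; every name below is a hypothetical stand-in:

package sketch

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type demoPreScoreState struct{ requests int64 }

func (s *demoPreScoreState) Clone() fwk.StateData { return s }

func computeState(pod *v1.Pod) *demoPreScoreState {
	return &demoPreScoreState{requests: int64(len(pod.Spec.Containers))}
}

func getState(cs fwk.CycleState) (*demoPreScoreState, bool) {
	c, err := cs.Read("PreScoreDemo")
	if err != nil {
		return nil, false
	}
	s, ok := c.(*demoPreScoreState)
	return s, ok
}

// Score recomputes the state when PreScore did not run, instead of failing.
func Score(_ context.Context, state fwk.CycleState, pod *v1.Pod, _ *framework.NodeInfo) (int64, *framework.Status) {
	s, ok := getState(state)
	if !ok {
		s = computeState(pod)
	}
	return s.requests, nil
}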

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
@@ -106,7 +107,7 @@ type preFilterState struct {
}
// Clone the prefilter state.
func (s *preFilterState) Clone() framework.StateData {
func (s *preFilterState) Clone() fwk.StateData {
return s
}
@@ -119,12 +120,12 @@ type preScoreState struct {
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
func (s *preScoreState) Clone() fwk.StateData {
return s
}
// PreScore calculates incoming pod's resource requests and writes them to the cycle state used.
func (f *Fit) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (f *Fit) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
state := &preScoreState{
podRequests: f.calculatePodResourceRequestList(pod, f.resources),
}
@@ -132,7 +133,7 @@ func (f *Fit) PreScore(ctx context.Context, cycleState *framework.CycleState, po
return nil
}
func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) {
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err)
@@ -227,7 +228,7 @@ func computePodResourceRequest(pod *v1.Pod, opts ResourceRequestsOptions) *preFi
}
// PreFilter invoked at the prefilter extension point.
func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (f *Fit) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
if !f.enableSidecarContainers && hasRestartableInitContainer(pod) {
// Scheduler will calculate resources usage for a Pod containing
// restartable init containers that will be equal or more than kubelet will
@@ -245,7 +246,7 @@ func (f *Fit) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
@@ -452,7 +453,7 @@ func isFit(pod *v1.Pod, node *v1.Node, opts ResourceRequestsOptions) bool {
// Filter invoked at the filter extension point.
// Checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod.
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (f *Fit) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -592,7 +593,7 @@ func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignor
}
// Score invoked at the Score extension point.
func (f *Fit) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (f *Fit) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
s, err := getPreScoreState(state)
if err != nil {
s = &preScoreState{

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2/ktesting"
_ "k8s.io/klog/v2/ktesting/init"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/backend/cache"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -653,7 +654,7 @@ func TestPreFilterDisabled(t *testing.T) {
}
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := framework.AsStatus(framework.ErrNotFound)
wantStatus := framework.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(wantStatus, gotStatus); diff != "" {
t.Errorf("status does not match (-want,+got):\n%s", diff)
}

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@@ -129,7 +130,7 @@ func (pl *NodeUnschedulable) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *NodeUnschedulable) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if !node.Spec.Unschedulable {

View File

@@ -32,6 +32,7 @@ import (
storagehelpers "k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@@ -233,7 +234,7 @@ func (pl *CSILimits) isSchedulableAfterCSINodeUpdated(logger klog.Logger, pod *v
// PreFilter invoked at the prefilter extension point
//
// If the pod doesn't have any of those volume types, we'll skip the Filter phase
func (pl *CSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, _ []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *CSILimits) PreFilter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, _ []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
@@ -251,7 +252,7 @@ func (pl *CSILimits) PreFilterExtensions() framework.PreFilterExtensions {
}
// Filter invoked at the filter extension point.
func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *CSILimits) Filter(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If the new pod doesn't have any volume attached to it, the predicate will always be true
if len(pod.Spec.Volumes) == 0 {
return nil

View File

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -68,7 +69,7 @@ func (s *preFilterState) minMatchNum(constraintID int, minDomains int32) (int, e
}
// Clone makes a copy of the given state.
func (s *preFilterState) Clone() framework.StateData {
func (s *preFilterState) Clone() fwk.StateData {
if s == nil {
return nil
}
@@ -136,7 +137,7 @@ func (p *criticalPaths) update(tpVal string, num int) {
}
// PreFilter invoked at the prefilter extension point.
func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *PodTopologySpread) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
s, err := pl.calPreFilterState(ctx, pod, nodes)
if err != nil {
return nil, framework.AsStatus(err)
@@ -154,7 +155,7 @@ func (pl *PodTopologySpread) PreFilterExtensions() framework.PreFilterExtensions
}
// AddPod from pre-computed data in cycleState.
func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -165,7 +166,7 @@ func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.C
}
// RemovePod from pre-computed data in cycleState.
func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -210,7 +211,7 @@ func (pl *PodTopologySpread) updateWithPod(s *preFilterState, updatedPod, preemp
}
// getPreFilterState fetches a pre-computed preFilterState.
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
@@ -307,7 +308,7 @@ func (pl *PodTopologySpread) calPreFilterState(ctx context.Context, pod *v1.Pod,
}
// Filter invoked at the filter extension point.
func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
s, err := getPreFilterState(cycleState)
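The AddPod/RemovePod hunks above also show why this plugin's Clone makes a real copy rather than returning the receiver: preemption dry runs mutate the precomputed counts on a cloned state. A hedged sketch of that incremental-update shape, with all names hypothetical:

package sketch

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type spreadState struct{ matching int }

// Clone makes a real copy, because AddPod/RemovePod mutate the counts on the
// simulated state during preemption dry runs.
func (s *spreadState) Clone() fwk.StateData {
	if s == nil {
		return nil
	}
	cp := *s
	return &cp
}

func getSpreadState(cs fwk.CycleState) (*spreadState, error) {
	c, err := cs.Read("PodTopologySpreadDemo")
	if err != nil {
		return nil, err
	}
	s, ok := c.(*spreadState)
	if !ok {
		return nil, fmt.Errorf("unexpected state type %T", c)
	}
	return s, nil
}

type spreadPlugin struct{}

// AddPod and RemovePod adjust the precomputed counts incrementally rather
// than recomputing them from scratch for every simulated victim.
func (pl *spreadPlugin) AddPod(_ context.Context, cs fwk.CycleState, _ *v1.Pod, _ *framework.PodInfo, _ *framework.NodeInfo) *framework.Status {
	s, err := getSpreadState(cs)
	if err != nil {
		return framework.AsStatus(err)
	}
	s.matching++
	return nil
}

func (pl *spreadPlugin) RemovePod(_ context.Context, cs fwk.CycleState, _ *v1.Pod, _ *framework.PodInfo, _ *framework.NodeInfo) *framework.Status {
	s, err := getSpreadState(cs)
	if err != nil {
		return framework.AsStatus(err)
	}
	s.matching--
	return nil
}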

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/backend/cache"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -36,6 +37,7 @@ import (
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/metrics"
st "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/utils/ptr"
)
@@ -2368,7 +2370,7 @@ func BenchmarkFilter(b *testing.B) {
},
}
for _, tt := range tests {
var state *framework.CycleState
var state fwk.CycleState
b.Run(tt.name, func(b *testing.B) {
existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
_, ctx := ktesting.NewTestContext(b)
@@ -3442,7 +3444,7 @@ func TestPreFilterDisabled(t *testing.T) {
p := plugintesting.SetupPlugin(ctx, t, topologySpreadFunc, &config.PodTopologySpreadArgs{DefaultingType: config.ListDefaulting}, cache.NewEmptySnapshot())
cycleState := framework.NewCycleState()
gotStatus := p.(*PodTopologySpread).Filter(ctx, cycleState, pod, nodeInfo)
wantStatus := framework.AsStatus(framework.ErrNotFound)
wantStatus := framework.AsStatus(fwk.ErrNotFound)
if diff := cmp.Diff(wantStatus, gotStatus); diff != "" {
t.Errorf("Status does not match (-want,+got):\n%s", diff)
}

View File

@@ -25,6 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -48,7 +49,7 @@ type preScoreState struct {
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
func (s *preScoreState) Clone() fwk.StateData {
return s
}
@@ -116,7 +117,7 @@ func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, fi
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *PodTopologySpread) PreScore(
ctx context.Context,
cycleState *framework.CycleState,
cycleState fwk.CycleState,
pod *v1.Pod,
filteredNodes []*framework.NodeInfo,
) *framework.Status {
@@ -192,7 +193,7 @@ func (pl *PodTopologySpread) PreScore(
// Score invoked at the Score extension point.
// The "score" returned in this function is the matching number of pods on the `nodeName`,
// it is normalized later.
func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *PodTopologySpread) Score(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
s, err := getPreScoreState(cycleState)
if err != nil {
@@ -222,7 +223,7 @@ func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.Cy
}
// NormalizeScore invoked after scoring all nodes.
func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
func (pl *PodTopologySpread) NormalizeScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
s, err := getPreScoreState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -268,7 +269,7 @@ func (pl *PodTopologySpread) ScoreExtensions() framework.ScoreExtensions {
return pl
}
func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) {
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("error reading %q from cycleState: %w", preScoreStateKey, err)

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
@@ -107,7 +108,7 @@ func (pl *TaintToleration) isSchedulableAfterNodeChange(logger klog.Logger, pod
}
// Filter invoked at the filter extension point.
func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TaintToleration) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
taint, isUntolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, pod.Spec.Tolerations, helper.DoNotScheduleTaintsFilterFunc())
@@ -126,7 +127,7 @@ type preScoreState struct {
// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
func (s *preScoreState) Clone() fwk.StateData {
return s
}
@@ -142,7 +143,7 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi
}
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *TaintToleration) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (pl *TaintToleration) PreScore(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
state := &preScoreState{
tolerationsPreferNoSchedule: tolerationsPreferNoSchedule,
@@ -151,7 +152,7 @@ func (pl *TaintToleration) PreScore(ctx context.Context, cycleState *framework.C
return nil
}
func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) {
func getPreScoreState(cycleState fwk.CycleState) (*preScoreState, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("failed to read %q from cycleState: %w", preScoreStateKey, err)
@@ -180,7 +181,7 @@ func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.
}
// Score invoked at the Score extension point.
func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *TaintToleration) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
s, err := getPreScoreState(state)
@@ -193,7 +194,7 @@ func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleStat
}
// NormalizeScore invoked after scoring all nodes.
func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
return helper.DefaultNormalizeScore(framework.MaxNodeScore, true, scores)
}

View File

@@ -32,6 +32,7 @@ import (
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -42,13 +43,13 @@ import (
)
const (
stateKey framework.StateKey = Name
stateKey fwk.StateKey = Name
maxUtilization = 100
)
// The state is initialized in the PreFilter phase. Because we save the pointer in
// framework.CycleState, in the later phases we don't need to call Write method
// fwk.CycleState, in the later phases we don't need to call Write method
// to update the value
type stateData struct {
allBound bool
@@ -63,7 +64,7 @@ type stateData struct {
sync.Mutex
}
func (d *stateData) Clone() framework.StateData {
func (d *stateData) Clone() fwk.StateData {
return d
}
@@ -349,7 +350,7 @@ func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
// PreFilter invoked at the prefilter extension point to check if pod has all
// immediate PVCs bound. If not all immediate PVCs are bound, an
// UnschedulableAndUnresolvable is returned.
func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *VolumeBinding) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
logger := klog.FromContext(ctx)
// If pod does not reference any PVC, we don't need to do anything.
if hasPVC, err := pl.podHasPVCs(pod); err != nil {
@@ -386,7 +387,7 @@ func (pl *VolumeBinding) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
func getStateData(cs *framework.CycleState) (*stateData, error) {
func getStateData(cs fwk.CycleState) (*stateData, error) {
state, err := cs.Read(stateKey)
if err != nil {
return nil, err
@@ -413,7 +414,7 @@ func getStateData(cs *framework.CycleState) (*stateData, error) {
//
// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
// PVCs can be matched with an available and node-compatible PV.
func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *VolumeBinding) Filter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
logger := klog.FromContext(ctx)
node := nodeInfo.Node()
@@ -445,7 +446,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
}
// PreScore invoked at the preScore extension point. It checks whether volumeBinding can skip Score
func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (pl *VolumeBinding) PreScore(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
if pl.scorer == nil {
return framework.NewStatus(framework.Skip)
}
@@ -460,7 +461,7 @@ func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState,
}
// Score invoked at the score extension point.
func (pl *VolumeBinding) Score(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *VolumeBinding) Score(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
if pl.scorer == nil {
return 0, nil
}
@@ -520,7 +521,7 @@ func (pl *VolumeBinding) ScoreExtensions() framework.ScoreExtensions {
}
// Reserve reserves volumes of pod and saves binding status in cycle state.
func (pl *VolumeBinding) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (pl *VolumeBinding) Reserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
state, err := getStateData(cs)
if err != nil {
return framework.AsStatus(err)
@@ -545,7 +546,7 @@ func (pl *VolumeBinding) Reserve(ctx context.Context, cs *framework.CycleState,
//
// If binding errors, times out or gets undone, then an error will be returned to
// retry scheduling.
func (pl *VolumeBinding) PreBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (pl *VolumeBinding) PreBind(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
s, err := getStateData(cs)
if err != nil {
return framework.AsStatus(err)
@@ -572,7 +573,7 @@ func (pl *VolumeBinding) PreBind(ctx context.Context, cs *framework.CycleState,
// Unreserve clears assumed PV and PVC cache.
// It's idempotent, and does nothing if no cache found for the given pod.
func (pl *VolumeBinding) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) {
func (pl *VolumeBinding) Unreserve(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeName string) {
s, err := getStateData(cs)
if err != nil {
return
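The comment above describes a write-once, mutate-by-pointer convention: PreFilter stores a pointer, later phases change fields through it, and Write is never called again. A hedged sketch of it, assuming Write keeps its (key, value) shape on the staged interface; names are hypothetical:

package sketch

import (
	"fmt"
	"sync"

	fwk "k8s.io/kube-scheduler/framework"
)

const bindingKey fwk.StateKey = "VolumeBindingDemo"

// bindingState is written once in PreFilter; later phases mutate it through
// the stored pointer, so no second Write is needed. The embedded mutex
// guards concurrent access during binding.
type bindingState struct {
	sync.Mutex
	allBound bool
}

func (d *bindingState) Clone() fwk.StateData { return d }

func preFilter(cs fwk.CycleState) {
	cs.Write(bindingKey, &bindingState{})
}

func reserve(cs fwk.CycleState) error {
	c, err := cs.Read(bindingKey)
	if err != nil {
		return err
	}
	d, ok := c.(*bindingState)
	if !ok {
		return fmt.Errorf("unexpected state type %T", c)
	}
	d.Lock()
	d.allBound = true // visible to PreBind without another Write
	d.Unlock()
	return nil
}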

View File

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
@@ -43,7 +44,7 @@ type VolumeRestrictions struct {
var _ framework.PreFilterPlugin = &VolumeRestrictions{}
var _ framework.FilterPlugin = &VolumeRestrictions{}
var _ framework.EnqueueExtensions = &VolumeRestrictions{}
var _ framework.StateData = &preFilterState{}
var _ fwk.StateData = &preFilterState{}
const (
// Name is the name of the plugin used in the plugin registry and configurations.
@@ -84,7 +85,7 @@ func (s *preFilterState) conflictingPVCRefCountForPod(podInfo *framework.PodInfo
}
// Clone the prefilter state.
func (s *preFilterState) Clone() framework.StateData {
func (s *preFilterState) Clone() fwk.StateData {
if s == nil {
return nil
}
@@ -162,7 +163,7 @@ func needsRestrictionsCheck(v v1.Volume) bool {
}
// PreFilter computes and stores cycleState containing details for enforcing ReadWriteOncePod.
func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
needsCheck := false
for i := range pod.Spec.Volumes {
if needsRestrictionsCheck(pod.Spec.Volumes[i]) {
@@ -192,7 +193,7 @@ func (pl *VolumeRestrictions) PreFilter(ctx context.Context, cycleState *framewo
}
// AddPod from pre-computed data in cycleState.
func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -202,7 +203,7 @@ func (pl *VolumeRestrictions) AddPod(ctx context.Context, cycleState *framework.
}
// RemovePod from pre-computed data in cycleState.
func (pl *VolumeRestrictions) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *VolumeRestrictions) RemovePod(ctx context.Context, cycleState fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.AsStatus(err)
@@ -211,7 +212,7 @@ func (pl *VolumeRestrictions) RemovePod(ctx context.Context, cycleState *framewo
return nil
}
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
func getPreFilterState(cycleState fwk.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// preFilterState doesn't exist, likely PreFilter wasn't invoked.
@@ -306,7 +307,7 @@ func (pl *VolumeRestrictions) PreFilterExtensions() framework.PreFilterExtension
// - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only
// If the pod uses PVCs with the ReadWriteOncePod access mode, it evaluates if
// these PVCs are already in-use and if preemption will help.
func (pl *VolumeRestrictions) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *VolumeRestrictions) Filter(ctx context.Context, cycleState fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if !satisfyVolumeConflicts(pod, nodeInfo) {
return framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
}

View File

@@ -32,6 +32,7 @@ import (
volumehelpers "k8s.io/cloud-provider/volume/helpers"
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
@@ -54,7 +55,7 @@ const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.VolumeZone
preFilterStateKey framework.StateKey = "PreFilter" + Name
preFilterStateKey fwk.StateKey = "PreFilter" + Name
// ErrReasonConflict is used for NoVolumeZoneConflict predicate error.
ErrReasonConflict = "node(s) had no available volume zone"
@@ -68,7 +69,7 @@ type pvTopology struct {
}
// The state is initialized in the PreFilter phase. Because we save the pointer in
// framework.CycleState, in the later phases we don't need to call Write method
// fwk.CycleState, in the later phases we don't need to call Write method
// to update the value
type stateData struct {
// podPVTopologies holds the pv information we need
@@ -76,7 +77,7 @@ type stateData struct {
podPVTopologies []pvTopology
}
func (d *stateData) Clone() framework.StateData {
func (d *stateData) Clone() fwk.StateData {
return d
}
@@ -108,7 +109,7 @@ func (pl *VolumeZone) Name() string {
//
// Currently, this is only supported with PersistentVolumeClaims,
// and only looks for the bound PersistentVolume.
func (pl *VolumeZone) PreFilter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *VolumeZone) PreFilter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
logger := klog.FromContext(ctx)
podPVTopologies, status := pl.getPVbyPod(logger, pod)
if !status.IsSuccess() {
@@ -187,7 +188,7 @@ func (pl *VolumeZone) PreFilterExtensions() framework.PreFilterExtensions {
// determining the zone of a volume during scheduling, and that is likely to
// require calling out to the cloud provider. It seems that we are moving away
// from inline volume declarations anyway.
func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *VolumeZone) Filter(ctx context.Context, cs fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
logger := klog.FromContext(ctx)
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
@@ -237,7 +238,7 @@ func (pl *VolumeZone) Filter(ctx context.Context, cs *framework.CycleState, pod
return nil
}
func getStateData(cs *framework.CycleState) (*stateData, error) {
func getStateData(cs fwk.CycleState) (*stateData, error) {
state, err := cs.Read(preFilterStateKey)
if err != nil {
return nil, err

View File

@@ -37,6 +37,7 @@ import (
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
@@ -116,7 +117,7 @@ type Interface interface {
// SelectVictimsOnNode finds a minimum set of pods on the given node that should be preempted in order to make enough room
// for "pod" to be scheduled.
// Note that both `state` and `nodeInfo` are deep copied.
SelectVictimsOnNode(ctx context.Context, state *framework.CycleState,
SelectVictimsOnNode(ctx context.Context, state fwk.CycleState,
pod *v1.Pod, nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget) ([]*v1.Pod, int, *framework.Status)
// OrderedScoreFuncs returns a list of ordered score functions to select preferable node where victims will be preempted.
// The ordered score functions will be processed one by one iff we find more than one node with the highest score.
@@ -228,7 +229,7 @@ func (ev *Evaluator) IsPodRunningPreemption(podUID types.UID) bool {
//
// - <non-nil PostFilterResult, Success>. It's the regular happy path
// and the non-empty nominatedNodeName will be applied to the preemptor pod.
func (ev *Evaluator) Preempt(ctx context.Context, state *framework.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
func (ev *Evaluator) Preempt(ctx context.Context, state fwk.CycleState, pod *v1.Pod, m framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
logger := klog.FromContext(ctx)
// 0) Fetch the latest version of <pod>.
@@ -303,7 +304,7 @@ func (ev *Evaluator) Preempt(ctx context.Context, state *framework.CycleState, p
// FindCandidates calculates a slice of preemption candidates.
// Each candidate is executable to make the given <pod> schedulable.
func (ev *Evaluator) findCandidates(ctx context.Context, state *framework.CycleState, allNodes []*framework.NodeInfo, pod *v1.Pod, m framework.NodeToStatusReader) ([]Candidate, *framework.NodeToStatus, error) {
func (ev *Evaluator) findCandidates(ctx context.Context, state fwk.CycleState, allNodes []*framework.NodeInfo, pod *v1.Pod, m framework.NodeToStatusReader) ([]Candidate, *framework.NodeToStatus, error) {
if len(allNodes) == 0 {
return nil, nil, errors.New("no nodes available")
}
@@ -678,7 +679,7 @@ func getLowerPriorityNominatedPods(logger klog.Logger, pn framework.PodNominator
// The number of candidates depends on the constraints defined in the plugin's args. In the returned list of
// candidates, ones that do not violate PDB are preferred over ones that do.
// NOTE: This method is exported for easier testing in default preemption.
func (ev *Evaluator) DryRunPreemption(ctx context.Context, state *framework.CycleState, pod *v1.Pod, potentialNodes []*framework.NodeInfo,
func (ev *Evaluator) DryRunPreemption(ctx context.Context, state fwk.CycleState, pod *v1.Pod, potentialNodes []*framework.NodeInfo,
pdbs []*policy.PodDisruptionBudget, offset int32, candidatesNum int32) ([]Candidate, *framework.NodeToStatus, error) {
fh := ev.Handler

View File

@@ -42,6 +42,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -72,7 +73,7 @@ type FakePostFilterPlugin struct {
}
func (pl *FakePostFilterPlugin) SelectVictimsOnNode(
ctx context.Context, state *framework.CycleState, pod *v1.Pod,
ctx context.Context, state fwk.CycleState, pod *v1.Pod,
nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget) (victims []*v1.Pod, numViolatingVictim int, status *framework.Status) {
return append(victims, nodeInfo.Pods[0].Pod), pl.numViolatingVictim, nil
}
@@ -109,7 +110,7 @@ func (f *fakePodActivator) Activate(logger klog.Logger, pods map[string]*v1.Pod)
type FakePreemptionScorePostFilterPlugin struct{}
func (pl *FakePreemptionScorePostFilterPlugin) SelectVictimsOnNode(
ctx context.Context, state *framework.CycleState, pod *v1.Pod,
ctx context.Context, state fwk.CycleState, pod *v1.Pod,
nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget) (victims []*v1.Pod, numViolatingVictim int, status *framework.Status) {
return append(victims, nodeInfo.Pods[0].Pod), 1, nil
}

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/client-go/tools/events"
"k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/parallelize"
@@ -706,11 +707,11 @@ func (f *frameworkImpl) QueueSortFunc() framework.LessFunc {
// When it returns Skip status, returned PreFilterResult and other fields in status are just ignored,
// and coupled Filter plugin/PreFilterExtensions() will be skipped in this scheduling cycle.
// If a non-success status is returned, then the scheduling cycle is aborted.
func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (_ *framework.PreFilterResult, status *framework.Status, _ sets.Set[string]) {
func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod) (_ *framework.PreFilterResult, status *framework.Status, _ sets.Set[string]) {
startTime := time.Now()
skipPlugins := sets.New[string]()
defer func() {
state.SkipFilterPlugins = skipPlugins
state.SetSkipFilterPlugins(skipPlugins)
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
nodes, err := f.SnapshotSharedLister().NodeInfos().List()
@@ -769,7 +770,7 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framewor
return result, returnStatus, pluginsWithNodes
}
func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl framework.PreFilterPlugin, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl framework.PreFilterPlugin, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilter(ctx, state, pod, nodes)
}
@@ -784,7 +785,7 @@ func (f *frameworkImpl) runPreFilterPlugin(ctx context.Context, pl framework.Pre
// status other than Success.
func (f *frameworkImpl) RunPreFilterExtensionAddPod(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
podToSchedule *v1.Pod,
podInfoToAdd *framework.PodInfo,
nodeInfo *framework.NodeInfo,
@@ -795,7 +796,7 @@ func (f *frameworkImpl) RunPreFilterExtensionAddPod(
logger = klog.LoggerWithName(logger, "PreFilterExtension")
}
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) {
if pl.PreFilterExtensions() == nil || state.GetSkipFilterPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
@@ -814,7 +815,7 @@ func (f *frameworkImpl) RunPreFilterExtensionAddPod(
return nil
}
func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl framework.PreFilterPlugin, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl framework.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podInfoToAdd, nodeInfo)
}
@@ -829,7 +830,7 @@ func (f *frameworkImpl) runPreFilterExtensionAddPod(ctx context.Context, pl fram
// status other than Success.
func (f *frameworkImpl) RunPreFilterExtensionRemovePod(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
podToSchedule *v1.Pod,
podInfoToRemove *framework.PodInfo,
nodeInfo *framework.NodeInfo,
@@ -840,7 +841,7 @@ func (f *frameworkImpl) RunPreFilterExtensionRemovePod(
logger = klog.LoggerWithName(logger, "PreFilterExtension")
}
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil || state.SkipFilterPlugins.Has(pl.Name()) {
if pl.PreFilterExtensions() == nil || state.GetSkipFilterPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
@@ -859,7 +860,7 @@ func (f *frameworkImpl) RunPreFilterExtensionRemovePod(
return nil
}
func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl framework.PreFilterPlugin, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl framework.PreFilterPlugin, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podInfoToRemove, nodeInfo)
}
@@ -875,7 +876,7 @@ func (f *frameworkImpl) runPreFilterExtensionRemovePod(ctx context.Context, pl f
// Meanwhile, the failure message and status are set for the given node.
func (f *frameworkImpl) RunFilterPlugins(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
pod *v1.Pod,
nodeInfo *framework.NodeInfo,
) *framework.Status {
@@ -886,7 +887,7 @@ func (f *frameworkImpl) RunFilterPlugins(
}
for _, pl := range f.filterPlugins {
if state.SkipFilterPlugins.Has(pl.Name()) {
if state.GetSkipFilterPlugins().Has(pl.Name()) {
continue
}
ctx := ctx
@@ -908,7 +909,7 @@ func (f *frameworkImpl) RunFilterPlugins(
return nil
}
func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.FilterPlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Filter(ctx, state, pod, nodeInfo)
}
@@ -920,7 +921,7 @@ func (f *frameworkImpl) runFilterPlugin(ctx context.Context, pl framework.Filter
// RunPostFilterPlugins runs the set of configured PostFilter plugins until the first
// Success, Error or UnschedulableAndUnresolvable is met; otherwise continues to execute all plugins.
func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (_ *framework.PostFilterResult, status *framework.Status) {
func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (_ *framework.PostFilterResult, status *framework.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostFilter, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -965,7 +966,7 @@ func (f *frameworkImpl) RunPostFilterPlugins(ctx context.Context, state *framewo
return result, framework.NewStatus(framework.Unschedulable, reasons...).WithPlugin(rejectorPlugin)
}
func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.PostFilterPlugin, state *framework.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.PostFilterPlugin, state fwk.CycleState, pod *v1.Pod, filteredNodeStatusMap framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.PostFilter(ctx, state, pod, filteredNodeStatusMap)
}
@@ -985,7 +986,7 @@ func (f *frameworkImpl) runPostFilterPlugin(ctx context.Context, pl framework.Po
// and add the nominated pods. Removal of the victims is done by
// SelectVictimsOnNode(). Preempt removes victims from PreFilter state and
// NodeInfo before calling this function.
func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, state *framework.CycleState, pod *v1.Pod, info *framework.NodeInfo) *framework.Status {
func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, state fwk.CycleState, pod *v1.Pod, info *framework.NodeInfo) *framework.Status {
var status *framework.Status
podsAdded := false
@@ -1035,7 +1036,7 @@ func (f *frameworkImpl) RunFilterPluginsWithNominatedPods(ctx context.Context, s
// addGENominatedPods adds pods with equal or greater priority which are nominated
// to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState,
// 3) augmented nodeInfo.
func addGENominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, state *framework.CycleState, nodeInfo *framework.NodeInfo) (bool, *framework.CycleState, *framework.NodeInfo, error) {
func addGENominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, state fwk.CycleState, nodeInfo *framework.NodeInfo) (bool, fwk.CycleState, *framework.NodeInfo, error) {
if fh == nil {
// This may happen only in tests.
return false, state, nodeInfo, nil
@@ -1066,14 +1067,14 @@ func addGENominatedPods(ctx context.Context, fh framework.Handle, pod *v1.Pod, s
// and coupled Score plugin will be skipped in this scheduling cycle.
func (f *frameworkImpl) RunPreScorePlugins(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
pod *v1.Pod,
nodes []*framework.NodeInfo,
) (status *framework.Status) {
startTime := time.Now()
skipPlugins := sets.New[string]()
defer func() {
state.SkipScorePlugins = skipPlugins
state.SetSkipScorePlugins(skipPlugins)
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreScore, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
}()
logger := klog.FromContext(ctx)
@@ -1099,7 +1100,7 @@ func (f *frameworkImpl) RunPreScorePlugins(
return nil
}
func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreScorePlugin, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreScorePlugin, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreScore(ctx, state, pod, nodes)
}
@@ -1113,7 +1114,7 @@ func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreS
// It returns a list that stores scores from each plugin and total score for each Node.
// It also returns *Status, which is set to non-success if any of the plugins returns
// a non-success status.
func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (ns []framework.NodePluginScores, status *framework.Status) {
func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (ns []framework.NodePluginScores, status *framework.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Score, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1123,7 +1124,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
plugins := make([]framework.ScorePlugin, 0, numPlugins)
pluginToNodeScores := make(map[string]framework.NodeScoreList, numPlugins)
for _, pl := range f.scorePlugins {
if state.SkipScorePlugins.Has(pl.Name()) {
if state.GetSkipScorePlugins().Has(pl.Name()) {
continue
}
plugins = append(plugins, pl)
@@ -1222,7 +1223,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
return allNodePluginScores, nil
}
func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePlugin, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
if !state.ShouldRecordPluginMetrics() {
return pl.Score(ctx, state, pod, nodeInfo)
}
@@ -1232,7 +1233,7 @@ func (f *frameworkImpl) runScorePlugin(ctx context.Context, pl framework.ScorePl
return s, status
}
func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl framework.ScorePlugin, state *framework.CycleState, pod *v1.Pod, nodeScoreList framework.NodeScoreList) *framework.Status {
func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl framework.ScorePlugin, state fwk.CycleState, pod *v1.Pod, nodeScoreList framework.NodeScoreList) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.ScoreExtensions().NormalizeScore(ctx, state, pod, nodeScoreList)
}
@@ -1245,7 +1246,7 @@ func (f *frameworkImpl) runScoreExtension(ctx context.Context, pl framework.Scor
// RunPreBindPlugins runs the set of configured prebind plugins. It returns a
// failure (bool) if any of the plugins returns an error. It also returns an
// error containing the rejection message or the error occurred in the plugin.
func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PreBind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1277,7 +1278,7 @@ func (f *frameworkImpl) RunPreBindPlugins(ctx context.Context, state *framework.
return nil
}
func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl framework.PreBindPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl framework.PreBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreBind(ctx, state, pod, nodeName)
}
@@ -1288,7 +1289,7 @@ func (f *frameworkImpl) runPreBindPlugin(ctx context.Context, pl framework.PreBi
}
// RunBindPlugins runs the set of configured bind plugins until one returns a non `Skip` status.
func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Bind, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1326,7 +1327,7 @@ func (f *frameworkImpl) RunBindPlugins(ctx context.Context, state *framework.Cyc
return status
}
func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp framework.BindPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp framework.BindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return bp.Bind(ctx, state, pod, nodeName)
}
@@ -1337,7 +1338,7 @@ func (f *frameworkImpl) runBindPlugin(ctx context.Context, bp framework.BindPlug
}
// RunPostBindPlugins runs the set of configured postbind plugins.
func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.PostBind, framework.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1357,7 +1358,7 @@ func (f *frameworkImpl) RunPostBindPlugins(ctx context.Context, state *framework
}
}
func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl framework.PostBindPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl framework.PostBindPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if !state.ShouldRecordPluginMetrics() {
pl.PostBind(ctx, state, pod, nodeName)
return
@@ -1372,7 +1373,7 @@ func (f *frameworkImpl) runPostBindPlugin(ctx context.Context, pl framework.Post
// continue running the remaining ones and returns the error. In such a case,
// the pod will not be scheduled and the caller will be expected to call
// RunReservePluginsUnreserve.
func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Reserve, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1404,7 +1405,7 @@ func (f *frameworkImpl) RunReservePluginsReserve(ctx context.Context, state *fra
return nil
}
func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl framework.ReservePlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl framework.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Reserve(ctx, state, pod, nodeName)
}
@@ -1416,7 +1417,7 @@ func (f *frameworkImpl) runReservePluginReserve(ctx context.Context, pl framewor
// RunReservePluginsUnreserve runs the Unreserve method in the set of
// configured reserve plugins.
func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Unreserve, framework.Success.String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1440,7 +1441,7 @@ func (f *frameworkImpl) RunReservePluginsUnreserve(ctx context.Context, state *f
}
}
func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl framework.ReservePlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl framework.ReservePlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) {
if !state.ShouldRecordPluginMetrics() {
pl.Unreserve(ctx, state, pod, nodeName)
return
@@ -1456,7 +1457,7 @@ func (f *frameworkImpl) runReservePluginUnreserve(ctx context.Context, pl framew
// plugins returns "Wait", then this function will create and add waiting pod
// to a map of currently waiting pods and return status with "Wait" code.
// Pod will remain waiting pod for the minimum duration returned by the permit plugins.
func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
startTime := time.Now()
defer func() {
metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Permit, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1505,7 +1506,7 @@ func (f *frameworkImpl) RunPermitPlugins(ctx context.Context, state *framework.C
return nil
}
func (f *frameworkImpl) runPermitPlugin(ctx context.Context, pl framework.PermitPlugin, state *framework.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
func (f *frameworkImpl) runPermitPlugin(ctx context.Context, pl framework.PermitPlugin, state fwk.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
if !state.ShouldRecordPluginMetrics() {
return pl.Permit(ctx, state, pod, nodeName)
}

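Out-of-tree plugins face the same mechanical migration as the runtime methods above: the state parameter changes type and nothing else moves. A minimal sketch, assuming a hypothetical no-op ReservePlugin (the name and bodies are illustrative, not part of this change):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type noopReserve struct{}

func (noopReserve) Name() string { return "NoopReserve" }

// Before this change the second parameter was *framework.CycleState;
// now it is the staging interface, and the body needs no edits at all.
func (noopReserve) Reserve(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) *framework.Status {
	return nil
}

func (noopReserve) Unreserve(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) {}

// Compile-time check that the new signatures still satisfy the plugin interface.
var _ framework.ReservePlugin = noopReserve{}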
View File

@@ -35,6 +35,7 @@ import (
clientsetfake "k8s.io/client-go/kubernetes/fake"
"k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/backend/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/backend/queue"
@@ -133,11 +134,11 @@ func (pl *TestScoreWithNormalizePlugin) Name() string {
return pl.name
}
func (pl *TestScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
func (pl *TestScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
return injectNormalizeRes(pl.inj, scores)
}
func (pl *TestScoreWithNormalizePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *TestScoreWithNormalizePlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
return setScoreRes(pl.inj)
}
@@ -155,11 +156,11 @@ func (pl *TestScorePlugin) Name() string {
return pl.name
}
func (pl *TestScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (pl *TestScorePlugin) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.PreScoreStatus), injectReason)
}
func (pl *TestScorePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *TestScorePlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
return setScoreRes(pl.inj)
}
@@ -184,10 +185,10 @@ type TestPlugin struct {
inj injectedResult
}
func (pl *TestPlugin) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) AddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.PreFilterAddPodStatus), injectReason)
}
func (pl *TestPlugin) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) RemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.PreFilterRemovePodStatus), injectReason)
}
@@ -199,7 +200,7 @@ func (pl *TestPlugin) Less(*framework.QueuedPodInfo, *framework.QueuedPodInfo) b
return false
}
func (pl *TestPlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *TestPlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
return 0, framework.NewStatus(framework.Code(pl.inj.ScoreStatus), injectReason)
}
@@ -207,7 +208,7 @@ func (pl *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
return nil
}
func (pl *TestPlugin) PreFilter(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *TestPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
return pl.inj.PreFilterResult, framework.NewStatus(framework.Code(pl.inj.PreFilterStatus), injectReason)
}
@@ -215,37 +216,37 @@ func (pl *TestPlugin) PreFilterExtensions() framework.PreFilterExtensions {
return pl
}
func (pl *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.FilterStatus), injectFilterReason)
}
func (pl *TestPlugin) PostFilter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
func (pl *TestPlugin) PostFilter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
return nil, framework.NewStatus(framework.Code(pl.inj.PostFilterStatus), injectReason)
}
func (pl *TestPlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (pl *TestPlugin) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.PreScoreStatus), injectReason)
}
func (pl *TestPlugin) Reserve(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (pl *TestPlugin) Reserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.ReserveStatus), injectReason)
}
func (pl *TestPlugin) Unreserve(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) {
func (pl *TestPlugin) Unreserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) {
}
func (pl *TestPlugin) PreBind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (pl *TestPlugin) PreBind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.PreBindStatus), injectReason)
}
func (pl *TestPlugin) PostBind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) {
func (pl *TestPlugin) PostBind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) {
}
func (pl *TestPlugin) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
func (pl *TestPlugin) Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
return framework.NewStatus(framework.Code(pl.inj.PermitStatus), injectReason), time.Duration(0)
}
func (pl *TestPlugin) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (pl *TestPlugin) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
return framework.NewStatus(framework.Code(pl.inj.BindStatus), injectReason)
}
@@ -277,7 +278,7 @@ func (pl *TestPreFilterPlugin) Name() string {
return preFilterPluginName
}
func (pl *TestPreFilterPlugin) PreFilter(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *TestPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
pl.PreFilterCalled++
return nil, nil
}
@@ -297,18 +298,18 @@ func (pl *TestPreFilterWithExtensionsPlugin) Name() string {
return preFilterWithExtensionsPluginName
}
func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
pl.PreFilterCalled++
return nil, nil
}
func (pl *TestPreFilterWithExtensionsPlugin) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
func (pl *TestPreFilterWithExtensionsPlugin) AddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod,
podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
pl.AddCalled++
return nil
}
func (pl *TestPreFilterWithExtensionsPlugin) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
func (pl *TestPreFilterWithExtensionsPlugin) RemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod,
podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
pl.RemoveCalled++
return nil
@@ -325,7 +326,7 @@ func (dp *TestDuplicatePlugin) Name() string {
return duplicatePluginName
}
func (dp *TestDuplicatePlugin) PreFilter(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (dp *TestDuplicatePlugin) PreFilter(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
return nil, nil
}
@@ -347,7 +348,7 @@ type TestPermitPlugin struct {
func (pp *TestPermitPlugin) Name() string {
return permitPlugin
}
func (pp *TestPermitPlugin) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
func (pp *TestPermitPlugin) Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
return framework.NewStatus(framework.Wait), 10 * time.Second
}
@@ -393,7 +394,7 @@ func (t TestBindPlugin) Name() string {
return bindPlugin
}
func (t TestBindPlugin) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (t TestBindPlugin) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
return nil
}
@@ -419,7 +420,7 @@ var defaultWeights = map[string]int32{
scorePlugin1: 1,
}
var state = &framework.CycleState{}
var state = framework.NewCycleState()
// Pod is only used for logging errors.
var pod = &v1.Pod{}
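The fixture above swaps a struct literal for the constructor. A small sketch of the constructor-based pattern, assuming framework.NewCycleState keeps returning a type that satisfies the staging interface:

package example

import (
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// newTestState builds state the way production code does, so fixtures
// keep compiling if the concrete CycleState grows internals that a bare
// literal would leave unset.
func newTestState() fwk.CycleState {
	state := framework.NewCycleState()
	state.SetRecordPluginMetrics(true) // opt the fixture in to plugin metrics
	return state
}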
@@ -1118,7 +1119,7 @@ func TestRunPreScorePlugins(t *testing.T) {
if status.Code() != tt.wantStatusCode {
t.Errorf("wrong status code. got: %v, want: %v", status, tt.wantStatusCode)
}
skipped := state.SkipScorePlugins
skipped := state.GetSkipScorePlugins()
if diff := cmp.Diff(tt.wantSkippedPlugins, skipped); diff != "" {
t.Errorf("wrong skip score plugins (-want, +got):\n%s", diff)
}
@@ -1512,7 +1513,7 @@ func TestRunScorePlugins(t *testing.T) {
}()
state := framework.NewCycleState()
state.SkipScorePlugins = tt.skippedPlugins
state.SetSkipScorePlugins(tt.skippedPlugins)
res, status := f.RunScorePlugins(ctx, state, pod, BuildNodeInfos(nodes))
if tt.err {
@@ -1760,7 +1761,7 @@ func TestRunPreFilterPlugins(t *testing.T) {
if status.Code() != tt.wantStatusCode {
t.Errorf("wrong status code. got: %v, want: %v", status, tt.wantStatusCode)
}
skipped := state.SkipFilterPlugins
skipped := state.GetSkipFilterPlugins()
if diff := cmp.Diff(tt.wantSkippedPlugins, skipped); diff != "" {
t.Errorf("wrong skip filter plugins (-want,+got):\n%s", diff)
}
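Direct field access such as state.SkipScorePlugins cannot cross an interface boundary, so the skip sets are now reached through getter/setter pairs. A sketch of the accessor pattern, with a hypothetical helper name:

package example

import (
	"k8s.io/apimachinery/pkg/util/sets"
	fwk "k8s.io/kube-scheduler/framework"
)

// markScoreSkips records which score plugins to bypass for this cycle,
// then reads the set back through the interface's getter.
func markScoreSkips(state fwk.CycleState, plugins ...string) sets.Set[string] {
	state.SetSkipScorePlugins(sets.New(plugins...))
	return state.GetSkipScorePlugins()
}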
@@ -1847,7 +1848,7 @@ func TestRunPreFilterExtensionRemovePod(t *testing.T) {
}()
state := framework.NewCycleState()
state.SkipFilterPlugins = tt.skippedPluginNames
state.SetSkipFilterPlugins(tt.skippedPluginNames)
status := f.RunPreFilterExtensionRemovePod(ctx, state, nil, nil, nil)
if status.Code() != tt.wantStatusCode {
t.Errorf("wrong status code. got: %v, want: %v", status, tt.wantStatusCode)
@@ -1935,7 +1936,7 @@ func TestRunPreFilterExtensionAddPod(t *testing.T) {
}()
state := framework.NewCycleState()
state.SkipFilterPlugins = tt.skippedPluginNames
state.SetSkipFilterPlugins(tt.skippedPluginNames)
status := f.RunPreFilterExtensionAddPod(ctx, state, nil, nil, nil)
if status.Code() != tt.wantStatusCode {
t.Errorf("wrong status code. got: %v, want: %v", status, tt.wantStatusCode)
@@ -2141,7 +2142,7 @@ func TestFilterPlugins(t *testing.T) {
_ = f.Close()
}()
state := framework.NewCycleState()
state.SkipFilterPlugins = tt.skippedPlugins
state.SetSkipFilterPlugins(tt.skippedPlugins)
gotStatus := f.RunFilterPlugins(ctx, state, pod, nil)
if diff := cmp.Diff(tt.wantStatus, gotStatus, statusCmpOpts...); diff != "" {
t.Errorf("Unexpected status: (-want,+got):\n%s", diff)
@@ -2268,7 +2269,7 @@ func TestPostFilterPlugins(t *testing.T) {
defer func() {
_ = f.Close()
}()
_, gotStatus := f.RunPostFilterPlugins(ctx, nil, pod, nil)
_, gotStatus := f.RunPostFilterPlugins(ctx, state, pod, nil)
if diff := cmp.Diff(tt.wantStatus, gotStatus, statusCmpOpts...); diff != "" {
t.Errorf("Unexpected status (-want,+got):\n%s", diff)
@@ -2437,7 +2438,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
_ = f.Close()
}()
tt.nodeInfo.SetNode(tt.node)
gotStatus := f.RunFilterPluginsWithNominatedPods(ctx, framework.NewCycleState(), tt.pod, tt.nodeInfo)
gotStatus := f.RunFilterPluginsWithNominatedPods(ctx, state, tt.pod, tt.nodeInfo)
if diff := cmp.Diff(tt.wantStatus, gotStatus, statusCmpOpts...); diff != "" {
t.Errorf("Unexpected status: (-want,+got):\n%s", diff)
}
@@ -2598,7 +2599,7 @@ func TestPreBindPlugins(t *testing.T) {
_ = f.Close()
}()
status := f.RunPreBindPlugins(ctx, nil, pod, "")
status := f.RunPreBindPlugins(ctx, state, pod, "")
if diff := cmp.Diff(tt.wantStatus, status, statusCmpOpts...); diff != "" {
t.Errorf("Wrong status code (-want,+got):\n%s", diff)
@@ -2760,7 +2761,7 @@ func TestReservePlugins(t *testing.T) {
t.Fatalf("fail to create framework: %s", err)
}
status := f.RunReservePluginsReserve(ctx, nil, pod, "")
status := f.RunReservePluginsReserve(ctx, state, pod, "")
if diff := cmp.Diff(tt.wantStatus, status, statusCmpOpts...); diff != "" {
t.Errorf("Wrong status code (-want,+got):\n%s", diff)
@@ -2892,7 +2893,7 @@ func TestPermitPlugins(t *testing.T) {
t.Fatalf("fail to create framework: %s", err)
}
status := f.RunPermitPlugins(ctx, nil, pod, "")
status := f.RunPermitPlugins(ctx, state, pod, "")
if diff := cmp.Diff(tt.want, status, statusCmpOpts...); diff != "" {
t.Errorf("Wrong status code (-want,+got):\n%s", diff)
}
@@ -2908,7 +2909,6 @@ func withMetricsRecorder(recorder *metrics.MetricAsyncRecorder) Option {
}
func TestRecordingMetrics(t *testing.T) {
state := &framework.CycleState{}
state.SetRecordPluginMetrics(true)
tests := []struct {
name string
@@ -3255,7 +3255,7 @@ func TestPermitWaitDurationMetric(t *testing.T) {
_ = f.Close()
}()
f.RunPermitPlugins(ctx, nil, pod, "")
f.RunPermitPlugins(ctx, state, pod, "")
f.WaitOnPermit(ctx, pod)
collectAndComparePermitWaitDuration(t, tt.wantRes)
@@ -3317,7 +3317,7 @@ func TestWaitOnPermit(t *testing.T) {
_ = f.Close()
}()
runPermitPluginsStatus := f.RunPermitPlugins(ctx, nil, pod, "")
runPermitPluginsStatus := f.RunPermitPlugins(ctx, state, pod, "")
if runPermitPluginsStatus.Code() != framework.Wait {
t.Fatalf("Expected RunPermitPlugins to return status %v, but got %v",
framework.Wait, runPermitPluginsStatus.Code())

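The nil-to-state substitutions above are not cosmetic. The runtime methods call state.ShouldRecordPluginMetrics() right away, and calling a method on a nil interface value panics before any receiver nil-check can run, so tests that used to pass nil now pass a real fixture. A runnable sketch of the difference:

package main

import (
	"fmt"

	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

func main() {
	var nilState fwk.CycleState // nil interface: no value, no method table
	func() {
		defer func() { fmt.Println("recovered:", recover()) }()
		nilState.ShouldRecordPluginMetrics() // panics immediately
	}()

	state := framework.NewCycleState() // the fixture the tests now share
	fmt.Println(state.ShouldRecordPluginMetrics())
}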
View File

@@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
compbasemetrics "k8s.io/component-base/metrics"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
)
@@ -32,7 +33,7 @@ type instrumentedFilterPlugin struct {
var _ framework.FilterPlugin = &instrumentedFilterPlugin{}
func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
p.metric.Inc()
return p.FilterPlugin.Filter(ctx, state, pod, nodeInfo)
}
@@ -45,7 +46,7 @@ type instrumentedPreFilterPlugin struct {
var _ framework.PreFilterPlugin = &instrumentedPreFilterPlugin{}
func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
result, status := p.PreFilterPlugin.PreFilter(ctx, state, pod, nodes)
if !status.IsSkip() {
p.metric.Inc()
@@ -61,7 +62,7 @@ type instrumentedPreScorePlugin struct {
var _ framework.PreScorePlugin = &instrumentedPreScorePlugin{}
func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
status := p.PreScorePlugin.PreScore(ctx, state, pod, nodes)
if !status.IsSkip() {
p.metric.Inc()
@@ -77,7 +78,7 @@ type instrumentedScorePlugin struct {
var _ framework.ScorePlugin = &instrumentedScorePlugin{}
func (p *instrumentedScorePlugin) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (p *instrumentedScorePlugin) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
p.metric.Inc()
return p.ScorePlugin.Score(ctx, state, pod, nodeInfo)
}

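The instrumented wrappers above embed the plugin they decorate and override a single method, delegating everything else. The pattern generalizes to any extension point under the new signatures; a sketch with a hypothetical counting decorator:

package example

import (
	"context"
	"sync/atomic"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// countingScorePlugin wraps any ScorePlugin with a call counter while
// leaving its scoring behavior unchanged.
type countingScorePlugin struct {
	framework.ScorePlugin
	calls atomic.Int64
}

func (p *countingScorePlugin) Score(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
	p.calls.Add(1) // count, then delegate unchanged
	return p.ScorePlugin.Score(ctx, state, pod, nodeInfo)
}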
View File

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@@ -286,7 +287,7 @@ func (p *fakePlugin) Less(*framework.QueuedPodInfo, *framework.QueuedPodInfo) bo
return false
}
func (p *fakePlugin) Bind(context.Context, *framework.CycleState, *v1.Pod, string) *framework.Status {
func (p *fakePlugin) Bind(context.Context, fwk.CycleState, *v1.Pod, string) *framework.Status {
return nil
}

View File

@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -140,7 +141,7 @@ var clearNominatedNode = &framework.NominatingInfo{NominatingMode: framework.Mod
// schedulingCycle tries to schedule a single Pod.
func (sched *Scheduler) schedulingCycle(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
fwk framework.Framework,
podInfo *framework.QueuedPodInfo,
start time.Time,
@@ -268,7 +269,7 @@ func (sched *Scheduler) schedulingCycle(
// bindingCycle tries to bind an assumed Pod.
func (sched *Scheduler) bindingCycle(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
fwk framework.Framework,
scheduleResult ScheduleResult,
assumedPodInfo *framework.QueuedPodInfo,
@@ -335,7 +336,7 @@ func (sched *Scheduler) bindingCycle(
func (sched *Scheduler) handleBindingCycleError(
ctx context.Context,
state *framework.CycleState,
state fwk.CycleState,
fwk framework.Framework,
podInfo *framework.QueuedPodInfo,
start time.Time,
@@ -399,7 +400,7 @@ func (sched *Scheduler) skipPodSchedule(ctx context.Context, fwk framework.Frame
// schedulePod tries to schedule the given pod to one of the nodes in the node list.
// If it succeeds, it will return the name of the node.
// If it fails, it will return a FitError with reasons.
func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
trace := utiltrace.New("Scheduling", utiltrace.Field{Key: "namespace", Value: pod.Namespace}, utiltrace.Field{Key: "name", Value: pod.Name})
defer trace.LogIfLong(100 * time.Millisecond)
if err := sched.Cache.UpdateSnapshot(klog.FromContext(ctx), sched.nodeInfoSnapshot); err != nil {
@@ -451,7 +452,7 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework
// Filters the nodes to find the ones that fit the pod based on the framework
// filter plugins and filter extenders.
func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*framework.NodeInfo, framework.Diagnosis, error) {
func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) ([]*framework.NodeInfo, framework.Diagnosis, error) {
logger := klog.FromContext(ctx)
diagnosis := framework.Diagnosis{
NodeToStatus: framework.NewDefaultNodeToStatus(),
@@ -535,7 +536,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F
return feasibleNodesAfterExtender, diagnosis, nil
}
func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, fwk framework.Framework, state *framework.CycleState, diagnosis framework.Diagnosis) ([]*framework.NodeInfo, error) {
func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, fwk framework.Framework, state fwk.CycleState, diagnosis framework.Diagnosis) ([]*framework.NodeInfo, error) {
nnn := pod.Status.NominatedNodeName
nodeInfo, err := sched.nodeInfoSnapshot.Get(nnn)
if err != nil {
@@ -582,7 +583,7 @@ func (sched *Scheduler) hasExtenderFilters() bool {
func (sched *Scheduler) findNodesThatPassFilters(
ctx context.Context,
fwk framework.Framework,
state *framework.CycleState,
state fwk.CycleState,
pod *v1.Pod,
diagnosis *framework.Diagnosis,
nodes []*framework.NodeInfo) ([]*framework.NodeInfo, error) {
@@ -747,7 +748,7 @@ func prioritizeNodes(
ctx context.Context,
extenders []framework.Extender,
fwk framework.Framework,
state *framework.CycleState,
state fwk.CycleState,
pod *v1.Pod,
nodes []*framework.NodeInfo,
) ([]framework.NodePluginScores, error) {
@@ -956,7 +957,7 @@ func (sched *Scheduler) assume(logger klog.Logger, assumed *v1.Pod, host string)
// bind binds a pod to a given node defined in a binding object.
// The precedence for binding is: (1) extenders and (2) framework plugins.
// We expect this to run asynchronously, so we handle binding metrics internally.
func (sched *Scheduler) bind(ctx context.Context, fwk framework.Framework, assumed *v1.Pod, targetNode string, state *framework.CycleState) (status *framework.Status) {
func (sched *Scheduler) bind(ctx context.Context, fwk framework.Framework, assumed *v1.Pod, targetNode string, state fwk.CycleState) (status *framework.Status) {
logger := klog.FromContext(ctx)
defer func() {
sched.finishBinding(logger, fwk, assumed, targetNode, status)

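Taken together, these signature changes thread one interface-typed state from the scheduling cycle into the binding cycle. A compressed same-package sketch (hypothetical helper name; error handling, assume/forget, and the scheduling queue are omitted):

// scheduleOnceSketch is not real scheduler code, only an outline of how
// the state flows through the exported and unexported pieces above.
func (sched *Scheduler) scheduleOnceSketch(ctx context.Context, schedFwk framework.Framework, pod *v1.Pod) error {
	state := framework.NewCycleState() // one state per scheduling attempt
	result, err := sched.SchedulePod(ctx, schedFwk, state, pod)
	if err != nil {
		return err
	}
	// The same state reaches binding, so PreBind/Bind plugins can read
	// whatever Filter/Score plugins wrote earlier in the cycle.
	return sched.bind(ctx, schedFwk, pod, result.SuggestedHost, state).AsError()
}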
View File

@@ -53,6 +53,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
internalcache "k8s.io/kubernetes/pkg/scheduler/backend/cache"
@@ -170,7 +171,7 @@ func (pl *falseMapPlugin) Name() string {
return "FalseMap"
}
func (pl *falseMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) (int64, *framework.Status) {
func (pl *falseMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) (int64, *framework.Status) {
return 0, framework.AsStatus(errPrioritize)
}
@@ -190,7 +191,7 @@ func (pl *numericMapPlugin) Name() string {
return "NumericMap"
}
func (pl *numericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *numericMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
nodeName := nodeInfo.Node().Name
score, err := strconv.Atoi(nodeName)
if err != nil {
@@ -214,7 +215,7 @@ func (pl *reverseNumericMapPlugin) Name() string {
return "ReverseNumericMap"
}
func (pl *reverseNumericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *reverseNumericMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
nodeName := nodeInfo.Node().Name
score, err := strconv.Atoi(nodeName)
if err != nil {
@@ -227,7 +228,7 @@ func (pl *reverseNumericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
return pl
}
func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
var maxScore float64
minScore := math.MaxFloat64
@@ -256,7 +257,7 @@ func (pl *trueMapPlugin) Name() string {
return "TrueMap"
}
func (pl *trueMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) (int64, *framework.Status) {
func (pl *trueMapPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) (int64, *framework.Status) {
return 1, nil
}
@@ -264,7 +265,7 @@ func (pl *trueMapPlugin) ScoreExtensions() framework.ScoreExtensions {
return pl
}
func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
for _, host := range nodeScores {
if host.Name == "" {
return framework.NewStatus(framework.Error, "unexpected empty host name")
@@ -287,7 +288,7 @@ func (pl *noPodsFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if len(nodeInfo.Pods) == 0 {
return nil
}
@@ -306,7 +307,7 @@ func (s *fakeNodeSelector) Name() string {
return "FakeNodeSelector"
}
func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (s *fakeNodeSelector) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo.Node().Name != s.NodeName {
return framework.NewStatus(framework.UnschedulableAndUnresolvable)
}
@@ -333,7 +334,7 @@ func (f *fakeNodeSelectorDependOnPodAnnotation) Name() string {
}
// Filter selects the one specified node and rejects all other nodes.
func (f *fakeNodeSelectorDependOnPodAnnotation) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (f *fakeNodeSelectorDependOnPodAnnotation) Filter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
resolveNodeNameFromPodAnnotation := func(pod *v1.Pod) (string, error) {
if pod == nil {
return "", fmt.Errorf("empty pod")
@@ -370,7 +371,7 @@ func (t *TestPlugin) Name() string {
return t.name
}
func (t *TestPlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (t *TestPlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
return 1, nil
}
@@ -378,7 +379,7 @@ func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
return nil
}
func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (t *TestPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return nil
}
@@ -856,7 +857,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
})
informerFactory := informers.NewSharedInformerFactory(client, 0)
fwk, err := tf.NewFramework(ctx,
schedFramework, err := tf.NewFramework(ctx,
append(item.registerPluginFuncs,
tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
@@ -878,11 +879,11 @@ func TestSchedulerScheduleOne(t *testing.T) {
client: client,
NextPod: queue.Pop,
SchedulingQueue: queue,
Profiles: profile.Map{testSchedulerName: fwk},
Profiles: profile.Map{testSchedulerName: schedFramework},
}
queue.Add(logger, item.sendPod)
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) {
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (ScheduleResult, error) {
return item.mockScheduleResult, item.injectSchedulingError
}
sched.FailureHandler = func(ctx context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *framework.Status, ni *framework.NominatingInfo, start time.Time) {
@@ -1156,7 +1157,7 @@ func TestScheduleOneMarksPodAsProcessedBeforePreBind(t *testing.T) {
ar := metrics.NewMetricsAsyncRecorder(10, 1*time.Second, ctx.Done())
queue := internalqueue.NewSchedulingQueue(nil, informerFactory, internalqueue.WithMetricsRecorder(*ar))
fwk, err := NewFakeFramework(
schedFramework, err := NewFakeFramework(
ctx,
queue,
append(item.registerPluginFuncs,
@@ -1172,12 +1173,12 @@ func TestScheduleOneMarksPodAsProcessedBeforePreBind(t *testing.T) {
t.Fatal(err)
}
fwk.waitOnPermitFn = func(_ context.Context, pod *v1.Pod) *framework.Status {
gotPodIsInFlightAtWaitOnPermit = podListContainsPod(fwk.queue.InFlightPods(), pod)
schedFramework.waitOnPermitFn = func(_ context.Context, pod *v1.Pod) *framework.Status {
gotPodIsInFlightAtWaitOnPermit = podListContainsPod(schedFramework.queue.InFlightPods(), pod)
return item.mockWaitOnPermitResult
}
fwk.runPreBindPluginsFn = func(_ context.Context, _ *framework.CycleState, pod *v1.Pod, _ string) *framework.Status {
gotPodIsInFlightAtRunPreBindPlugins = podListContainsPod(fwk.queue.InFlightPods(), pod)
schedFramework.runPreBindPluginsFn = func(_ context.Context, _ fwk.CycleState, pod *v1.Pod, _ string) *framework.Status {
gotPodIsInFlightAtRunPreBindPlugins = podListContainsPod(schedFramework.queue.InFlightPods(), pod)
return item.mockRunPreBindPluginsResult
}
@@ -1186,11 +1187,11 @@ func TestScheduleOneMarksPodAsProcessedBeforePreBind(t *testing.T) {
client: client,
NextPod: queue.Pop,
SchedulingQueue: queue,
Profiles: profile.Map{testSchedulerName: fwk},
Profiles: profile.Map{testSchedulerName: schedFramework},
}
queue.Add(logger, item.sendPod)
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) {
sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (ScheduleResult, error) {
return item.mockScheduleResult, item.injectSchedulingError
}
sched.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *framework.Status, _ *framework.NominatingInfo, _ time.Time) {
@@ -1271,7 +1272,7 @@ type FakeFramework struct {
framework.Framework
queue internalqueue.SchedulingQueue
waitOnPermitFn func(context.Context, *v1.Pod) *framework.Status
runPreBindPluginsFn func(context.Context, *framework.CycleState, *v1.Pod, string) *framework.Status
runPreBindPluginsFn func(context.Context, fwk.CycleState, *v1.Pod, string) *framework.Status
}
func NewFakeFramework(ctx context.Context, schedQueue internalqueue.SchedulingQueue, fns []tf.RegisterPluginFunc,
@@ -1287,7 +1288,7 @@ func (ff *FakeFramework) WaitOnPermit(ctx context.Context, pod *v1.Pod) *framewo
return ff.waitOnPermitFn(ctx, pod)
}
func (ff *FakeFramework) RunPreBindPlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (ff *FakeFramework) RunPreBindPlugins(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
return ff.runPreBindPluginsFn(ctx, state, pod, nodeName)
}
@@ -1718,6 +1719,7 @@ func TestSchedulerBinding(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
pod := st.MakePod().Name(test.podName).Obj()
defaultBound := false
state := framework.NewCycleState()
client := clientsetfake.NewClientset(pod)
client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "binding" {
@@ -1742,7 +1744,7 @@ func TestSchedulerBinding(t *testing.T) {
nodeInfoSnapshot: nil,
percentageOfNodesToScore: 0,
}
status := sched.bind(ctx, fwk, pod, "node", nil)
status := sched.bind(ctx, fwk, pod, "node", state)
if !status.IsSuccess() {
t.Error(status.AsError())
}

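The fwk-to-schedFramework renames above are forced by Go scoping, the same rule that lets schedulePod keep a parameter named fwk beside the fwk package alias: a parameter's scope only begins at the function body, but a local variable shadows the package for every signature declared after it in the same scope. A standalone sketch using fmt in place of fwk:

package main

import "fmt"

type word string

func (w word) String() string { return string(w) }

// Compiles: parameter scope starts at the body, so the type fmt.Stringer
// still resolves to the package even though a parameter is named fmt.
func echo(fmt string, s fmt.Stringer) string { return fmt + s.String() }

func main() {
	// A variable named fmt, by contrast, would shadow the package for the
	// rest of this scope, including closure signatures declared here;
	// that is why the tests renamed their local fwk variable:
	//   fmt := "local"
	//   f := func(s fmt.Stringer) {} // error: fmt is not a package
	fmt.Println(echo("go", word("pher"))) // prints "gopher"
}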
View File

@@ -36,6 +36,7 @@ import (
resourceslicetracker "k8s.io/dynamic-resource-allocation/resourceslice/tracker"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
@@ -84,7 +85,7 @@ type Scheduler struct {
// SchedulePod tries to schedule the given pod to one of the nodes in the node list.
// It returns a ScheduleResult with the name of the suggested host on success;
// otherwise it returns a FitError with reasons.
SchedulePod func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error)
SchedulePod func(ctx context.Context, fwk framework.Framework, state fwk.CycleState, pod *v1.Pod) (ScheduleResult, error)
// Close this to shut down the scheduler.
StopEverything <-chan struct{}

View File

@@ -45,6 +45,7 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
fwk "k8s.io/kube-scheduler/framework"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/ptr"
@@ -1150,7 +1151,7 @@ func (t *fakebindPlugin) Name() string {
return fakeBind
}
func (t *fakebindPlugin) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (t *fakebindPlugin) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
return nil
}
@@ -1159,7 +1160,7 @@ type filterWithoutEnqueueExtensionsPlugin struct{}
func (*filterWithoutEnqueueExtensionsPlugin) Name() string { return filterWithoutEnqueueExtensions }
func (*filterWithoutEnqueueExtensionsPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (*filterWithoutEnqueueExtensionsPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
@@ -1173,7 +1174,7 @@ var fakeNodePluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{})
func (*fakeNodePlugin) Name() string { return fakeNode }
func (*fakeNodePlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (*fakeNodePlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
@@ -1193,7 +1194,7 @@ var fakePodPluginQueueingFn = func(_ klog.Logger, _ *v1.Pod, _, _ interface{}) (
func (*fakePodPlugin) Name() string { return fakePod }
func (*fakePodPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (*fakePodPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
@@ -1207,7 +1208,7 @@ type emptyEventPlugin struct{}
func (*emptyEventPlugin) Name() string { return emptyEventExtensions }
func (*emptyEventPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (*emptyEventPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
@@ -1220,7 +1221,7 @@ type errorEventsToRegisterPlugin struct{}
func (*errorEventsToRegisterPlugin) Name() string { return errorEventsToRegister }
func (*errorEventsToRegisterPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (*errorEventsToRegisterPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
@@ -1235,7 +1236,7 @@ type emptyEventsToRegisterPlugin struct{}
func (*emptyEventsToRegisterPlugin) Name() string { return emptyEventsToRegister }
func (*emptyEventsToRegisterPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (*emptyEventsToRegisterPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
@@ -1266,7 +1267,7 @@ const (
permitTimeout = 10 * time.Second
)
func (f fakePermitPlugin) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
func (f fakePermitPlugin) Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
defer func() {
// Send an event with the podWaiting reason to broadcast that this pod is waiting in the permit stage.
f.eventRecorder.Eventf(p, nil, v1.EventTypeWarning, podWaitingReason, "", "")

View File

@@ -26,6 +26,7 @@ import (
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/util"
@@ -124,7 +125,7 @@ func (pl *node2PrioritizerPlugin) Name() string {
}
// Score returns 100 if the given nodeName is "node2"; otherwise it returns 10.
func (pl *node2PrioritizerPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *node2PrioritizerPlugin) Score(_ context.Context, _ fwk.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
score := 10
if nodeInfo.Node().Name == "node2" {
score = 100

View File

@@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)
@@ -40,7 +41,7 @@ func (pl *FalseFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *FalseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *FalseFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
}
@@ -58,7 +59,7 @@ func (pl *TrueFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *TrueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *TrueFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return nil
}
@@ -90,7 +91,7 @@ func (pl *FakeFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *FakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *FakeFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
atomic.AddInt32(&pl.NumFilterCalled, 1)
if returnCode, ok := pl.FailedNodeReturnCodeMap[nodeInfo.Node().Name]; ok {
@@ -119,7 +120,7 @@ func (pl *MatchFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *MatchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *MatchFilterPlugin) Filter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
@@ -148,7 +149,7 @@ func (pl *FakePreFilterPlugin) Name() string {
}
// PreFilter invoked at the PreFilter extension point.
func (pl *FakePreFilterPlugin) PreFilter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pl *FakePreFilterPlugin) PreFilter(_ context.Context, _ fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
return pl.Result, pl.Status
}
@@ -179,12 +180,12 @@ func (pl *FakeReservePlugin) Name() string {
}
// Reserve invoked at the Reserve extension point.
func (pl *FakeReservePlugin) Reserve(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) *framework.Status {
func (pl *FakeReservePlugin) Reserve(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) *framework.Status {
return pl.Status
}
// Unreserve invoked at the Unreserve extension point.
func (pl *FakeReservePlugin) Unreserve(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) {
func (pl *FakeReservePlugin) Unreserve(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) {
}
// NewFakeReservePlugin initializes a fakeReservePlugin and returns it.
@@ -207,7 +208,7 @@ func (pl *FakePreBindPlugin) Name() string {
}
// PreBind invoked at the PreBind extension point.
func (pl *FakePreBindPlugin) PreBind(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) *framework.Status {
func (pl *FakePreBindPlugin) PreBind(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) *framework.Status {
return pl.Status
}
@@ -232,7 +233,7 @@ func (pl *FakePermitPlugin) Name() string {
}
// Permit invoked at the Permit extension point.
func (pl *FakePermitPlugin) Permit(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (*framework.Status, time.Duration) {
func (pl *FakePermitPlugin) Permit(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ string) (*framework.Status, time.Duration) {
return pl.Status, pl.Timeout
}
@@ -258,7 +259,7 @@ func (pl *FakePreScoreAndScorePlugin) Name() string {
return pl.name
}
func (pl *FakePreScoreAndScorePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (pl *FakePreScoreAndScorePlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
return pl.score, pl.scoreStatus
}
@@ -266,7 +267,7 @@ func (pl *FakePreScoreAndScorePlugin) ScoreExtensions() framework.ScoreExtension
return nil
}
func (pl *FakePreScoreAndScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
func (pl *FakePreScoreAndScorePlugin) PreScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
return pl.preScoreStatus
}

View File

@@ -0,0 +1,78 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
// ErrNotFound is the not found error message.
ErrNotFound = errors.New("not found")
)
// StateData is a generic type for arbitrary data stored in CycleState.
type StateData interface {
// Clone is an interface to make a copy of StateData. For performance reasons,
// clone should make shallow copies for members (e.g., slices or maps) that are not
// impacted by PreFilter's optional AddPod/RemovePod methods.
Clone() StateData
}
// StateKey is the type of keys stored in CycleState.
type StateKey string
// CycleState provides a mechanism for plugins to store and retrieve arbitrary data.
// StateData stored by one plugin can be read, altered, or deleted by another plugin.
// CycleState does not provide any data protection, as all plugins are assumed to be
// trusted.
type CycleState interface {
// ShouldRecordPluginMetrics returns whether metrics.PluginExecutionDuration metrics
// should be recorded.
// This function is mostly for the scheduling framework runtime, plugins usually don't have to use it.
ShouldRecordPluginMetrics() bool
// GetSkipFilterPlugins returns plugins that will be skipped in the Filter extension point.
// This function is mostly for the scheduling framework runtime, plugins usually don't have to use it.
GetSkipFilterPlugins() sets.Set[string]
// SetSkipFilterPlugins sets plugins that should be skipped in the Filter extension point.
// This function is mostly for the scheduling framework runtime, plugins usually don't have to use it.
SetSkipFilterPlugins(plugins sets.Set[string])
// GetSkipScorePlugins returns plugins that will be skipped in the Score extension point.
// This function is mostly for the scheduling framework runtime, plugins usually don't have to use it.
GetSkipScorePlugins() sets.Set[string]
// SetSkipScorePlugins sets plugins that should be skipped in the Score extension point.
// This function is mostly for the scheduling framework runtime, plugins usually don't have to use it.
SetSkipScorePlugins(plugins sets.Set[string])
// Read retrieves data with the given "key" from CycleState. If the key is not
// present, ErrNotFound is returned.
//
// See CycleState for notes on concurrency.
Read(key StateKey) (StateData, error)
// Write stores the given "val" in CycleState with the given "key".
//
// See CycleState for notes on concurrency.
Write(key StateKey, val StateData)
// Delete deletes data with the given key from CycleState.
//
// See CycleState for notes on concurrency.
Delete(key StateKey)
// Clone creates a copy of CycleState and returns its pointer. Clone returns
// nil if the CycleState being cloned is nil.
Clone() CycleState
}

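The interface above is everything a plugin needs to share per-cycle data across extension points. A minimal sketch of the write-once/read-many pattern the implementation is optimized for (plugin and key names are hypothetical):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

const countKey fwk.StateKey = "example.com/candidateCount"

// candidateCount implements fwk.StateData; a shallow Clone is safe here
// because the struct holds no shared references.
type candidateCount struct{ n int }

func (d *candidateCount) Clone() fwk.StateData { c := *d; return &c }

type sharePlugin struct{}

func (sharePlugin) Name() string { return "Share" }

// PreFilter writes once per scheduling cycle...
func (sharePlugin) PreFilter(_ context.Context, state fwk.CycleState, _ *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
	state.Write(countKey, &candidateCount{n: len(nodes)})
	return nil, nil
}

func (sharePlugin) PreFilterExtensions() framework.PreFilterExtensions { return nil }

// ...and Filter reads many times, once per node.
func (sharePlugin) Filter(_ context.Context, state fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
	data, err := state.Read(countKey) // fwk.ErrNotFound if PreFilter was skipped
	if err != nil {
		return framework.AsStatus(err)
	}
	if data.(*candidateCount).n == 0 {
		return framework.NewStatus(framework.Unschedulable, "no candidate nodes")
	}
	return nil
}

var (
	_ framework.PreFilterPlugin = sharePlugin{}
	_ framework.FilterPlugin    = sharePlugin{}
)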
View File

@@ -61,7 +61,7 @@ func TestDefaultBinder(t *testing.T) {
podCopy.UID = "another"
}
status := testCtx.Scheduler.Profiles["default-scheduler"].RunBindPlugins(testCtx.Ctx, nil, podCopy, node.Name)
status := testCtx.Scheduler.Profiles["default-scheduler"].RunBindPlugins(testCtx.Ctx, framework.NewCycleState(), podCopy, node.Name)
if code := status.Code(); code != tc.wantStatusCode {
t.Errorf("Bind returned code %s, want %s", code, tc.wantStatusCode)
}

View File

@@ -31,6 +31,7 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-helpers/scheduling/corev1"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
@@ -54,7 +55,7 @@ func (pl *fooPlugin) Name() string {
return "foo"
}
func (pl *fooPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (pl *fooPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
taints := nodeInfo.Node().Spec.Taints
if len(taints) == 0 {
return nil

View File

@@ -44,6 +44,7 @@ import (
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -361,7 +362,7 @@ func (sp *ScorePlugin) Name() string {
}
// Score returns the score of scheduling a pod on a specific node.
func (sp *ScorePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (sp *ScorePlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
sp.mutex.Lock()
defer sp.mutex.Unlock()
@@ -393,7 +394,7 @@ func (sp *ScoreWithNormalizePlugin) Name() string {
}
// Score returns the score of scheduling a pod on a specific node.
func (sp *ScoreWithNormalizePlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
func (sp *ScoreWithNormalizePlugin) Score(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
sp.mutex.Lock()
defer sp.mutex.Unlock()
@@ -402,7 +403,7 @@ func (sp *ScoreWithNormalizePlugin) Score(ctx context.Context, state *framework.
return score, nil
}
func (sp *ScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
func (sp *ScoreWithNormalizePlugin) NormalizeScore(ctx context.Context, state fwk.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
sp.numNormalizeScoreCalled++
return nil
}
@@ -418,7 +419,7 @@ func (fp *FilterPlugin) Name() string {
// Filter is a test function that returns an error or nil, depending on the
// value of "failFilter".
func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
func (fp *FilterPlugin) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
fp.mutex.Lock()
defer fp.mutex.Unlock()
@@ -450,7 +451,7 @@ func (rp *ReservePlugin) Name() string {
// Reserve is a test function that increments an internal counter and returns
// an error or nil, depending on the value of "failReserve".
func (rp *ReservePlugin) Reserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (rp *ReservePlugin) Reserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
rp.numReserveCalled++
if rp.failReserve {
return framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name))
@@ -461,7 +462,7 @@ func (rp *ReservePlugin) Reserve(ctx context.Context, state *framework.CycleStat
// Unreserve is a test function that increments an internal counter and emits
// an event to a channel. While Unreserve implementations should normally be
// idempotent, we relax that requirement here for testing purposes.
func (rp *ReservePlugin) Unreserve(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (rp *ReservePlugin) Unreserve(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
rp.numUnreserveCalled++
if rp.pluginInvokeEventChan != nil {
select {
@@ -477,7 +478,7 @@ func (*PreScorePlugin) Name() string {
}
// PreScore is a test function.
func (pfp *PreScorePlugin) PreScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, _ []*framework.NodeInfo) *framework.Status {
func (pfp *PreScorePlugin) PreScore(ctx context.Context, _ fwk.CycleState, pod *v1.Pod, _ []*framework.NodeInfo) *framework.Status {
pfp.numPreScoreCalled++
if pfp.failPreScore {
return framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name))
@@ -492,7 +493,7 @@ func (pp *PreBindPlugin) Name() string {
}
// PreBind is a test function that returns a success or error status for testing.
func (pp *PreBindPlugin) PreBind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (pp *PreBindPlugin) PreBind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
pp.mutex.Lock()
defer pp.mutex.Unlock()
@@ -520,7 +521,7 @@ func (bp *BindPlugin) Name() string {
return bp.name
}
func (bp *BindPlugin) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (bp *BindPlugin) Bind(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
bp.mutex.Lock()
defer bp.mutex.Unlock()
@@ -551,7 +552,7 @@ func (pp *PostBindPlugin) Name() string {
}
// PostBind is a test function that counts the number of times it is called.
func (pp *PostBindPlugin) PostBind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) {
func (pp *PostBindPlugin) PostBind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) {
pp.mutex.Lock()
defer pp.mutex.Unlock()
@@ -575,7 +576,7 @@ func (pp *PreFilterPlugin) PreFilterExtensions() framework.PreFilterExtensions {
}
// PreFilter is a test function that returns a success or error status for testing.
func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (pp *PreFilterPlugin) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
pp.numPreFilterCalled++
if pp.failPreFilter {
return nil, framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name))
@@ -594,7 +595,7 @@ func (pp *PostFilterPlugin) Name() string {
return pp.name
}
func (pp *PostFilterPlugin) PostFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, _ framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
func (pp *PostFilterPlugin) PostFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, _ framework.NodeToStatusReader) (*framework.PostFilterResult, *framework.Status) {
pp.numPostFilterCalled++
nodeInfos, err := pp.fh.SnapshotSharedLister().NodeInfos().List()
if err != nil {
@@ -625,7 +626,7 @@ func (pp *PermitPlugin) Name() string {
}
// Permit implements the permit test plugin.
func (pp *PermitPlugin) Permit(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
func (pp *PermitPlugin) Permit(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
pp.mutex.Lock()
defer pp.mutex.Unlock()
@@ -2625,7 +2626,7 @@ func (j *JobPlugin) Name() string {
return jobPluginName
}
func (j *JobPlugin) PreFilter(_ context.Context, _ *framework.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (j *JobPlugin) PreFilter(_ context.Context, _ fwk.CycleState, p *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
labelSelector := labels.SelectorFromSet(labels.Set{"driver": ""})
driverPods, err := j.podLister.Pods(p.Namespace).List(labelSelector)
if err != nil {
@@ -2641,7 +2642,7 @@ func (j *JobPlugin) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
func (j *JobPlugin) PostBind(_ context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) {
func (j *JobPlugin) PostBind(_ context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) {
if _, ok := p.Labels["driver"]; !ok {
return
}

View File

@@ -41,6 +41,7 @@ import (
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
@@ -103,7 +104,7 @@ func (fp *tokenFilter) Name() string {
return tokenFilterName
}
func (fp *tokenFilter) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod,
func (fp *tokenFilter) Filter(ctx context.Context, state fwk.CycleState, pod *v1.Pod,
nodeInfo *framework.NodeInfo) *framework.Status {
if fp.Tokens > 0 {
fp.Tokens--
@@ -116,20 +117,20 @@ func (fp *tokenFilter) Filter(ctx context.Context, state *framework.CycleState,
return framework.NewStatus(status, fmt.Sprintf("can't fit %v", pod.Name))
}
func (fp *tokenFilter) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
func (fp *tokenFilter) PreFilter(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.PreFilterResult, *framework.Status) {
if !fp.EnablePreFilter || fp.Tokens > 0 {
return nil, nil
}
return nil, framework.NewStatus(framework.Unschedulable)
}
func (fp *tokenFilter) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
func (fp *tokenFilter) AddPod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod,
podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
fp.Tokens--
return nil
}
func (fp *tokenFilter) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
func (fp *tokenFilter) RemovePod(ctx context.Context, state fwk.CycleState, podToSchedule *v1.Pod,
podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
fp.Tokens++
return nil
@@ -1511,7 +1512,7 @@ func (af *alwaysFail) Name() string {
return alwaysFailPlugin
}
func (af *alwaysFail) PreBind(_ context.Context, _ *framework.CycleState, p *v1.Pod, _ string) *framework.Status {
func (af *alwaysFail) PreBind(_ context.Context, _ fwk.CycleState, p *v1.Pod, _ string) *framework.Status {
if strings.Contains(p.Name, doNotFailMe) {
return nil
}
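
A hedged usage note, not part of the diff: framework.NewCycleState() still returns the in-tree concrete type, which satisfies the staged interface, so tests can keep constructing state the old way. A sketch of a test driving tokenFilter through the new signatures (it assumes it sits in the same package as tokenFilter; the test name is made up):

package example

import (
	"context"
	"testing"

	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestTokenFilterAcceptsStagedInterface(t *testing.T) {
	// The concrete *framework.CycleState is handed around as the interface.
	var state fwk.CycleState = framework.NewCycleState()
	fp := &tokenFilter{Tokens: 1, EnablePreFilter: true}
	pod := st.MakePod().Name("p").Namespace("ns").Obj()

	if _, status := fp.PreFilter(context.Background(), state, pod, nil); !status.IsSuccess() {
		t.Fatalf("unexpected PreFilter status: %v", status)
	}
	// AddPod and RemovePod receive the same interface value.
	if status := fp.AddPod(context.Background(), state, pod, nil, nil); !status.IsSuccess() {
		t.Fatalf("unexpected AddPod status: %v", status)
	}
}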

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
fwk "k8s.io/kube-scheduler/framework"
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/scheduler"
configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
@@ -187,7 +188,7 @@ func (f *fakeCRPlugin) Name() string {
return "fakeCRPlugin"
}
func (f *fakeCRPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
func (f *fakeCRPlugin) Filter(_ context.Context, _ fwk.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Unschedulable, "always fail")
}
@@ -438,7 +439,7 @@ func (*firstFailBindPlugin) Name() string {
return "firstFailBindPlugin"
}
func (p *firstFailBindPlugin) Bind(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodename string) *framework.Status {
func (p *firstFailBindPlugin) Bind(ctx context.Context, state fwk.CycleState, pod *v1.Pod, nodename string) *framework.Status {
if p.counter == 0 {
// fail in the first Bind call.
p.counter++
@@ -568,7 +569,7 @@ func (p *fakePermitPlugin) Name() string {
return fakePermitPluginName
}
func (p *fakePermitPlugin) Permit(ctx context.Context, state *framework.CycleState, _ *v1.Pod, _ string) (*framework.Status, time.Duration) {
func (p *fakePermitPlugin) Permit(ctx context.Context, state fwk.CycleState, _ *v1.Pod, _ string) (*framework.Status, time.Duration) {
return framework.NewStatus(framework.Wait), wait.ForeverTestTimeout
}
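
Not in the diff, but worth noting as a cheap guard: compile-time assertions of this shape (using the plugin types defined in this file) fail the build if any method still takes the old *framework.CycleState where the interface is now expected.

var (
	_ framework.FilterPlugin = &fakeCRPlugin{}
	_ framework.BindPlugin   = &firstFailBindPlugin{}
	_ framework.PermitPlugin = &fakePermitPlugin{}
)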

View File

@@ -24,6 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
fwk "k8s.io/kube-scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler"
"k8s.io/kubernetes/pkg/scheduler/framework"
st "k8s.io/kubernetes/pkg/scheduler/testing"
@@ -46,7 +47,7 @@ func (rp *ReservePlugin) Name() string {
return rp.name
}
func (rp *ReservePlugin) Reserve(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
func (rp *ReservePlugin) Reserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) *framework.Status {
rp.numReserveCalled += 1
if rp.statusCode == framework.Error {
@@ -62,7 +63,7 @@ func (rp *ReservePlugin) Reserve(ctx context.Context, state *framework.CycleStat
return nil
}
func (rp *ReservePlugin) Unreserve(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) {
func (rp *ReservePlugin) Unreserve(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) {
rp.numUnreserveCalled += 1
}
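
For reference, the contract these Reserve/Unreserve counters exercise, shown as a simplified driver (hypothetical runReserve, not the framework's actual runtime code): when any Reserve fails, Unreserve runs for the reserve plugins in reverse order with the same fwk.CycleState, and implementations are expected to be idempotent.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	fwk "k8s.io/kube-scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// runReserve is a simplified stand-in for the framework's reserve phase.
func runReserve(ctx context.Context, plugins []framework.ReservePlugin, state fwk.CycleState, pod *v1.Pod, node string) *framework.Status {
	for _, pl := range plugins {
		if status := pl.Reserve(ctx, state, pod, node); !status.IsSuccess() {
			// Roll back in reverse order; Unreserve must tolerate plugins
			// that never got to reserve anything.
			for i := len(plugins) - 1; i >= 0; i-- {
				plugins[i].Unreserve(ctx, state, pod, node)
			}
			return status
		}
	}
	return nil
}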
@@ -87,7 +88,7 @@ func (pp *PermitPlugin) Name() string {
return pp.name
}
func (pp *PermitPlugin) Permit(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
func (pp *PermitPlugin) Permit(ctx context.Context, state fwk.CycleState, p *v1.Pod, nodeName string) (*framework.Status, time.Duration) {
pp.numPermitCalled += 1
if pp.statusCode == framework.Error {