cleanup: remove event list

Author: Kensei Nakada
Date: 2024-10-02 12:24:03 +09:00
parent b1b4e5d397
commit 83f9e4b6df
15 changed files with 528 additions and 482 deletions

View File

@@ -48,7 +48,7 @@ type activeQueuer interface {
listInFlightEvents() []interface{}
listInFlightPods() []*v1.Pod
clusterEventsForPod(logger klog.Logger, pInfo *framework.QueuedPodInfo) ([]*clusterEvent, error)
addEventIfPodInFlight(oldPod, newPod *v1.Pod, event framework.ClusterEvent) bool
addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []framework.ClusterEvent) bool
addEventIfAnyInFlight(oldObj, newObj interface{}, event framework.ClusterEvent) bool
schedulingCycle() int64
@@ -304,20 +304,22 @@ func (aq *activeQueue) clusterEventsForPod(logger klog.Logger, pInfo *framework.
return events, nil
}
// addEventIfPodInFlight adds clusterEvent to inFlightEvents if the newPod is in inFlightPods.
// addEventsIfPodInFlight adds clusterEvent to inFlightEvents if the newPod is in inFlightPods.
// It returns true if the events were pushed to the inFlightEvents.
func (aq *activeQueue) addEventIfPodInFlight(oldPod, newPod *v1.Pod, event framework.ClusterEvent) bool {
func (aq *activeQueue) addEventsIfPodInFlight(oldPod, newPod *v1.Pod, events []framework.ClusterEvent) bool {
aq.lock.Lock()
defer aq.lock.Unlock()
_, ok := aq.inFlightPods[newPod.UID]
if ok {
aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label, 1, false)
aq.inFlightEvents.PushBack(&clusterEvent{
event: event,
oldObj: oldPod,
newObj: newPod,
})
for _, event := range events {
aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label(), 1, false)
aq.inFlightEvents.PushBack(&clusterEvent{
event: event,
oldObj: oldPod,
newObj: newPod,
})
}
}
return ok
}
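
To make the new batch semantics concrete, here is a minimal, self-contained Go sketch; the types are simplified stand-ins and only the method name and slice-based signature come from the diff above.

package main

import (
	"container/list"
	"fmt"
	"sync"
)

// Simplified stand-ins for the scheduler's types; only the batch-push
// behavior of addEventsIfPodInFlight is taken from the diff above.
type clusterEvent struct{ label string }

type activeQueue struct {
	lock           sync.Mutex
	inFlightPods   map[string]bool
	inFlightEvents *list.List
}

// addEventsIfPodInFlight pushes every event produced by one pod update under
// a single lock acquisition, instead of one synthetic event as before.
func (aq *activeQueue) addEventsIfPodInFlight(podUID string, events []clusterEvent) bool {
	aq.lock.Lock()
	defer aq.lock.Unlock()
	if !aq.inFlightPods[podUID] {
		return false
	}
	for _, ev := range events {
		aq.inFlightEvents.PushBack(ev)
	}
	return true
}

func main() {
	aq := &activeQueue{inFlightPods: map[string]bool{"pod-1": true}, inFlightEvents: list.New()}
	events := []clusterEvent{{"PodLabelChange"}, {"PodTolerationChange"}}
	fmt.Println(aq.addEventsIfPodInFlight("pod-1", events), aq.inFlightEvents.Len()) // true 2
}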
@@ -329,7 +331,7 @@ func (aq *activeQueue) addEventIfAnyInFlight(oldObj, newObj interface{}, event f
defer aq.lock.Unlock()
if len(aq.inFlightPods) != 0 {
aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label, 1, false)
aq.metricsRecorder.ObserveInFlightEventsAsync(event.Label(), 1, false)
aq.inFlightEvents.PushBack(&clusterEvent{
event: event,
oldObj: oldObj,
@@ -380,7 +382,7 @@ func (aq *activeQueue) done(pod types.UID) {
break
}
aq.inFlightEvents.Remove(e)
aggrMetricsCounter[ev.event.Label]--
aggrMetricsCounter[ev.event.Label()]--
}
for evLabel, count := range aggrMetricsCounter {
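
The recurring event.Label → event.Label() change throughout this commit reflects ClusterEvent dropping its stored Label field in favor of deriving the label on demand. A rough sketch of the idea, assuming the label is simply composed from Resource and ActionType (the framework's real formatting may differ):

package main

import "fmt"

// Simplified mirrors of the framework types (assumptions for this sketch).
type EventResource string

type ActionType string

// ClusterEvent no longer carries a Label field; callers ask for the label.
type ClusterEvent struct {
	Resource   EventResource
	ActionType ActionType
}

// Label derives the metric/log label from the event's fields, so removing the
// predeclared event list cannot leave a stale hand-written label behind.
func (ce ClusterEvent) Label() string {
	return fmt.Sprintf("%v%v", ce.Resource, ce.ActionType)
}

func main() {
	nodeAdd := ClusterEvent{Resource: "Node", ActionType: "Add"}
	fmt.Println(nodeAdd.Label()) // NodeAdd
}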

View File

@@ -46,8 +46,8 @@ func TestClose(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error while pop(): %v", err)
}
aq.addEventIfAnyInFlight(nil, nil, framework.NodeAdd)
aq.addEventIfAnyInFlight(nil, nil, framework.NodeConditionChange)
aq.addEventIfAnyInFlight(nil, nil, nodeAdd)
aq.addEventIfAnyInFlight(nil, nil, csiNodeUpdate)
if len(aq.listInFlightEvents()) != 4 {
t.Fatalf("unexpected number of in-flight events: %v", len(aq.listInFlightEvents()))

View File

@@ -450,7 +450,7 @@ func (p *PriorityQueue) isPodWorthRequeuing(logger klog.Logger, pInfo *framework
}
hint = framework.Queue
}
p.metricsRecorder.ObserveQueueingHintDurationAsync(hintfn.PluginName, event.Label, queueingHintToLabel(hint, err), metrics.SinceInSeconds(start))
p.metricsRecorder.ObserveQueueingHintDurationAsync(hintfn.PluginName, event.Label(), queueingHintToLabel(hint, err), metrics.SinceInSeconds(start))
if hint == framework.QueueSkip {
continue
@@ -571,7 +571,7 @@ func (p *PriorityQueue) moveToActiveQ(logger klog.Logger, pInfo *framework.Queue
_ = p.podBackoffQ.Delete(pInfo) // Don't need to react when pInfo is not found.
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event, "queue", activeQ)
metrics.SchedulerQueueIncomingPods.WithLabelValues("active", event).Inc()
if event == framework.PodAdd || event == framework.PodUpdate {
if event == framework.EventUnscheduledPodAdd.Label() || event == framework.EventUnscheduledPodUpdate.Label() {
p.AddNominatedPod(logger, pInfo.PodInfo, nil)
}
})
@@ -585,7 +585,7 @@ func (p *PriorityQueue) Add(logger klog.Logger, pod *v1.Pod) {
defer p.lock.Unlock()
pInfo := p.newQueuedPodInfo(pod)
if added := p.moveToActiveQ(logger, pInfo, framework.PodAdd); added {
if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodAdd.Label()); added {
p.activeQ.broadcast()
}
}
@@ -660,7 +660,7 @@ func (p *PriorityQueue) determineSchedulingHintForInFlightPod(logger klog.Logger
// check if there is an event that makes this Pod schedulable based on pInfo.UnschedulablePlugins.
queueingStrategy := queueSkip
for _, e := range events {
logger.V(5).Info("Checking event for in-flight pod", "pod", klog.KObj(pInfo.Pod), "event", e.event.Label)
logger.V(5).Info("Checking event for in-flight pod", "pod", klog.KObj(pInfo.Pod), "event", e.event.Label())
switch p.isPodWorthRequeuing(logger, pInfo, e.event, e.oldObj, e.newObj) {
case queueSkip:
@@ -818,7 +818,7 @@ func (p *PriorityQueue) flushUnschedulablePodsLeftover(logger klog.Logger) {
}
if len(podsToMove) > 0 {
p.movePodsToActiveOrBackoffQueue(logger, podsToMove, framework.UnschedulableTimeout, nil, nil)
p.movePodsToActiveOrBackoffQueue(logger, podsToMove, framework.EventUnschedulableTimeout, nil, nil)
}
}
@@ -878,13 +878,15 @@ func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) {
p.lock.Lock()
defer p.lock.Unlock()
var events []framework.ClusterEvent
if p.isSchedulingQueueHintEnabled {
events = framework.PodSchedulingPropertiesChange(newPod, oldPod)
// The inflight pod will be requeued using the latest version from the informer cache, which matches what the event delivers.
// Record this update as Pod/Update because
// Record this Pod update because
// this update may make the Pod schedulable in case it gets rejected and comes back to the queue.
// We can clean it up once we change updatePodInSchedulingQueue to call MoveAllToActiveOrBackoffQueue.
// See https://github.com/kubernetes/kubernetes/pull/125578#discussion_r1648338033 for more context.
if exists := p.activeQ.addEventIfPodInFlight(oldPod, newPod, framework.UnscheduledPodUpdate); exists {
if exists := p.activeQ.addEventsIfPodInFlight(oldPod, newPod, events); exists {
logger.V(6).Info("The pod doesn't be queued for now because it's being scheduled and will be queued back if necessary", "pod", klog.KObj(newPod))
return
}
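
The events slice above comes from framework.PodSchedulingPropertiesChange. As a hedged illustration of what such a diff-to-events helper looks like (the fields and labels below are invented for the sketch, not the framework's real set):

package main

import (
	"fmt"
	"reflect"
)

// Toy pod with two scheduling-relevant fields; the real helper inspects
// labels, tolerations, scheduling gates, and more.
type pod struct {
	Labels      map[string]string
	Tolerations []string
}

type clusterEvent struct{ label string }

// podSchedulingPropertiesChange returns one event per field whose change may
// affect schedulability, falling back to a generic update event.
func podSchedulingPropertiesChange(newPod, oldPod pod) []clusterEvent {
	var events []clusterEvent
	if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) {
		events = append(events, clusterEvent{"UnscheduledPodUpdateLabel"})
	}
	if !reflect.DeepEqual(newPod.Tolerations, oldPod.Tolerations) {
		events = append(events, clusterEvent{"UnscheduledPodUpdateToleration"})
	}
	if len(events) == 0 {
		events = append(events, clusterEvent{"UnscheduledPodUpdate"})
	}
	return events
}

func main() {
	oldPod := pod{Labels: map[string]string{"app": "v1"}}
	newPod := pod{Labels: map[string]string{"app": "v2"}, Tolerations: []string{"node.kubernetes.io/unreachable"}}
	for _, ev := range podSchedulingPropertiesChange(newPod, oldPod) {
		fmt.Println(ev.label)
	}
}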
@@ -917,12 +919,11 @@ func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) {
// whether the update may make the pods schedulable.
// Plugins have to implement a QueueingHint for Pod/Update event
// if the rejection from them could be resolved by updating unscheduled Pods itself.
events := framework.PodSchedulingPropertiesChange(newPod, oldPod)
for _, evt := range events {
hint := p.isPodWorthRequeuing(logger, pInfo, evt, oldPod, newPod)
queue := p.requeuePodViaQueueingHint(logger, pInfo, hint, evt.Label)
queue := p.requeuePodViaQueueingHint(logger, pInfo, hint, evt.Label())
if queue != unschedulablePods {
logger.V(5).Info("Pod moved to an internal scheduling queue because the Pod is updated", "pod", klog.KObj(newPod), "event", evt.Label, "queue", queue)
logger.V(5).Info("Pod moved to an internal scheduling queue because the Pod is updated", "pod", klog.KObj(newPod), "event", evt.Label(), "queue", queue)
p.unschedulablePods.delete(pInfo.Pod, gated)
}
if queue == activeQ {
@@ -936,7 +937,7 @@ func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) {
if p.isPodBackingoff(pInfo) {
p.podBackoffQ.AddOrUpdate(pInfo)
p.unschedulablePods.delete(pInfo.Pod, gated)
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", framework.PodUpdate, "queue", backoffQ)
logger.V(5).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", framework.EventUnscheduledPodUpdate.Label(), "queue", backoffQ)
return
}
@@ -952,7 +953,7 @@ func (p *PriorityQueue) Update(logger klog.Logger, oldPod, newPod *v1.Pod) {
}
// If pod is not in any of the queues, we put it in the active queue.
pInfo := p.newQueuedPodInfo(newPod)
if added := p.moveToActiveQ(logger, pInfo, framework.PodUpdate); added {
if added := p.moveToActiveQ(logger, pInfo, framework.EventUnscheduledPodUpdate.Label()); added {
p.activeQ.broadcast()
}
}
@@ -980,7 +981,7 @@ func (p *PriorityQueue) AssignedPodAdded(logger klog.Logger, pod *v1.Pod) {
// Pre-filter Pods to move by getUnschedulablePodsWithCrossTopologyTerm
// because Pod related events shouldn't make Pods that were rejected by a single-node scheduling requirement schedulable.
p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithCrossTopologyTerm(logger, pod), framework.AssignedPodAdd, nil, pod)
p.movePodsToActiveOrBackoffQueue(logger, p.getUnschedulablePodsWithCrossTopologyTerm(logger, pod), framework.EventAssignedPodAdd, nil, pod)
p.lock.Unlock()
}
@@ -991,7 +992,7 @@ func (p *PriorityQueue) AssignedPodUpdated(logger klog.Logger, oldPod, newPod *v
if event.Resource == framework.Pod && event.ActionType&framework.UpdatePodScaleDown != 0 {
// In this case, we don't want to pre-filter Pods by getUnschedulablePodsWithCrossTopologyTerm
// because Pod related events may make Pods that were rejected by NodeResourceFit schedulable.
p.moveAllToActiveOrBackoffQueue(logger, framework.AssignedPodUpdate, oldPod, newPod, nil)
p.moveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodUpdate, oldPod, newPod, nil)
} else {
// Pre-filter Pods to move by getUnschedulablePodsWithCrossTopologyTerm
// because Pod related events only make Pods rejected by cross topology term schedulable.
@@ -1093,13 +1094,13 @@ func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podIn
schedulingHint := p.isPodWorthRequeuing(logger, pInfo, event, oldObj, newObj)
if schedulingHint == queueSkip {
// QueueingHintFn determined that this Pod isn't worth putting to activeQ or backoffQ by this event.
logger.V(5).Info("Event is not making pod schedulable", "pod", klog.KObj(pInfo.Pod), "event", event.Label)
logger.V(5).Info("Event is not making pod schedulable", "pod", klog.KObj(pInfo.Pod), "event", event.Label())
continue
}
p.unschedulablePods.delete(pInfo.Pod, pInfo.Gated)
queue := p.requeuePodViaQueueingHint(logger, pInfo, schedulingHint, event.Label)
logger.V(4).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event.Label, "queue", queue, "hint", schedulingHint)
queue := p.requeuePodViaQueueingHint(logger, pInfo, schedulingHint, event.Label())
logger.V(4).Info("Pod moved to an internal scheduling queue", "pod", klog.KObj(pInfo.Pod), "event", event.Label(), "queue", queue, "hint", schedulingHint)
if queue == activeQ {
activated = true
}
@@ -1112,7 +1113,7 @@ func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(logger klog.Logger, podIn
// AddUnschedulableIfNotPresent we need to know whether events were
// observed while scheduling them.
if added := p.activeQ.addEventIfAnyInFlight(oldObj, newObj, event); added {
logger.V(5).Info("Event received while pods are in flight", "event", event.Label)
logger.V(5).Info("Event received while pods are in flight", "event", event.Label())
}
}

View File

@@ -58,7 +58,16 @@ const queueMetricMetadata = `
`
var (
NodeAllEvent = framework.ClusterEvent{Resource: framework.Node, ActionType: framework.All}
// nodeAdd is the event when a new node is added to the cluster.
nodeAdd = framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add}
// pvAdd is the event when a persistent volume is added in the cluster.
pvAdd = framework.ClusterEvent{Resource: framework.PersistentVolume, ActionType: framework.Add}
// pvUpdate is the event when a persistent volume is updated in the cluster.
pvUpdate = framework.ClusterEvent{Resource: framework.PersistentVolume, ActionType: framework.Update}
// pvcAdd is the event when a persistent volume claim is added in the cluster.
pvcAdd = framework.ClusterEvent{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}
// csiNodeUpdate is the event when a CSI node is updated in the cluster.
csiNodeUpdate = framework.ClusterEvent{Resource: framework.CSINode, ActionType: framework.Update}
lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
mediumPriority = (lowPriority + highPriority) / 2
@@ -210,13 +219,13 @@ func Test_InFlightPods(t *testing.T) {
// This Pod shouldn't be added to inFlightPods because SchedulingQueueHint is disabled.
{podPopped: pod1},
// This event shouldn't be added to inFlightEvents because SchedulingQueueHint is disabled.
{eventHappens: &framework.PvAdd},
{eventHappens: &pvAdd},
},
wantInFlightPods: nil,
wantInFlightEvents: nil,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -231,18 +240,18 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
// This won't be added to inFlightEvents because no inFlightPods at this point.
{eventHappens: &framework.PvcAdd},
{eventHappens: &pvcAdd},
{podPopped: pod1},
// This gets added for the pod.
{eventHappens: &framework.PvAdd},
// This doesn't get added because no plugin is interested in framework.PvUpdate.
{eventHappens: &framework.PvUpdate},
{eventHappens: &pvAdd},
// This doesn't get added because no plugin is interested in PvUpdate.
{eventHappens: &pvUpdate},
},
wantInFlightPods: []*v1.Pod{pod1},
wantInFlightEvents: []interface{}{pod1, framework.PvAdd},
wantInFlightEvents: []interface{}{pod1, pvAdd},
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -257,32 +266,32 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1, pod2},
actions: []action{
// This won't be added to inFlightEvents because no inFlightPods at this point.
{eventHappens: &framework.PvcAdd},
{eventHappens: &pvcAdd},
{podPopped: pod1},
{eventHappens: &framework.PvAdd},
{eventHappens: &pvAdd},
{podPopped: pod2},
{eventHappens: &framework.NodeAdd},
{eventHappens: &nodeAdd},
// This pod will be requeued to backoffQ because no plugin is registered as an unschedulable plugin.
{podEnqueued: newQueuedPodInfoForLookup(pod1)},
},
wantBackoffQPodNames: []string{"targetpod"},
wantInFlightPods: []*v1.Pod{pod2}, // only pod2 is registered because pod1 is already enqueued back.
wantInFlightEvents: []interface{}{pod2, framework.NodeAdd},
wantInFlightEvents: []interface{}{pod2, nodeAdd},
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.PvcAdd: {
pvcAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -297,14 +306,14 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1, pod2},
actions: []action{
// This won't be added to inFlightEvents because no inFlightPods at this point.
{eventHappens: &framework.PvcAdd},
{eventHappens: &pvcAdd},
{podPopped: pod1},
{eventHappens: &framework.PvAdd},
{eventHappens: &pvAdd},
{podPopped: pod2},
{eventHappens: &framework.NodeAdd},
{eventHappens: &nodeAdd},
// This pod will be requeued to backoffQ because no plugin is registered as an unschedulable plugin.
{podEnqueued: newQueuedPodInfoForLookup(pod1)},
{eventHappens: &framework.CSINodeUpdate},
{eventHappens: &csiNodeUpdate},
// This pod will be requeued to backoffQ because no plugin is registered as an unschedulable plugin.
{podEnqueued: newQueuedPodInfoForLookup(pod2)},
},
@@ -312,25 +321,25 @@ func Test_InFlightPods(t *testing.T) {
wantInFlightPods: nil, // empty
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.PvcAdd: {
pvcAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.CSINodeUpdate: {
csiNodeUpdate: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -345,34 +354,34 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1, pod2, pod3},
actions: []action{
// This won't be added to inFlightEvents because no inFlightPods at this point.
{eventHappens: &framework.PvcAdd},
{eventHappens: &pvcAdd},
{podPopped: pod1},
{eventHappens: &framework.PvAdd},
{eventHappens: &pvAdd},
{podPopped: pod2},
{eventHappens: &framework.NodeAdd},
{eventHappens: &nodeAdd},
// This Pod won't be requeued again.
{podPopped: pod3},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{podEnqueued: newQueuedPodInfoForLookup(pod2)},
},
wantBackoffQPodNames: []string{"targetpod2"},
wantInFlightPods: []*v1.Pod{pod1, pod3},
wantInFlightEvents: []interface{}{pod1, framework.PvAdd, framework.NodeAdd, pod3, framework.AssignedPodAdd},
wantInFlightEvents: []interface{}{pod1, pvAdd, nodeAdd, pod3, framework.EventAssignedPodAdd},
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -386,7 +395,7 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
{podPopped: pod1},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{podEnqueued: newQueuedPodInfoForLookup(pod1, "fooPlugin1")},
},
wantBackoffQPodNames: []string{"targetpod"},
@@ -396,7 +405,7 @@ func Test_InFlightPods(t *testing.T) {
"": {
// This hint fn tells that this event doesn't make a Pod schedulable.
// However, this QueueingHintFn will be ignored actually because SchedulingQueueHint is disabled.
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnSkip,
@@ -410,9 +419,9 @@ func Test_InFlightPods(t *testing.T) {
isSchedulingQueueHintEnabled: true,
initialPods: []*v1.Pod{pod1},
actions: []action{
{eventHappens: &framework.WildCardEvent},
{eventHappens: &framework.EventUnschedulableTimeout},
{podPopped: pod1},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
// This Pod won't be requeued to activeQ/backoffQ because fooPlugin1 returns QueueSkip.
{podEnqueued: newQueuedPodInfoForLookup(pod1, "fooPlugin1")},
},
@@ -423,7 +432,7 @@ func Test_InFlightPods(t *testing.T) {
"": {
// fooPlugin1 has a queueing hint function for framework.EventAssignedPodAdd,
// but the hint fn tells that this event doesn't make a Pod schedulable.
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnSkip,
@@ -438,7 +447,7 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
{podPopped: pod1},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{podEnqueued: newQueuedPodInfoForLookup(pod1)},
},
wantBackoffQPodNames: []string{"targetpod"},
@@ -447,7 +456,7 @@ func Test_InFlightPods(t *testing.T) {
queueingHintMap: QueueingHintMapPerProfile{
"": {
// It will be ignored because there is no failed plugin.
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -462,7 +471,7 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
{podPopped: pod1},
{eventHappens: &framework.NodeAdd},
{eventHappens: &nodeAdd},
{podEnqueued: newQueuedPodInfoForLookup(pod1, "fooPlugin1")},
},
wantUnschedPodPoolPodNames: []string{"targetpod"},
@@ -470,10 +479,10 @@ func Test_InFlightPods(t *testing.T) {
wantInFlightEvents: nil,
queueingHintMap: QueueingHintMapPerProfile{
"": {
// fooPlugin1 has no queueing hint function for framework.NodeAdd.
framework.AssignedPodAdd: {
// fooPlugin1 has no queueing hint function for NodeAdd.
framework.EventAssignedPodAdd: {
{
// It will be ignored because the event is not framework.NodeAdd.
// It will be ignored because the event is not NodeAdd.
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
@@ -487,7 +496,7 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
{podPopped: pod1},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{podEnqueued: newQueuedPodInfoForLookup(pod1, "fooPlugin1")},
},
wantUnschedPodPoolPodNames: []string{"targetpod"},
@@ -497,7 +506,7 @@ func Test_InFlightPods(t *testing.T) {
"": {
// fooPlugin1 has a queueing hint function for framework.EventAssignedPodAdd,
// but the hint fn tells that this event doesn't make a Pod schedulable.
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnSkip,
@@ -512,7 +521,7 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
{podPopped: pod1},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{podEnqueued: &framework.QueuedPodInfo{
PodInfo: mustNewPodInfo(pod1),
UnschedulablePlugins: sets.New("fooPlugin2", "fooPlugin3"),
@@ -524,7 +533,7 @@ func Test_InFlightPods(t *testing.T) {
wantInFlightEvents: nil,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
PluginName: "fooPlugin3",
QueueingHintFn: queueHintReturnSkip,
@@ -548,7 +557,7 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1},
actions: []action{
{podPopped: pod1},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{podEnqueued: newQueuedPodInfoForLookup(pod1, "fooPlugin1", "fooPlugin2")},
},
wantBackoffQPodNames: []string{"targetpod"},
@@ -556,7 +565,7 @@ func Test_InFlightPods(t *testing.T) {
wantInFlightEvents: nil,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
// It will be ignored because the hint fn returns Skip, which is weaker than queueHintReturnQueue from fooPlugin1.
PluginName: "fooPlugin2",
@@ -577,9 +586,9 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1, pod2},
actions: []action{
{callback: func(t *testing.T, q *PriorityQueue) { poppedPod = popPod(t, logger, q, pod1) }},
{eventHappens: &framework.NodeAdd},
{eventHappens: &nodeAdd},
{callback: func(t *testing.T, q *PriorityQueue) { poppedPod2 = popPod(t, logger, q, pod2) }},
{eventHappens: &framework.AssignedPodAdd},
{eventHappens: &framework.EventAssignedPodAdd},
{callback: func(t *testing.T, q *PriorityQueue) {
logger, _ := ktesting.NewTestContext(t)
err := q.AddUnschedulableIfNotPresent(logger, poppedPod, q.SchedulingCycle())
@@ -602,7 +611,7 @@ func Test_InFlightPods(t *testing.T) {
wantInFlightEvents: nil,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.AssignedPodAdd: {
framework.EventAssignedPodAdd: {
{
// It will be ignored because the hint fn returns QueueSkip, which is weaker than queueHintReturnQueueImmediately from fooPlugin1.
PluginName: "fooPlugin3",
@@ -638,7 +647,7 @@ func Test_InFlightPods(t *testing.T) {
t.Errorf("Unexpected error from AddUnschedulableIfNotPresent: %v", err)
}
}},
{eventHappens: &framework.PvAdd}, // Active again.
{eventHappens: &pvAdd}, // Active again.
{callback: func(t *testing.T, q *PriorityQueue) {
poppedPod = popPod(t, logger, q, pod1)
if len(poppedPod.UnschedulablePlugins) > 0 {
@@ -655,7 +664,7 @@ func Test_InFlightPods(t *testing.T) {
},
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
// The hint fn tells that this event makes a Pod schedulable immediately.
PluginName: "fooPlugin1",
@@ -674,9 +683,9 @@ func Test_InFlightPods(t *testing.T) {
initialPods: []*v1.Pod{pod1, pod2},
actions: []action{
// This won't be added to inFlightEvents because no inFlightPods at this point.
{eventHappens: &framework.PvcAdd},
{eventHappens: &pvcAdd},
{podPopped: pod1},
{eventHappens: &framework.PvAdd},
{eventHappens: &pvAdd},
{podPopped: pod2},
// Simulate a bug, putting pod into activeQ, while pod is being scheduled.
{callback: func(t *testing.T, q *PriorityQueue) {
@@ -696,10 +705,10 @@ func Test_InFlightPods(t *testing.T) {
t.Fatalf("activeQ should be empty, but got: %v", q.activeQ.list())
}
}},
{eventHappens: &framework.NodeAdd},
{eventHappens: &nodeAdd},
// This pod will be requeued to backoffQ because no plugin is registered as an unschedulable plugin.
{podEnqueued: newQueuedPodInfoForLookup(pod1)},
{eventHappens: &framework.CSINodeUpdate},
{eventHappens: &csiNodeUpdate},
// This pod will be requeued to backoffQ because no plugin is registered as an unschedulable plugin.
{podEnqueued: newQueuedPodInfoForLookup(pod2)},
{podEnqueued: newQueuedPodInfoForLookup(pod3)},
@@ -708,25 +717,25 @@ func Test_InFlightPods(t *testing.T) {
wantInFlightPods: nil, // should be empty
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.PvcAdd: {
pvcAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
},
framework.CSINodeUpdate: {
csiNodeUpdate: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
@@ -801,7 +810,7 @@ func Test_InFlightPods(t *testing.T) {
}
wantInFlightEvents = append(wantInFlightEvents, value)
}
if diff := cmp.Diff(wantInFlightEvents, q.activeQ.listInFlightEvents(), cmp.AllowUnexported(clusterEvent{})); diff != "" {
if diff := cmp.Diff(wantInFlightEvents, q.activeQ.listInFlightEvents(), cmp.AllowUnexported(clusterEvent{}), cmpopts.EquateComparable(framework.ClusterEvent{})); diff != "" {
t.Errorf("Unexpected diff in inFlightEvents (-want, +got):\n%s", diff)
}
@@ -867,7 +876,7 @@ func TestPop(t *testing.T) {
pod := st.MakePod().Name("targetpod").UID("pod1").Obj()
queueingHintMap := QueueingHintMapPerProfile{
"": {
framework.PvAdd: {
pvAdd: {
{
// The hint fn tells that this event makes a Pod schedulable.
PluginName: "fooPlugin1",
@@ -895,7 +904,7 @@ func TestPop(t *testing.T) {
}
// Activate it again.
q.MoveAllToActiveOrBackoffQueue(logger, framework.PvAdd, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, pvAdd, nil, nil, nil)
// Now check result of Pop.
poppedPod = popPod(t, logger, q, pod)
@@ -964,7 +973,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) {
}
// move all pods to active queue when we were trying to schedule them
q.MoveAllToActiveOrBackoffQueue(logger, framework.WildCardEvent, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
oldCycle := q.SchedulingCycle()
firstPod, _ := q.Pop(logger)
@@ -1022,13 +1031,14 @@ func TestPriorityQueue_Pop(t *testing.T) {
}
func TestPriorityQueue_Update(t *testing.T) {
metrics.Register()
c := testingclock.NewFakeClock(time.Now())
queuePlugin := "queuePlugin"
skipPlugin := "skipPlugin"
queueingHintMap := QueueingHintMapPerProfile{
"": {
framework.UnscheduledPodUpdate: {
framework.EventUnscheduledPodUpdate: {
{
PluginName: queuePlugin,
QueueingHintFn: queueHintReturnQueue,
@@ -1054,14 +1064,6 @@ func TestPriorityQueue_Update(t *testing.T) {
// schedulingHintsEnablement shows which value of QHint feature gate we test a test case with.
schedulingHintsEnablement []bool
}{
{
name: "add highPriorityPodInfo to activeQ",
wantQ: activeQ,
prepareFunc: func(t *testing.T, logger klog.Logger, q *PriorityQueue) (oldPod, newPod *v1.Pod) {
return nil, highPriorityPodInfo.Pod
},
schedulingHintsEnablement: []bool{false, true},
},
{
name: "Update pod that didn't exist in the queue",
wantQ: activeQ,
@@ -1085,7 +1087,7 @@ func TestPriorityQueue_Update(t *testing.T) {
name: "When updating a pod that is already in activeQ, the pod should remain in activeQ after Update()",
wantQ: activeQ,
prepareFunc: func(t *testing.T, logger klog.Logger, q *PriorityQueue) (oldPod, newPod *v1.Pod) {
q.Update(logger, nil, highPriorityPodInfo.Pod)
q.Add(logger, highPriorityPodInfo.Pod)
return highPriorityPodInfo.Pod, highPriorityPodInfo.Pod
},
schedulingHintsEnablement: []bool{false, true},
@@ -1222,7 +1224,7 @@ func TestPriorityQueue_UpdateWhenInflight(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SchedulerQueueingHints, true)
m := makeEmptyQueueingHintMapPerProfile()
// fakePlugin could change its scheduling result by any updates in Pods.
m[""][framework.UnscheduledPodUpdate] = []*QueueingHintFunction{
m[""][framework.EventUnscheduledPodUpdate] = []*QueueingHintFunction{
{
PluginName: "fakePlugin",
QueueingHintFn: queueHintReturnQueue,
@@ -1433,7 +1435,7 @@ func TestPriorityQueue_addToActiveQ(t *testing.T) {
m := map[string][]framework.PreEnqueuePlugin{"": tt.plugins}
q := NewTestQueueWithObjects(ctx, newDefaultQueueSort(), []runtime.Object{tt.pod}, WithPreEnqueuePluginMap(m),
WithPodInitialBackoffDuration(time.Second*30), WithPodMaxBackoffDuration(time.Second*60))
got := q.moveToActiveQ(logger, q.newQueuedPodInfo(tt.pod), framework.PodAdd)
got := q.moveToActiveQ(logger, q.newQueuedPodInfo(tt.pod), framework.EventUnscheduledPodAdd.Label())
if got != tt.wantSuccess {
t.Errorf("Unexpected result: want %v, but got %v", tt.wantSuccess, got)
}
@@ -1460,11 +1462,11 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
}{
{
name: "baseline",
moveEvent: framework.UnschedulableTimeout,
moveEvent: framework.EventUnschedulableTimeout,
},
{
name: "worst",
moveEvent: framework.NodeAdd,
moveEvent: nodeAdd,
},
{
name: "random",
@@ -1478,24 +1480,24 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
}
events := []framework.ClusterEvent{
framework.NodeAdd,
framework.NodeTaintChange,
framework.NodeAllocatableChange,
framework.NodeConditionChange,
framework.NodeLabelChange,
framework.NodeAnnotationChange,
framework.PvcAdd,
framework.PvcUpdate,
framework.PvAdd,
framework.PvUpdate,
framework.StorageClassAdd,
framework.StorageClassUpdate,
framework.CSINodeAdd,
framework.CSINodeUpdate,
framework.CSIDriverAdd,
framework.CSIDriverUpdate,
framework.CSIStorageCapacityAdd,
framework.CSIStorageCapacityUpdate,
{Resource: framework.Node, ActionType: framework.Add},
{Resource: framework.Node, ActionType: framework.UpdateNodeTaint},
{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable},
{Resource: framework.Node, ActionType: framework.UpdateNodeCondition},
{Resource: framework.Node, ActionType: framework.UpdateNodeLabel},
{Resource: framework.Node, ActionType: framework.UpdateNodeAnnotation},
{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add},
{Resource: framework.PersistentVolumeClaim, ActionType: framework.Update},
{Resource: framework.PersistentVolume, ActionType: framework.Add},
{Resource: framework.PersistentVolume, ActionType: framework.Update},
{Resource: framework.StorageClass, ActionType: framework.Add},
{Resource: framework.StorageClass, ActionType: framework.Update},
{Resource: framework.CSINode, ActionType: framework.Add},
{Resource: framework.CSINode, ActionType: framework.Update},
{Resource: framework.CSIDriver, ActionType: framework.Add},
{Resource: framework.CSIDriver, ActionType: framework.Update},
{Resource: framework.CSIStorageCapacity, ActionType: framework.Add},
{Resource: framework.CSIStorageCapacity, ActionType: framework.Update},
}
pluginNum := 20
@@ -1514,7 +1516,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
c := testingclock.NewFakeClock(time.Now())
m := makeEmptyQueueingHintMapPerProfile()
// - All plugins registered for events[0], which is framework.NodeAdd.
// - All plugins registered for events[0], which is NodeAdd.
// - 1/2 of plugins registered for events[1]
// - 1/3 of plugins registered for events[2]
// - ...
@@ -1638,7 +1640,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithQueueingHint(t *testing.
t.Run(test.name, func(t *testing.T) {
logger, ctx := ktesting.NewTestContext(t)
m := makeEmptyQueueingHintMapPerProfile()
m[""][framework.NodeAdd] = []*QueueingHintFunction{
m[""][nodeAdd] = []*QueueingHintFunction{
{
PluginName: "foo",
QueueingHintFn: test.hint,
@@ -1657,7 +1659,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithQueueingHint(t *testing.
}
cl.Step(test.duration)
q.MoveAllToActiveOrBackoffQueue(logger, framework.NodeAdd, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, nodeAdd, nil, nil, nil)
if q.podBackoffQ.Len() == 0 && test.expectedQ == backoffQ {
t.Fatalf("expected pod to be queued to backoffQ, but it was not")
@@ -1682,7 +1684,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
m := makeEmptyQueueingHintMapPerProfile()
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SchedulerQueueingHints, true)
m[""][framework.NodeAdd] = []*QueueingHintFunction{
m[""][nodeAdd] = []*QueueingHintFunction{
{
PluginName: "fooPlugin",
QueueingHintFn: queueHintReturnQueue,
@@ -1737,7 +1739,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
expectInFlightPods(t, q)
// This NodeAdd event moves unschedulablePodInfo and highPriorityPodInfo to the backoffQ,
// because of the queueing hint function registered for NodeAdd/fooPlugin.
q.MoveAllToActiveOrBackoffQueue(logger, framework.NodeAdd, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, nodeAdd, nil, nil, nil)
q.Add(logger, medPriorityPodInfo.Pod)
if q.activeQ.len() != 1 {
t.Errorf("Expected 1 item to be in activeQ, but got: %v", q.activeQ.len())
@@ -1808,7 +1810,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
// and the pods will be moved into activeQ.
c.Step(q.podInitialBackoffDuration)
q.flushBackoffQCompleted(logger) // flush the completed backoffQ to move hpp1 to activeQ.
q.MoveAllToActiveOrBackoffQueue(logger, framework.NodeAdd, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, nodeAdd, nil, nil, nil)
if q.activeQ.len() != 4 {
t.Errorf("Expected 4 items to be in activeQ, but got: %v", q.activeQ.len())
}
@@ -1829,7 +1831,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithOutQueueingHint(t *testi
defer cancel()
m := makeEmptyQueueingHintMapPerProfile()
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SchedulerQueueingHints, false)
m[""][framework.NodeAdd] = []*QueueingHintFunction{
m[""][nodeAdd] = []*QueueingHintFunction{
{
PluginName: "fooPlugin",
QueueingHintFn: queueHintReturnQueue,
@@ -1863,7 +1865,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithOutQueueingHint(t *testi
}
// This NodeAdd event moves unschedulablePodInfo and highPriorityPodInfo to the backoffQ,
// because of the queueing hint function registered for NodeAdd/fooPlugin.
q.MoveAllToActiveOrBackoffQueue(logger, framework.NodeAdd, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, nodeAdd, nil, nil, nil)
if q.activeQ.len() != 1 {
t.Errorf("Expected 1 item to be in activeQ, but got: %v", q.activeQ.len())
}
@@ -1914,7 +1916,7 @@ func TestPriorityQueue_MoveAllToActiveOrBackoffQueueWithOutQueueingHint(t *testi
// and the pods will be moved into activeQ.
c.Step(q.podInitialBackoffDuration)
q.flushBackoffQCompleted(logger) // flush the completed backoffQ to move hpp1 to activeQ.
q.MoveAllToActiveOrBackoffQueue(logger, framework.NodeAdd, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, nodeAdd, nil, nil, nil)
if q.activeQ.len() != 4 {
t.Errorf("Expected 4 items to be in activeQ, but got: %v", q.activeQ.len())
}
@@ -2012,7 +2014,7 @@ func TestPriorityQueue_AssignedPodAdded_(t *testing.T) {
c := testingclock.NewFakeClock(time.Now())
m := makeEmptyQueueingHintMapPerProfile()
m[""][framework.AssignedPodAdd] = []*QueueingHintFunction{
m[""][framework.EventAssignedPodAdd] = []*QueueingHintFunction{
{
PluginName: "fakePlugin",
QueueingHintFn: queueHintReturnQueue,
@@ -2171,7 +2173,7 @@ func TestPriorityQueue_PendingPods(t *testing.T) {
t.Errorf("Unexpected pending pods summary: want %v, but got %v.", wantSummary, gotSummary)
}
// Move all to active queue. We should still see the same set of pods.
q.MoveAllToActiveOrBackoffQueue(logger, framework.WildCardEvent, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
gotPods, gotSummary = q.PendingPods()
if diff := cmp.Diff(expectedSet, makeSet(gotPods)); diff != "" {
t.Errorf("Unexpected list of pending Pods (-want, +got):\n%s", diff)
@@ -2456,7 +2458,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
}
c.Step(DefaultPodInitialBackoffDuration)
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue(logger, framework.UnschedulableTimeout, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
// Simulation is over. Now let's pop all pods. The pod popped first should be
// the last one we pop here.
for i := 0; i < 5; i++ {
@@ -2507,7 +2509,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
// Move clock to make the unschedulable pods complete backoff.
c.Step(DefaultPodInitialBackoffDuration + time.Second)
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue(logger, framework.UnschedulableTimeout, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
// Simulate a pod being popped by the scheduler,
// At this time, unschedulable pod should be popped.
@@ -2540,7 +2542,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
// Move clock to make the unschedulable pods complete backoff.
c.Step(DefaultPodInitialBackoffDuration + time.Second)
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue(logger, framework.UnschedulableTimeout, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
// At this time, newerPod should be popped
// because it is the oldest tried pod.
@@ -2587,7 +2589,7 @@ func TestHighPriorityBackoff(t *testing.T) {
t.Fatalf("unexpected error from AddUnschedulableIfNotPresent: %v", err)
}
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue(logger, framework.WildCardEvent, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
p, err = q.Pop(logger)
if err != nil {
@@ -2603,7 +2605,7 @@ func TestHighPriorityBackoff(t *testing.T) {
func TestHighPriorityFlushUnschedulablePodsLeftover(t *testing.T) {
c := testingclock.NewFakeClock(time.Now())
m := makeEmptyQueueingHintMapPerProfile()
m[""][framework.NodeAdd] = []*QueueingHintFunction{
m[""][nodeAdd] = []*QueueingHintFunction{
{
PluginName: "fakePlugin",
QueueingHintFn: queueHintReturnQueue,
@@ -2819,7 +2821,7 @@ var (
queue.podBackoffQ.AddOrUpdate(pInfo)
}
moveAllToActiveOrBackoffQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) {
queue.MoveAllToActiveOrBackoffQueue(logger, framework.UnschedulableTimeout, nil, nil, nil)
queue.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
}
flushBackoffQ = func(t *testing.T, logger klog.Logger, queue *PriorityQueue, _ *framework.QueuedPodInfo) {
queue.clock.(*testingclock.FakeClock).Step(2 * time.Second)
@@ -3349,7 +3351,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
add,
},
want: `
scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 3
scheduler_queue_incoming_pods_total{event="UnschedulablePodAdd",queue="active"} 3
`,
},
{
@@ -3357,7 +3359,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
operations: []operation{
popAndRequeueAsUnschedulable,
},
want: `scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 3
want: `scheduler_queue_incoming_pods_total{event="UnschedulablePodAdd",queue="active"} 3
scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
`,
},
@@ -3367,7 +3369,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
popAndRequeueAsUnschedulable,
moveAllToActiveOrBackoffQ,
},
want: `scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 3
want: `scheduler_queue_incoming_pods_total{event="UnschedulablePodAdd",queue="active"} 3
scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
scheduler_queue_incoming_pods_total{event="UnschedulableTimeout",queue="backoff"} 3
`,
@@ -3379,7 +3381,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
moveClockForward,
moveAllToActiveOrBackoffQ,
},
want: `scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 3
want: `scheduler_queue_incoming_pods_total{event="UnschedulablePodAdd",queue="active"} 3
scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
scheduler_queue_incoming_pods_total{event="UnschedulableTimeout",queue="active"} 3
`,
@@ -3391,7 +3393,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
moveClockForward,
flushBackoffQ,
},
want: `scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 3
want: `scheduler_queue_incoming_pods_total{event="UnschedulablePodAdd",queue="active"} 3
scheduler_queue_incoming_pods_total{event="BackoffComplete",queue="active"} 3
scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="backoff"} 3
`,
@@ -3461,7 +3463,7 @@ func TestBackOffFlow(t *testing.T) {
}
// An event happens.
q.MoveAllToActiveOrBackoffQueue(logger, framework.UnschedulableTimeout, nil, nil, nil)
q.MoveAllToActiveOrBackoffQueue(logger, framework.EventUnschedulableTimeout, nil, nil, nil)
if !q.podBackoffQ.Has(podInfo) {
t.Errorf("pod %v is not in the backoff queue", podID)
@@ -3510,20 +3512,20 @@ func TestMoveAllToActiveOrBackoffQueue_PreEnqueueChecks(t *testing.T) {
{
name: "nil PreEnqueueCheck",
podInfos: podInfos,
event: framework.WildCardEvent,
event: framework.EventUnschedulableTimeout,
want: []string{"p0", "p1", "p2", "p3", "p4"},
},
{
name: "move Pods with priority greater than 2",
podInfos: podInfos,
event: framework.WildCardEvent,
event: framework.EventUnschedulableTimeout,
preEnqueueCheck: func(pod *v1.Pod) bool { return *pod.Spec.Priority >= 2 },
want: []string{"p2", "p3", "p4"},
},
{
name: "move Pods with even priority and greater than 2",
podInfos: podInfos,
event: framework.WildCardEvent,
event: framework.EventUnschedulableTimeout,
preEnqueueCheck: func(pod *v1.Pod) bool {
return *pod.Spec.Priority%2 == 0 && *pod.Spec.Priority >= 2
},
@@ -3532,7 +3534,7 @@ func TestMoveAllToActiveOrBackoffQueue_PreEnqueueChecks(t *testing.T) {
{
name: "move Pods with even and negative priority",
podInfos: podInfos,
event: framework.WildCardEvent,
event: framework.EventUnschedulableTimeout,
preEnqueueCheck: func(pod *v1.Pod) bool {
return *pod.Spec.Priority%2 == 0 && *pod.Spec.Priority < 0
},
@@ -3540,7 +3542,7 @@ func TestMoveAllToActiveOrBackoffQueue_PreEnqueueChecks(t *testing.T) {
{
name: "preCheck isn't called if the event is not interested by any plugins",
podInfos: podInfos,
event: framework.PvAdd, // No plugin is interested in this event.
event: pvAdd, // No plugin is interested in this event.
preEnqueueCheck: func(pod *v1.Pod) bool {
panic("preCheck shouldn't be called")
},
@@ -3692,17 +3694,17 @@ func Test_isPodWorthRequeuing(t *testing.T) {
UnschedulablePlugins: sets.New("fooPlugin1"),
PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()),
},
event: framework.NodeAdd,
event: nodeAdd,
oldObj: nil,
newObj: st.MakeNode().Obj(),
expected: queueSkip,
expectedExecutionCount: 0,
queueingHintMap: QueueingHintMapPerProfile{
"": {
// no queueing hint function for framework.NodeAdd.
framework.AssignedPodAdd: {
// no queueing hint function for NodeAdd.
framework.EventAssignedPodAdd: {
{
// It will be ignored because the event is not framework.NodeAdd.
// It will be ignored because the event is not NodeAdd.
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,
},
@@ -3716,14 +3718,14 @@ func Test_isPodWorthRequeuing(t *testing.T) {
UnschedulablePlugins: sets.New("fooPlugin1"),
PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()),
},
event: framework.NodeAdd,
event: nodeAdd,
oldObj: nil,
newObj: st.MakeNode().Obj(),
expected: queueAfterBackoff,
expectedExecutionCount: 1,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnErr,
@@ -3738,7 +3740,7 @@ func Test_isPodWorthRequeuing(t *testing.T) {
UnschedulablePlugins: sets.New("fooPlugin1"),
PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()),
},
event: framework.WildCardEvent,
event: framework.EventUnschedulableTimeout,
oldObj: nil,
newObj: st.MakeNode().Obj(),
expected: queueAfterBackoff,
@@ -3752,14 +3754,14 @@ func Test_isPodWorthRequeuing(t *testing.T) {
PendingPlugins: sets.New("fooPlugin2"),
PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()),
},
event: framework.NodeAdd,
event: nodeAdd,
oldObj: nil,
newObj: st.MakeNode().Node,
expected: queueImmediately,
expectedExecutionCount: 2,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
// It returns Queue and it's interpreted as queueAfterBackoff.
@@ -3790,14 +3792,14 @@ func Test_isPodWorthRequeuing(t *testing.T) {
UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2"),
PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()),
},
event: framework.NodeAdd,
event: nodeAdd,
oldObj: nil,
newObj: st.MakeNode().Obj(),
expected: queueAfterBackoff,
expectedExecutionCount: 2,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.NodeAdd: {
nodeAdd: {
{
// Skip will be ignored
PluginName: "fooPlugin1",
@@ -3818,14 +3820,14 @@ func Test_isPodWorthRequeuing(t *testing.T) {
UnschedulablePlugins: sets.New("fooPlugin1", "fooPlugin2"),
PodInfo: mustNewPodInfo(st.MakePod().Name("pod1").Namespace("ns1").UID("1").Obj()),
},
event: framework.NodeAdd,
event: nodeAdd,
oldObj: nil,
newObj: st.MakeNode().Node,
expected: queueSkip,
expectedExecutionCount: 2,
queueingHintMap: QueueingHintMapPerProfile{
"": {
framework.NodeAdd: {
nodeAdd: {
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnSkip,
@@ -3873,7 +3875,7 @@ func Test_isPodWorthRequeuing(t *testing.T) {
QueueingHintFn: queueHintReturnQueue,
},
},
framework.NodeAdd: { // not executed because NodeAdd is unrelated.
nodeAdd: { // not executed because NodeAdd is unrelated.
{
PluginName: "fooPlugin1",
QueueingHintFn: queueHintReturnQueue,

View File

@@ -47,8 +47,9 @@ import (
)
func (sched *Scheduler) addNodeToCache(obj interface{}) {
evt := framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add}
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.NodeAdd.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
node, ok := obj.(*v1.Node)
if !ok {
@@ -58,7 +59,7 @@ func (sched *Scheduler) addNodeToCache(obj interface{}) {
logger.V(3).Info("Add event for node", "node", klog.KObj(node))
nodeInfo := sched.Cache.AddNode(logger, node)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.NodeAdd, nil, node, preCheckForNode(nodeInfo))
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, nil, node, preCheckForNode(nodeInfo))
}
func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) {
@@ -88,13 +89,14 @@ func (sched *Scheduler) updateNodeInCache(oldObj, newObj interface{}) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, oldNode, newNode, preCheckForNode(nodeInfo))
movingDuration := metrics.SinceInSeconds(startMoving)
metrics.EventHandlingLatency.WithLabelValues(evt.Label).Observe(updatingDuration + movingDuration)
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(updatingDuration + movingDuration)
}
}
func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
evt := framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Delete}
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.NodeDelete.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
var node *v1.Node
@@ -113,7 +115,7 @@ func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
return
}
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.NodeDelete, node, nil, nil)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, node, nil, nil)
logger.V(3).Info("Delete event for node", "node", klog.KObj(node))
if err := sched.Cache.RemoveNode(logger, node); err != nil {
@@ -123,7 +125,7 @@ func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.UnscheduledPodAdd.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodAdd.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
pod := obj.(*v1.Pod)
@@ -133,7 +135,6 @@ func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) {
func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) {
start := time.Now()
logger := sched.logger
oldPod, newPod := oldObj.(*v1.Pod), newObj.(*v1.Pod)
// Bypass update event that carries identical objects; otherwise, a duplicated
@@ -142,10 +143,10 @@ func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) {
return
}
defer metrics.EventHandlingLatency.WithLabelValues(framework.UnscheduledPodUpdate.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodUpdate.Label()).Observe(metrics.SinceInSeconds(start))
for _, evt := range framework.PodSchedulingPropertiesChange(newPod, oldPod) {
if evt.Label != framework.UnscheduledPodUpdate.Label {
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label).Observe(metrics.SinceInSeconds(start))
if evt.Label() != framework.EventUnscheduledPodUpdate.Label() {
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
}
}
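
The loop above is a double-counting guard: the outer defer always records one generic UnscheduledPodUpdate sample, so only the more specific events get extra samples. A compressed, runnable sketch of that control flow, with observe standing in for the Prometheus histogram:

package main

import (
	"fmt"
	"time"
)

type event struct{ label string }

func (e event) Label() string { return e.label }

var eventUnscheduledPodUpdate = event{"UnscheduledPodUpdate"}

// observe stands in for metrics.EventHandlingLatency.WithLabelValues(...).Observe(...).
func observe(label string, start time.Time) {
	fmt.Printf("observed %s after %v\n", label, time.Since(start))
}

func updatePodInSchedulingQueue(events []event) {
	start := time.Now()
	// The generic update label is recorded exactly once, unconditionally.
	defer observe(eventUnscheduledPodUpdate.Label(), start)
	for _, evt := range events {
		// Specific events get their own samples; the generic one is skipped
		// here because the deferred call above already covers it.
		if evt.Label() != eventUnscheduledPodUpdate.Label() {
			defer observe(evt.Label(), start)
		}
	}
	// ... update the pod in the queue ...
}

func main() {
	updatePodInSchedulingQueue([]event{{"UnscheduledPodUpdateLabel"}, {"UnscheduledPodUpdate"}})
}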
@@ -163,7 +164,7 @@ func (sched *Scheduler) updatePodInSchedulingQueue(oldObj, newObj interface{}) {
func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.UnscheduledPodDelete.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventUnscheduledPodDelete.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
var pod *v1.Pod
@@ -195,13 +196,13 @@ func (sched *Scheduler) deletePodFromSchedulingQueue(obj interface{}) {
// removing it from the scheduler cache. In this case, signal an AssignedPodDelete
// event to immediately retry some unscheduled Pods.
if fwk.RejectWaitingPod(pod.UID) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.AssignedPodDelete, pod, nil, nil)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, nil)
}
}
func (sched *Scheduler) addPodToCache(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.AssignedPodAdd.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodAdd.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
pod, ok := obj.(*v1.Pod)
@@ -225,7 +226,7 @@ func (sched *Scheduler) addPodToCache(obj interface{}) {
// Here we use MoveAllToActiveOrBackoffQueue only when QueueingHint is enabled.
// (We cannot switch to MoveAllToActiveOrBackoffQueue right away because of throughput concern.)
if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.AssignedPodAdd, nil, pod, nil)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodAdd, nil, pod, nil)
} else {
sched.SchedulingQueue.AssignedPodAdded(logger, pod)
}
@@ -233,7 +234,7 @@ func (sched *Scheduler) addPodToCache(obj interface{}) {
func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.AssignedPodUpdate.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodUpdate.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
oldPod, ok := oldObj.(*v1.Pod)
@@ -274,13 +275,13 @@ func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) {
sched.SchedulingQueue.AssignedPodUpdated(logger, oldPod, newPod, evt)
}
movingDuration := metrics.SinceInSeconds(startMoving)
metrics.EventHandlingLatency.WithLabelValues(evt.Label).Observe(updatingDuration + movingDuration)
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(updatingDuration + movingDuration)
}
}
func (sched *Scheduler) deletePodFromCache(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(framework.AssignedPodDelete.Label).Observe(metrics.SinceInSeconds(start))
defer metrics.EventHandlingLatency.WithLabelValues(framework.EventAssignedPodDelete.Label()).Observe(metrics.SinceInSeconds(start))
logger := sched.logger
var pod *v1.Pod
@@ -304,7 +305,7 @@ func (sched *Scheduler) deletePodFromCache(obj interface{}) {
logger.Error(err, "Scheduler cache RemovePod failed", "pod", klog.KObj(pod))
}
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.AssignedPodDelete, pod, nil, nil)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, pod, nil, nil)
}
// assignedPod selects pods that are assigned (scheduled and running).
@@ -342,7 +343,7 @@ func addAllEventHandlers(
informerFactory informers.SharedInformerFactory,
dynInformerFactory dynamicinformer.DynamicSharedInformerFactory,
resourceClaimCache *assumecache.AssumeCache,
gvkMap map[framework.GVK]framework.ActionType,
gvkMap map[framework.EventResource]framework.ActionType,
) error {
var (
handlerRegistration cache.ResourceEventHandlerRegistration
@@ -423,15 +424,14 @@ func addAllEventHandlers(
handlers = append(handlers, handlerRegistration)
logger := sched.logger
buildEvtResHandler := func(at framework.ActionType, gvk framework.GVK, shortGVK string) cache.ResourceEventHandlerFuncs {
buildEvtResHandler := func(at framework.ActionType, resource framework.EventResource) cache.ResourceEventHandlerFuncs {
funcs := cache.ResourceEventHandlerFuncs{}
if at&framework.Add != 0 {
label := fmt.Sprintf("%vAdd", shortGVK)
evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Add, Label: label}
evt := framework.ClusterEvent{Resource: resource, ActionType: framework.Add}
funcs.AddFunc = func(obj interface{}) {
start := time.Now()
defer metrics.EventHandlingLatency.WithLabelValues(label).Observe(metrics.SinceInSeconds(start))
if gvk == framework.StorageClass && !utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
defer metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
if resource == framework.StorageClass && !utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
sc, ok := obj.(*storagev1.StorageClass)
if !ok {
logger.Error(nil, "Cannot convert to *storagev1.StorageClass", "obj", obj)
@@ -452,21 +452,19 @@ func addAllEventHandlers(
}
}
if at&framework.Update != 0 {
label := fmt.Sprintf("%vUpdate", shortGVK)
evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Update, Label: label}
evt := framework.ClusterEvent{Resource: resource, ActionType: framework.Update}
funcs.UpdateFunc = func(old, obj interface{}) {
start := time.Now()
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, old, obj, nil)
metrics.EventHandlingLatency.WithLabelValues(label).Observe(metrics.SinceInSeconds(start))
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
}
}
if at&framework.Delete != 0 {
label := fmt.Sprintf("%vDelete", shortGVK)
evt := framework.ClusterEvent{Resource: gvk, ActionType: framework.Delete, Label: label}
evt := framework.ClusterEvent{Resource: resource, ActionType: framework.Delete}
funcs.DeleteFunc = func(obj interface{}) {
start := time.Now()
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, evt, obj, nil, nil)
metrics.EventHandlingLatency.WithLabelValues(label).Observe(metrics.SinceInSeconds(start))
metrics.EventHandlingLatency.WithLabelValues(evt.Label()).Observe(metrics.SinceInSeconds(start))
}
}
return funcs
@@ -478,21 +476,21 @@ func addAllEventHandlers(
// Do nothing.
case framework.CSINode:
if handlerRegistration, err = informerFactory.Storage().V1().CSINodes().Informer().AddEventHandler(
buildEvtResHandler(at, framework.CSINode, "CSINode"),
buildEvtResHandler(at, framework.CSINode),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case framework.CSIDriver:
if handlerRegistration, err = informerFactory.Storage().V1().CSIDrivers().Informer().AddEventHandler(
buildEvtResHandler(at, framework.CSIDriver, "CSIDriver"),
buildEvtResHandler(at, framework.CSIDriver),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
case framework.CSIStorageCapacity:
if handlerRegistration, err = informerFactory.Storage().V1().CSIStorageCapacities().Informer().AddEventHandler(
buildEvtResHandler(at, framework.CSIStorageCapacity, "CSIStorageCapacity"),
buildEvtResHandler(at, framework.CSIStorageCapacity),
); err != nil {
return err
}
@@ -512,7 +510,7 @@ func addAllEventHandlers(
// parties, then scheduler will add pod back to unschedulable queue. We
// need to move pods to active queue on PV update for this scenario.
if handlerRegistration, err = informerFactory.Core().V1().PersistentVolumes().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolume, "Pv"),
buildEvtResHandler(at, framework.PersistentVolume),
); err != nil {
return err
}
@@ -520,7 +518,7 @@ func addAllEventHandlers(
case framework.PersistentVolumeClaim:
// MaxPDVolumeCountPredicate: add/update PVC will affect counts of PV when it is bound.
if handlerRegistration, err = informerFactory.Core().V1().PersistentVolumeClaims().Informer().AddEventHandler(
buildEvtResHandler(at, framework.PersistentVolumeClaim, "Pvc"),
buildEvtResHandler(at, framework.PersistentVolumeClaim),
); err != nil {
return err
}
@@ -528,14 +526,14 @@ func addAllEventHandlers(
case framework.ResourceClaim:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
handlerRegistration = resourceClaimCache.AddEventHandler(
buildEvtResHandler(at, framework.ResourceClaim, "ResourceClaim"),
buildEvtResHandler(at, framework.ResourceClaim),
)
handlers = append(handlers, handlerRegistration)
}
case framework.ResourceSlice:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1alpha3().ResourceSlices().Informer().AddEventHandler(
buildEvtResHandler(at, framework.ResourceSlice, "ResourceSlice"),
buildEvtResHandler(at, framework.ResourceSlice),
); err != nil {
return err
}
@@ -544,7 +542,7 @@ func addAllEventHandlers(
case framework.DeviceClass:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1alpha3().DeviceClasses().Informer().AddEventHandler(
buildEvtResHandler(at, framework.DeviceClass, "DeviceClass"),
buildEvtResHandler(at, framework.DeviceClass),
); err != nil {
return err
}
@@ -552,7 +550,7 @@ func addAllEventHandlers(
}
case framework.StorageClass:
if handlerRegistration, err = informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler(
buildEvtResHandler(at, framework.StorageClass, "StorageClass"),
buildEvtResHandler(at, framework.StorageClass),
); err != nil {
return err
}
@@ -578,7 +576,7 @@ func addAllEventHandlers(
gvr, _ := schema.ParseResourceArg(string(gvk))
dynInformer := dynInformerFactory.ForResource(*gvr).Informer()
if handlerRegistration, err = dynInformer.AddEventHandler(
buildEvtResHandler(at, gvk, strings.Title(gvr.Resource)),
buildEvtResHandler(at, gvk),
); err != nil {
return err
}


@@ -242,14 +242,14 @@ func TestPreCheckForNode(t *testing.T) {
func TestAddAllEventHandlers(t *testing.T) {
tests := []struct {
name string
gvkMap map[framework.GVK]framework.ActionType
gvkMap map[framework.EventResource]framework.ActionType
enableDRA bool
expectStaticInformers map[reflect.Type]bool
expectDynamicInformers map[schema.GroupVersionResource]bool
}{
{
name: "default handlers in framework",
gvkMap: map[framework.GVK]framework.ActionType{},
gvkMap: map[framework.EventResource]framework.ActionType{},
expectStaticInformers: map[reflect.Type]bool{
reflect.TypeOf(&v1.Pod{}): true,
reflect.TypeOf(&v1.Node{}): true,
@@ -259,7 +259,7 @@ func TestAddAllEventHandlers(t *testing.T) {
},
{
name: "DRA events disabled",
gvkMap: map[framework.GVK]framework.ActionType{
gvkMap: map[framework.EventResource]framework.ActionType{
framework.ResourceClaim: framework.Add,
framework.ResourceSlice: framework.Add,
framework.DeviceClass: framework.Add,
@@ -273,7 +273,7 @@ func TestAddAllEventHandlers(t *testing.T) {
},
{
name: "all DRA events enabled",
gvkMap: map[framework.GVK]framework.ActionType{
gvkMap: map[framework.EventResource]framework.ActionType{
framework.ResourceClaim: framework.Add,
framework.ResourceSlice: framework.Add,
framework.DeviceClass: framework.Add,
@@ -291,7 +291,7 @@ func TestAddAllEventHandlers(t *testing.T) {
},
{
name: "add GVKs handlers defined in framework dynamically",
gvkMap: map[framework.GVK]framework.ActionType{
gvkMap: map[framework.EventResource]framework.ActionType{
"Pod": framework.Add | framework.Delete,
"PersistentVolume": framework.Delete,
"storage.k8s.io/CSIStorageCapacity": framework.Update,
@@ -307,7 +307,7 @@ func TestAddAllEventHandlers(t *testing.T) {
},
{
name: "add GVKs handlers defined in plugins dynamically",
gvkMap: map[framework.GVK]framework.ActionType{
gvkMap: map[framework.EventResource]framework.ActionType{
"daemonsets.v1.apps": framework.Add | framework.Delete,
"cronjobs.v1.batch": framework.Delete,
},
@@ -323,7 +323,7 @@ func TestAddAllEventHandlers(t *testing.T) {
},
{
name: "add GVKs handlers defined in plugins dynamically, with one illegal GVK form",
gvkMap: map[framework.GVK]framework.ActionType{
gvkMap: map[framework.EventResource]framework.ActionType{
"daemonsets.v1.apps": framework.Add | framework.Delete,
"custommetrics.v1beta1": framework.Update,
},


@@ -25,9 +25,8 @@ import (
"k8s.io/kubernetes/pkg/features"
)
// Special event labels.
const (
// PodAdd is the event when a new pod is added to API server.
PodAdd = "PodAdd"
// ScheduleAttemptFailure is the event when a schedule attempt fails.
ScheduleAttemptFailure = "ScheduleAttemptFailure"
// BackoffComplete is the event when a pod finishes backoff.
@@ -35,120 +34,33 @@ const (
// ForceActivate is the event when a pod is moved from unschedulablePods/backoffQ
// to activeQ. Usually it's triggered by plugin implementations.
ForceActivate = "ForceActivate"
// PodUpdate is the event when a pod is updated
PodUpdate = "PodUpdate"
)
var (
// AssignedPodAdd is the event when an assigned pod is added.
AssignedPodAdd = ClusterEvent{Resource: Pod, ActionType: Add, Label: "AssignedPodAdd"}
// NodeAdd is the event when a new node is added to the cluster.
NodeAdd = ClusterEvent{Resource: Node, ActionType: Add, Label: "NodeAdd"}
// NodeDelete is the event when a node is deleted from the cluster.
NodeDelete = ClusterEvent{Resource: Node, ActionType: Delete, Label: "NodeDelete"}
// AssignedPodUpdate is the event when an assigned pod is updated.
AssignedPodUpdate = ClusterEvent{Resource: Pod, ActionType: Update, Label: "AssignedPodUpdate"}
// UnscheduledPodAdd is the event when an unscheduled pod is added.
UnscheduledPodAdd = ClusterEvent{Resource: Pod, ActionType: Update, Label: "UnschedulablePodAdd"}
// UnscheduledPodUpdate is the event when an unscheduled pod is updated.
UnscheduledPodUpdate = ClusterEvent{Resource: Pod, ActionType: Update, Label: "UnschedulablePodUpdate"}
// UnscheduledPodDelete is the event when an unscheduled pod is deleted.
UnscheduledPodDelete = ClusterEvent{Resource: Pod, ActionType: Update, Label: "UnschedulablePodDelete"}
// assignedPodOtherUpdate is the event when an assigned pod got updated in fields that are not covered by UpdatePodXXX.
assignedPodOtherUpdate = ClusterEvent{Resource: Pod, ActionType: updatePodOther, Label: "AssignedPodUpdate"}
// AssignedPodDelete is the event when an assigned pod is deleted.
AssignedPodDelete = ClusterEvent{Resource: Pod, ActionType: Delete, Label: "AssignedPodDelete"}
// PodRequestScaledDown is the event when a pod's resource request is scaled down.
PodRequestScaledDown = ClusterEvent{Resource: Pod, ActionType: UpdatePodScaleDown, Label: "PodRequestScaledDown"}
// PodLabelChange is the event when a pod's label is changed.
PodLabelChange = ClusterEvent{Resource: Pod, ActionType: UpdatePodLabel, Label: "PodLabelChange"}
// PodTolerationChange is the event when a pod's toleration is changed.
PodTolerationChange = ClusterEvent{Resource: Pod, ActionType: UpdatePodTolerations, Label: "PodTolerationChange"}
// PodSchedulingGateEliminatedChange is the event when a pod's scheduling gate is changed.
PodSchedulingGateEliminatedChange = ClusterEvent{Resource: Pod, ActionType: UpdatePodSchedulingGatesEliminated, Label: "PodSchedulingGateChange"}
// PodGeneratedResourceClaimChange is the event when a pod's list of generated ResourceClaims changes.
PodGeneratedResourceClaimChange = ClusterEvent{Resource: Pod, ActionType: UpdatePodGeneratedResourceClaim, Label: "PodGeneratedResourceClaimChange"}
// NodeSpecUnschedulableChange is the event when unschedulable node spec is changed.
NodeSpecUnschedulableChange = ClusterEvent{Resource: Node, ActionType: UpdateNodeTaint, Label: "NodeSpecUnschedulableChange"}
// NodeAllocatableChange is the event when node allocatable is changed.
NodeAllocatableChange = ClusterEvent{Resource: Node, ActionType: UpdateNodeAllocatable, Label: "NodeAllocatableChange"}
// NodeLabelChange is the event when node label is changed.
NodeLabelChange = ClusterEvent{Resource: Node, ActionType: UpdateNodeLabel, Label: "NodeLabelChange"}
// NodeAnnotationChange is the event when node annotation is changed.
NodeAnnotationChange = ClusterEvent{Resource: Node, ActionType: UpdateNodeAnnotation, Label: "NodeAnnotationChange"}
// NodeTaintChange is the event when node taint is changed.
NodeTaintChange = ClusterEvent{Resource: Node, ActionType: UpdateNodeTaint, Label: "NodeTaintChange"}
// NodeConditionChange is the event when node condition is changed.
NodeConditionChange = ClusterEvent{Resource: Node, ActionType: UpdateNodeCondition, Label: "NodeConditionChange"}
// PvAdd is the event when a persistent volume is added in the cluster.
PvAdd = ClusterEvent{Resource: PersistentVolume, ActionType: Add, Label: "PvAdd"}
// PvUpdate is the event when a persistent volume is updated in the cluster.
PvUpdate = ClusterEvent{Resource: PersistentVolume, ActionType: Update, Label: "PvUpdate"}
// PvcAdd is the event when a persistent volume claim is added in the cluster.
PvcAdd = ClusterEvent{Resource: PersistentVolumeClaim, ActionType: Add, Label: "PvcAdd"}
// PvcUpdate is the event when a persistent volume claim is updated in the cluster.
PvcUpdate = ClusterEvent{Resource: PersistentVolumeClaim, ActionType: Update, Label: "PvcUpdate"}
// StorageClassAdd is the event when a StorageClass is added in the cluster.
StorageClassAdd = ClusterEvent{Resource: StorageClass, ActionType: Add, Label: "StorageClassAdd"}
// StorageClassUpdate is the event when a StorageClass is updated in the cluster.
StorageClassUpdate = ClusterEvent{Resource: StorageClass, ActionType: Update, Label: "StorageClassUpdate"}
// CSINodeAdd is the event when a CSI node is added in the cluster.
CSINodeAdd = ClusterEvent{Resource: CSINode, ActionType: Add, Label: "CSINodeAdd"}
// CSINodeUpdate is the event when a CSI node is updated in the cluster.
CSINodeUpdate = ClusterEvent{Resource: CSINode, ActionType: Update, Label: "CSINodeUpdate"}
// CSIDriverAdd is the event when a CSI driver is added in the cluster.
CSIDriverAdd = ClusterEvent{Resource: CSIDriver, ActionType: Add, Label: "CSIDriverAdd"}
// CSIDriverUpdate is the event when a CSI driver is updated in the cluster.
CSIDriverUpdate = ClusterEvent{Resource: CSIDriver, ActionType: Update, Label: "CSIDriverUpdate"}
// CSIStorageCapacityAdd is the event when a CSI storage capacity is added in the cluster.
CSIStorageCapacityAdd = ClusterEvent{Resource: CSIStorageCapacity, ActionType: Add, Label: "CSIStorageCapacityAdd"}
// CSIStorageCapacityUpdate is the event when a CSI storage capacity is updated in the cluster.
CSIStorageCapacityUpdate = ClusterEvent{Resource: CSIStorageCapacity, ActionType: Update, Label: "CSIStorageCapacityUpdate"}
// WildCardEvent semantically matches all resources on all actions.
WildCardEvent = ClusterEvent{Resource: WildCard, ActionType: All, Label: "WildCardEvent"}
// UnschedulableTimeout is the event when a pod stays in unschedulable for longer than timeout.
UnschedulableTimeout = ClusterEvent{Resource: WildCard, ActionType: All, Label: "UnschedulableTimeout"}
// AllEvents contains all events defined above.
AllEvents = []ClusterEvent{
AssignedPodAdd,
NodeAdd,
NodeDelete,
AssignedPodUpdate,
UnscheduledPodAdd,
UnscheduledPodUpdate,
UnscheduledPodDelete,
assignedPodOtherUpdate,
AssignedPodDelete,
PodRequestScaledDown,
PodLabelChange,
PodTolerationChange,
PodSchedulingGateEliminatedChange,
NodeSpecUnschedulableChange,
NodeAllocatableChange,
NodeLabelChange,
NodeAnnotationChange,
NodeTaintChange,
NodeConditionChange,
PvAdd,
PvUpdate,
PvcAdd,
PvcUpdate,
StorageClassAdd,
StorageClassUpdate,
CSINodeAdd,
CSINodeUpdate,
CSIDriverAdd,
CSIDriverUpdate,
CSIStorageCapacityAdd,
CSIStorageCapacityUpdate,
WildCardEvent,
UnschedulableTimeout,
}
// EventAssignedPodAdd is the event when an assigned pod is added.
EventAssignedPodAdd = ClusterEvent{Resource: assignedPod, ActionType: Add}
// EventAssignedPodUpdate is the event when an assigned pod is updated.
EventAssignedPodUpdate = ClusterEvent{Resource: assignedPod, ActionType: Update}
// EventAssignedPodDelete is the event when an assigned pod is deleted.
EventAssignedPodDelete = ClusterEvent{Resource: assignedPod, ActionType: Delete}
// EventUnscheduledPodAdd is the event when an unscheduled pod is added.
EventUnscheduledPodAdd = ClusterEvent{Resource: unschedulablePod, ActionType: Add}
// EventUnscheduledPodUpdate is the event when an unscheduled pod is updated.
EventUnscheduledPodUpdate = ClusterEvent{Resource: unschedulablePod, ActionType: Update}
// EventUnscheduledPodDelete is the event when an unscheduled pod is deleted.
EventUnscheduledPodDelete = ClusterEvent{Resource: unschedulablePod, ActionType: Delete}
// EventUnschedulableTimeout is the event when a pod stays in unschedulable for longer than timeout.
EventUnschedulableTimeout = ClusterEvent{Resource: WildCard, ActionType: All, label: "UnschedulableTimeout"}
)
// PodSchedulingPropertiesChange interprets the update of a pod and returns corresponding UpdatePodXYZ event(s).
// Once we have other pod update events, we should update here as well.
func PodSchedulingPropertiesChange(newPod *v1.Pod, oldPod *v1.Pod) (events []ClusterEvent) {
r := assignedPod
if newPod.Spec.NodeName == "" {
r = unschedulablePod
}
podChangeExtracters := []podChangeExtractor{
extractPodLabelsChange,
extractPodScaleDown,
@@ -160,24 +72,24 @@ func PodSchedulingPropertiesChange(newPod *v1.Pod, oldPod *v1.Pod) (events []Clu
}
for _, fn := range podChangeExtracters {
if event := fn(newPod, oldPod); event != nil {
events = append(events, *event)
if event := fn(newPod, oldPod); event != none {
events = append(events, ClusterEvent{Resource: r, ActionType: event})
}
}
if len(events) == 0 {
// When no specific event is found, we use updatePodOther,
// which should only trigger plugins registering a general Pod/Update event.
events = append(events, assignedPodOtherUpdate)
events = append(events, ClusterEvent{Resource: r, ActionType: updatePodOther})
}
return
}
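
Each extractor inspects one property and returns either a specific update ActionType or none; PodSchedulingPropertiesChange then stamps every hit with assignedPod or unschedulablePod depending on whether the Pod has a node, falling back to the catch-all update (NodeSchedulingPropertiesChange below follows the same shape). A trimmed, self-contained sketch of that pipeline, using mirror types rather than the framework's real ones:

package main

import (
	"fmt"
	"reflect"
)

type ActionType int

const (
	none           ActionType = 0
	UpdatePodLabel ActionType = 1
	updatePodOther ActionType = 2
)

type Pod struct {
	NodeName string
	Labels   map[string]string
}

type Event struct {
	Resource string
	Action   ActionType
}

// extractLabelChange mirrors one extractor: return a specific action or none.
func extractLabelChange(newPod, oldPod Pod) ActionType {
	if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) {
		return UpdatePodLabel
	}
	return none
}

func propertiesChange(newPod, oldPod Pod) []Event {
	r := "AssignedPod"
	if newPod.NodeName == "" {
		r = "UnschedulablePod"
	}
	var events []Event
	for _, fn := range []func(Pod, Pod) ActionType{extractLabelChange} {
		if a := fn(newPod, oldPod); a != none {
			events = append(events, Event{Resource: r, Action: a})
		}
	}
	if len(events) == 0 {
		// No specific change matched: fall back to the catch-all update.
		events = append(events, Event{Resource: r, Action: updatePodOther})
	}
	return events
}

func main() {
	oldPod := Pod{Labels: map[string]string{"k": "v1"}}
	newPod := Pod{Labels: map[string]string{"k": "v2"}}
	fmt.Println(propertiesChange(newPod, oldPod)) // [{UnschedulablePod 1}]
}
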
type podChangeExtractor func(newNode *v1.Pod, oldNode *v1.Pod) *ClusterEvent
type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) ActionType
// extractPodScaleDown interprets the update of a pod and returns UpdatePodScaleDown if any of the pod's resource requests is scaled down.
func extractPodScaleDown(newPod, oldPod *v1.Pod) *ClusterEvent {
func extractPodScaleDown(newPod, oldPod *v1.Pod) ActionType {
opt := resource.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
@@ -188,52 +100,52 @@ func extractPodScaleDown(newPod, oldPod *v1.Pod) *ClusterEvent {
newReq, ok := newPodRequests[rName]
if !ok {
// The resource request of rName is removed.
return &PodRequestScaledDown
return UpdatePodScaleDown
}
if oldReq.MilliValue() > newReq.MilliValue() {
// The resource request of rName is scaled down.
return &PodRequestScaledDown
return UpdatePodScaleDown
}
}
return nil
return none
}
func extractPodLabelsChange(newPod *v1.Pod, oldPod *v1.Pod) *ClusterEvent {
func extractPodLabelsChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType {
if isLabelChanged(newPod.GetLabels(), oldPod.GetLabels()) {
return &PodLabelChange
return UpdatePodLabel
}
return nil
return none
}
func extractPodTolerationChange(newPod *v1.Pod, oldPod *v1.Pod) *ClusterEvent {
func extractPodTolerationChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType {
if len(newPod.Spec.Tolerations) != len(oldPod.Spec.Tolerations) {
// A Pod got a new toleration.
// Due to API validation, the user can add, but cannot modify or remove tolerations.
// So, it's enough to just check the length of tolerations to notice the update.
// And, any updates in tolerations could make Pod schedulable.
return &PodTolerationChange
return UpdatePodTolerations
}
return nil
return none
}
func extractPodSchedulingGateEliminatedChange(newPod *v1.Pod, oldPod *v1.Pod) *ClusterEvent {
func extractPodSchedulingGateEliminatedChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType {
if len(newPod.Spec.SchedulingGates) == 0 && len(oldPod.Spec.SchedulingGates) != 0 {
// A scheduling gate on the pod is completely removed.
return &PodSchedulingGateEliminatedChange
return UpdatePodSchedulingGatesEliminated
}
return nil
return none
}
func extractPodGeneratedResourceClaimChange(newPod *v1.Pod, oldPod *v1.Pod) *ClusterEvent {
func extractPodGeneratedResourceClaimChange(newPod *v1.Pod, oldPod *v1.Pod) ActionType {
if !resourceclaim.PodStatusEqual(newPod.Status.ResourceClaimStatuses, oldPod.Status.ResourceClaimStatuses) {
return &PodGeneratedResourceClaimChange
return UpdatePodGeneratedResourceClaim
}
return nil
return none
}
// NodeSchedulingPropertiesChange interprets the update of a node and returns corresponding UpdateNodeXYZ event(s).
@@ -248,41 +160,41 @@ func NodeSchedulingPropertiesChange(newNode *v1.Node, oldNode *v1.Node) (events
}
for _, fn := range nodeChangeExtracters {
if event := fn(newNode, oldNode); event != nil {
events = append(events, *event)
if event := fn(newNode, oldNode); event != none {
events = append(events, ClusterEvent{Resource: Node, ActionType: event})
}
}
return
}
type nodeChangeExtractor func(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent
type nodeChangeExtractor func(newNode *v1.Node, oldNode *v1.Node) ActionType
func extractNodeAllocatableChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent {
func extractNodeAllocatableChange(newNode *v1.Node, oldNode *v1.Node) ActionType {
if !equality.Semantic.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) {
return &NodeAllocatableChange
return UpdateNodeAllocatable
}
return nil
return none
}
func extractNodeLabelsChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent {
func extractNodeLabelsChange(newNode *v1.Node, oldNode *v1.Node) ActionType {
if isLabelChanged(newNode.GetLabels(), oldNode.GetLabels()) {
return &NodeLabelChange
return UpdateNodeLabel
}
return nil
return none
}
func isLabelChanged(newLabels map[string]string, oldLabels map[string]string) bool {
return !equality.Semantic.DeepEqual(newLabels, oldLabels)
}
func extractNodeTaintsChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent {
func extractNodeTaintsChange(newNode *v1.Node, oldNode *v1.Node) ActionType {
if !equality.Semantic.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints) {
return &NodeTaintChange
return UpdateNodeTaint
}
return nil
return none
}
func extractNodeConditionsChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent {
func extractNodeConditionsChange(newNode *v1.Node, oldNode *v1.Node) ActionType {
strip := func(conditions []v1.NodeCondition) map[v1.NodeConditionType]v1.ConditionStatus {
conditionStatuses := make(map[v1.NodeConditionType]v1.ConditionStatus, len(conditions))
for i := range conditions {
@@ -291,21 +203,22 @@ func extractNodeConditionsChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEve
return conditionStatuses
}
if !equality.Semantic.DeepEqual(strip(oldNode.Status.Conditions), strip(newNode.Status.Conditions)) {
return &NodeConditionChange
return UpdateNodeCondition
}
return nil
return none
}
func extractNodeSpecUnschedulableChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent {
func extractNodeSpecUnschedulableChange(newNode *v1.Node, oldNode *v1.Node) ActionType {
if newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable && !newNode.Spec.Unschedulable {
return &NodeSpecUnschedulableChange
// TODO: create UpdateNodeSpecUnschedulable ActionType
return UpdateNodeTaint
}
return nil
return none
}
func extractNodeAnnotationsChange(newNode *v1.Node, oldNode *v1.Node) *ClusterEvent {
func extractNodeAnnotationsChange(newNode *v1.Node, oldNode *v1.Node) ActionType {
if !equality.Semantic.DeepEqual(oldNode.GetAnnotations(), newNode.GetAnnotations()) {
return &NodeAnnotationChange
return UpdateNodeAnnotation
}
return nil
return none
}


@@ -21,6 +21,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -59,7 +60,7 @@ func TestNodeAllocatableChange(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.oldAllocatable}}
newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.newAllocatable}}
changed := extractNodeAllocatableChange(newNode, oldNode) != nil
changed := extractNodeAllocatableChange(newNode, oldNode) != none
if changed != test.changed {
t.Errorf("nodeAllocatableChanged should be %t, got %t", test.changed, changed)
}
@@ -92,7 +93,7 @@ func TestNodeLabelsChange(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.oldLabels}}
newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.newLabels}}
changed := extractNodeLabelsChange(newNode, oldNode) != nil
changed := extractNodeLabelsChange(newNode, oldNode) != none
if changed != test.changed {
t.Errorf("Test case %q failed: should be %t, got %t", test.name, test.changed, changed)
}
@@ -124,7 +125,7 @@ func TestNodeTaintsChange(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.oldTaints}}
newNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.newTaints}}
changed := extractNodeTaintsChange(newNode, oldNode) != nil
changed := extractNodeTaintsChange(newNode, oldNode) != none
if changed != test.changed {
t.Errorf("Test case %q failed: should be %t, not %t", test.name, test.changed, changed)
}
@@ -179,7 +180,7 @@ func TestNodeConditionsChange(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.oldConditions}}
newNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.newConditions}}
changed := extractNodeConditionsChange(newNode, oldNode) != nil
changed := extractNodeConditionsChange(newNode, oldNode) != none
if changed != test.changed {
t.Errorf("Test case %q failed: should be %t, got %t", test.name, test.changed, changed)
}
@@ -204,7 +205,7 @@ func TestNodeSchedulingPropertiesChange(t *testing.T) {
name: "only node spec unavailable changed",
newNode: st.MakeNode().Unschedulable(false).Obj(),
oldNode: st.MakeNode().Unschedulable(true).Obj(),
wantEvents: []ClusterEvent{NodeSpecUnschedulableChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeTaint}},
},
{
name: "only node allocatable changed",
@@ -218,13 +219,13 @@ func TestNodeSchedulingPropertiesChange(t *testing.T) {
v1.ResourceMemory: "100m",
v1.ResourceName("example.com/foo"): "2"},
).Obj(),
wantEvents: []ClusterEvent{NodeAllocatableChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeAllocatable}},
},
{
name: "only node label changed",
newNode: st.MakeNode().Label("foo", "bar").Obj(),
oldNode: st.MakeNode().Label("foo", "fuz").Obj(),
wantEvents: []ClusterEvent{NodeLabelChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeLabel}},
},
{
name: "only node taint changed",
@@ -234,13 +235,13 @@ func TestNodeSchedulingPropertiesChange(t *testing.T) {
oldNode: st.MakeNode().Taints([]v1.Taint{
{Key: v1.TaintNodeUnschedulable, Value: "foo", Effect: v1.TaintEffectNoSchedule},
}).Obj(),
wantEvents: []ClusterEvent{NodeTaintChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeTaint}},
},
{
name: "only node annotation changed",
newNode: st.MakeNode().Annotation("foo", "bar").Obj(),
oldNode: st.MakeNode().Annotation("foo", "fuz").Obj(),
wantEvents: []ClusterEvent{NodeAnnotationChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeAnnotation}},
},
{
name: "only node condition changed",
@@ -251,7 +252,7 @@ func TestNodeSchedulingPropertiesChange(t *testing.T) {
"Ready",
"Ready",
).Obj(),
wantEvents: []ClusterEvent{NodeConditionChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeCondition}},
},
{
name: "both node label and node taint changed",
@@ -263,13 +264,13 @@ func TestNodeSchedulingPropertiesChange(t *testing.T) {
oldNode: st.MakeNode().Taints([]v1.Taint{
{Key: v1.TaintNodeUnschedulable, Value: "foo", Effect: v1.TaintEffectNoSchedule},
}).Obj(),
wantEvents: []ClusterEvent{NodeLabelChange, NodeTaintChange},
wantEvents: []ClusterEvent{{Resource: Node, ActionType: UpdateNodeLabel}, {Resource: Node, ActionType: UpdateNodeTaint}},
},
}
for _, tc := range testCases {
gotEvents := NodeSchedulingPropertiesChange(tc.newNode, tc.oldNode)
if diff := cmp.Diff(tc.wantEvents, gotEvents); diff != "" {
if diff := cmp.Diff(tc.wantEvents, gotEvents, cmpopts.EquateComparable(ClusterEvent{})); diff != "" {
t.Errorf("unexpected event (-want, +got):\n%s", diff)
}
}
@@ -354,85 +355,94 @@ func Test_podSchedulingPropertiesChange(t *testing.T) {
draDisabled bool
want []ClusterEvent
}{
{
name: "assigned pod is updated",
newPod: st.MakePod().Label("foo", "bar").Node("node").Obj(),
oldPod: st.MakePod().Label("foo", "bar2").Node("node").Obj(),
want: []ClusterEvent{{Resource: assignedPod, ActionType: UpdatePodLabel}},
},
{
name: "only label is updated",
newPod: st.MakePod().Label("foo", "bar").Obj(),
oldPod: st.MakePod().Label("foo", "bar2").Obj(),
want: []ClusterEvent{PodLabelChange},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodLabel}},
},
{
name: "pod's resource request is scaled down",
oldPod: podWithBigRequest,
newPod: podWithSmallRequest,
want: []ClusterEvent{PodRequestScaledDown},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodScaleDown}},
},
{
name: "pod's resource request is scaled up",
oldPod: podWithSmallRequest,
newPod: podWithBigRequest,
want: []ClusterEvent{assignedPodOtherUpdate},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: updatePodOther}},
},
{
name: "both pod's resource request and label are updated",
oldPod: podWithBigRequest,
newPod: podWithSmallRequestAndLabel,
want: []ClusterEvent{PodLabelChange, PodRequestScaledDown},
want: []ClusterEvent{
{Resource: unschedulablePod, ActionType: UpdatePodLabel},
{Resource: unschedulablePod, ActionType: UpdatePodScaleDown},
},
},
{
name: "untracked properties of pod is updated",
newPod: st.MakePod().Annotation("foo", "bar").Obj(),
oldPod: st.MakePod().Annotation("foo", "bar2").Obj(),
want: []ClusterEvent{assignedPodOtherUpdate},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: updatePodOther}},
},
{
name: "scheduling gate is eliminated",
newPod: st.MakePod().SchedulingGates([]string{}).Obj(),
oldPod: st.MakePod().SchedulingGates([]string{"foo"}).Obj(),
want: []ClusterEvent{PodSchedulingGateEliminatedChange},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodSchedulingGatesEliminated}},
},
{
name: "scheduling gate is removed, but not completely eliminated",
newPod: st.MakePod().SchedulingGates([]string{"foo"}).Obj(),
oldPod: st.MakePod().SchedulingGates([]string{"foo", "bar"}).Obj(),
want: []ClusterEvent{assignedPodOtherUpdate},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: updatePodOther}},
},
{
name: "pod's tolerations are updated",
newPod: st.MakePod().Toleration("key").Toleration("key2").Obj(),
oldPod: st.MakePod().Toleration("key").Obj(),
want: []ClusterEvent{PodTolerationChange},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodTolerations}},
},
{
name: "pod claim statuses change, feature disabled",
draDisabled: true,
newPod: st.MakePod().ResourceClaimStatuses(claimStatusA).Obj(),
oldPod: st.MakePod().Obj(),
want: []ClusterEvent{assignedPodOtherUpdate},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: updatePodOther}},
},
{
name: "pod claim statuses change, feature enabled",
newPod: st.MakePod().ResourceClaimStatuses(claimStatusA).Obj(),
oldPod: st.MakePod().Obj(),
want: []ClusterEvent{PodGeneratedResourceClaimChange},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodGeneratedResourceClaim}},
},
{
name: "pod claim statuses swapped",
newPod: st.MakePod().ResourceClaimStatuses(claimStatusA, claimStatusB).Obj(),
oldPod: st.MakePod().ResourceClaimStatuses(claimStatusB, claimStatusA).Obj(),
want: []ClusterEvent{PodGeneratedResourceClaimChange},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodGeneratedResourceClaim}},
},
{
name: "pod claim statuses extended",
newPod: st.MakePod().ResourceClaimStatuses(claimStatusA, claimStatusB).Obj(),
oldPod: st.MakePod().ResourceClaimStatuses(claimStatusA).Obj(),
want: []ClusterEvent{PodGeneratedResourceClaimChange},
want: []ClusterEvent{{Resource: unschedulablePod, ActionType: UpdatePodGeneratedResourceClaim}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicResourceAllocation, !tt.draDisabled)
got := PodSchedulingPropertiesChange(tt.newPod, tt.oldPod)
if diff := cmp.Diff(tt.want, got); diff != "" {
if diff := cmp.Diff(tt.want, got, cmpopts.EquateComparable(ClusterEvent{})); diff != "" {
t.Errorf("unexpected event is returned from podSchedulingPropertiesChange (-want, +got):\n%s", diff)
}
})


@@ -23,6 +23,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
@@ -1204,7 +1205,7 @@ func TestEventsToRegister(t *testing.T) {
for i := range actualClusterEvents {
actualClusterEvents[i].QueueingHintFn = nil
}
if diff := cmp.Diff(test.expectedClusterEvents, actualClusterEvents); diff != "" {
if diff := cmp.Diff(test.expectedClusterEvents, actualClusterEvents, cmpopts.EquateComparable(framework.ClusterEvent{})); diff != "" {
t.Error("Cluster Events doesn't match extected events (-expected +actual):\n", diff)
}
})


@@ -45,6 +45,9 @@ var generation int64
type ActionType int64
// Constants for ActionTypes.
// Note: When you add a new ActionType, you must update the following:
// - The basicActionTypes, podActionTypes, and nodeActionTypes lists.
// - String() method.
const (
Add ActionType = 1 << iota
Delete
@@ -86,10 +89,59 @@ const (
// Use the general Update type if you don't know or care about the specific sub-Update type to use.
Update = UpdateNodeAllocatable | UpdateNodeLabel | UpdateNodeTaint | UpdateNodeCondition | UpdateNodeAnnotation | UpdatePodLabel | UpdatePodScaleDown | UpdatePodTolerations | UpdatePodSchedulingGatesEliminated | UpdatePodGeneratedResourceClaim | updatePodOther
// none is a special ActionType that is only used internally.
none ActionType = 0
)
// GVK is short for group/version/kind, which can uniquely represent a particular API resource.
type GVK string
var (
// basicActionTypes is a list of the basic ActionTypes.
basicActionTypes = []ActionType{Add, Delete, Update}
// podActionTypes is a list of ActionTypes that are only applicable for Pod events.
podActionTypes = []ActionType{UpdatePodLabel, UpdatePodScaleDown, UpdatePodTolerations, UpdatePodSchedulingGatesEliminated, UpdatePodGeneratedResourceClaim}
// nodeActionTypes is a list of ActionTypes that are only applicable for Node events.
nodeActionTypes = []ActionType{UpdateNodeAllocatable, UpdateNodeLabel, UpdateNodeTaint, UpdateNodeCondition, UpdateNodeAnnotation}
)
func (a ActionType) String() string {
switch a {
case Add:
return "Add"
case Delete:
return "Delete"
case UpdateNodeAllocatable:
return "UpdateNodeAllocatable"
case UpdateNodeLabel:
return "UpdateNodeLabel"
case UpdateNodeTaint:
return "UpdateNodeTaint"
case UpdateNodeCondition:
return "UpdateNodeCondition"
case UpdateNodeAnnotation:
return "UpdateNodeAnnotation"
case UpdatePodLabel:
return "UpdatePodLabel"
case UpdatePodScaleDown:
return "UpdatePodScaleDown"
case UpdatePodTolerations:
return "UpdatePodTolerations"
case UpdatePodSchedulingGatesEliminated:
return "UpdatePodSchedulingGatesEliminated"
case UpdatePodGeneratedResourceClaim:
return "UpdatePodGeneratedResourceClaim"
case updatePodOther:
return "Update"
case All:
return "All"
case Update:
return "Update"
}
// Shouldn't reach here.
return ""
}
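
Because ActionType is a bit mask, composite actions such as Update are unions of the specific sub-updates, and matching reduces to a bitwise AND. A self-contained sketch using a trimmed-down mirror of the type (the real constants live in the framework package):

package main

import "fmt"

type ActionType int64

const (
	Add ActionType = 1 << iota
	Delete
	UpdateNodeTaint
	UpdatePodLabel
)

// Update mirrors the composite constant: a union of the specific sub-updates.
const Update = UpdateNodeTaint | UpdatePodLabel

func main() {
	registered := Add | UpdateNodeTaint // what a plugin asked for
	incoming := UpdateNodeTaint         // what an event carries

	fmt.Println(registered&incoming != 0) // true: the masks overlap
	fmt.Println(registered&Delete != 0)   // false: Delete wasn't registered
	fmt.Println(Update&incoming != 0)     // true: Update covers UpdateNodeTaint
}
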
// EventResource is basically short for group/version/kind, which can uniquely represent a particular API resource.
type EventResource string
// Constants for EventResources.
//
@@ -115,35 +167,60 @@ const (
// the previous rejection from noderesources plugin can be resolved.
// this plugin would implement QueueingHint for Pod/Update event
// that returns Queue when such label changes are made in unscheduled Pods.
Pod GVK = "Pod"
Pod EventResource = "Pod"
// assignedPod and unschedulablePod are internal resources used to represent the type of the Pod in an event.
// We deliberately don't expose them to plugins because we don't publish Pod events for unschedulable Pods in the first place.
assignedPod EventResource = "AssignedPod"
unschedulablePod EventResource = "UnschedulablePod"
// A note about NodeAdd event and UpdateNodeTaint event:
// NodeAdd QueueingHint isn't always called because of the internal feature called preCheck.
// When QHint is disabled, NodeAdd events often don't work as expected because of the internal feature called preCheck.
// It's definitely not something expected for plugin developers,
// and registering UpdateNodeTaint event is the only mitigation for now.
// So, kube-scheduler registers the UpdateNodeTaint event for plugins that have the NodeAdd event but don't have the UpdateNodeTaint event.
// It hurts requeueing efficiency, though it's still a lot better than some Pods getting stuck in the
// unschedulable pod pool.
// This behavior will be removed when we remove the preCheck feature.
// This problematic preCheck feature is disabled when QHint is enabled,
// and eventually will be removed along with QHint graduation.
// See: https://github.com/kubernetes/kubernetes/issues/110175
Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim"
CSINode GVK = "storage.k8s.io/CSINode"
CSIDriver GVK = "storage.k8s.io/CSIDriver"
CSIStorageCapacity GVK = "storage.k8s.io/CSIStorageCapacity"
StorageClass GVK = "storage.k8s.io/StorageClass"
ResourceClaim GVK = "ResourceClaim"
ResourceSlice GVK = "ResourceSlice"
DeviceClass GVK = "DeviceClass"
Node EventResource = "Node"
PersistentVolume EventResource = "PersistentVolume"
PersistentVolumeClaim EventResource = "PersistentVolumeClaim"
CSINode EventResource = "storage.k8s.io/CSINode"
CSIDriver EventResource = "storage.k8s.io/CSIDriver"
CSIStorageCapacity EventResource = "storage.k8s.io/CSIStorageCapacity"
StorageClass EventResource = "storage.k8s.io/StorageClass"
ResourceClaim EventResource = "resource.k8s.io/ResourceClaim"
ResourceSlice EventResource = "resource.k8s.io/ResourceSlice"
DeviceClass EventResource = "resource.k8s.io/DeviceClass"
// WildCard is a special GVK to match all resources.
// WildCard is a special EventResource to match all resources.
// e.g., If you register `{Resource: "*", ActionType: All}` in EventsToRegister,
// all incoming clusterEvents will be admitted. Be careful about registering it, as it will
// increase the computing pressure in requeueing unless you really need it.
//
// Meanwhile, if the coming clusterEvent is a wildcard one, all pods
// will be moved from unschedulablePod pool to activeQ/backoffQ forcibly.
WildCard GVK = "*"
WildCard EventResource = "*"
)
var (
// allResources is a list of all resources.
allResources = []EventResource{
Pod,
assignedPod,
unschedulablePod,
Node,
PersistentVolume,
PersistentVolumeClaim,
CSINode,
CSIDriver,
CSIStorageCapacity,
StorageClass,
ResourceClaim,
ResourceSlice,
DeviceClass,
}
)
type ClusterEventWithHint struct {
@@ -194,10 +271,42 @@ func (s QueueingHint) String() string {
// Resource represents the standard API resources such as Pod, Node, etc.
// ActionType denotes the specific change such as Add, Update or Delete.
type ClusterEvent struct {
Resource GVK
Resource EventResource
ActionType ActionType
// Label describes this cluster event, only used in logging and metrics.
Label string
// label describes this cluster event.
// It's an optional field to control Label(), which is used in logging and metrics.
// Normally, it's not necessary to set this field; it's only used for special events like UnschedulableTimeout.
label string
}
// Label is used for logging and metrics.
func (ce ClusterEvent) Label() string {
if ce.label != "" {
return ce.label
}
return fmt.Sprintf("%v%v", ce.Resource, ce.ActionType)
}
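
Label() falls back to concatenating the resource with the action's String() unless the unexported label overrides it, which only special events like UnschedulableTimeout use. A minimal mirror of that behavior (hypothetical trimmed types, not the framework's):

package main

import "fmt"

type EventResource string

type ActionType int

const (
	Add ActionType = 1 << iota
	Update
)

func (a ActionType) String() string {
	switch a {
	case Add:
		return "Add"
	case Update:
		return "Update"
	}
	return ""
}

type ClusterEvent struct {
	Resource   EventResource
	ActionType ActionType
	label      string // optional override, used only by special events
}

func (ce ClusterEvent) Label() string {
	if ce.label != "" {
		return ce.label
	}
	return fmt.Sprintf("%v%v", ce.Resource, ce.ActionType)
}

func main() {
	fmt.Println(ClusterEvent{Resource: "Node", ActionType: Add}.Label()) // NodeAdd
	fmt.Println(ClusterEvent{label: "UnschedulableTimeout"}.Label())     // override wins
}
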
// AllClusterEventLabels returns all possible cluster event labels that can be given to the metrics.
func AllClusterEventLabels() []string {
labels := []string{EventUnschedulableTimeout.Label()}
for _, r := range allResources {
for _, a := range basicActionTypes {
labels = append(labels, ClusterEvent{Resource: r, ActionType: a}.Label())
}
if r == Pod {
for _, a := range podActionTypes {
labels = append(labels, ClusterEvent{Resource: r, ActionType: a}.Label())
}
} else if r == Node {
for _, a := range nodeActionTypes {
labels = append(labels, ClusterEvent{Resource: r, ActionType: a}.Label())
}
}
}
return labels
}
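
One plausible consumer of this list, sketched below, is metric initialization: pre-creating a series per label so every event label is visible (at zero) before its first observation. The hard-coded label slice and the map-based counter are hypothetical stand-ins for framework.AllClusterEventLabels() and a Prometheus vector.

package main

import "fmt"

func main() {
	// A stand-in for framework.AllClusterEventLabels().
	labels := []string{"NodeAdd", "NodeUpdate", "NodeDelete", "UnschedulableTimeout"}

	counters := map[string]float64{} // stand-in for a CounterVec
	for _, l := range labels {
		counters[l] = 0 // pre-create every series
	}

	counters["NodeAdd"]++ // an event arrives

	for _, l := range labels {
		fmt.Printf("scheduler_event_total{event=%q} %v\n", l, counters[l])
	}
}
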
// IsWildCard returns true if ClusterEvent follows WildCard semantics
@@ -212,8 +321,19 @@ func (ce ClusterEvent) IsWildCard() bool {
// Note: we have a special case here when the incoming event is a wildcard event:
// it will force all Pods to move to activeQ/backoffQ,
// but we take it as an unmatched event unless ce is also a wildcard one.
func (ce ClusterEvent) Match(event ClusterEvent) bool {
return ce.IsWildCard() || (ce.Resource == WildCard || ce.Resource == event.Resource) && ce.ActionType&event.ActionType != 0
func (ce ClusterEvent) Match(incomingEvent ClusterEvent) bool {
return ce.IsWildCard() || ce.Resource.match(incomingEvent.Resource) && ce.ActionType&incomingEvent.ActionType != 0
}
// match returns true if the resource matches the incoming resource.
func (r EventResource) match(resource EventResource) bool {
// WildCard matches all resources
return r == WildCard ||
// Exact match
r == resource ||
// Pod matches assignedPod and unschedulablePod.
// (assignedPod and unschedulablePod aren't exposed and hence are only used for incoming events, never in EventsToRegister.)
r == Pod && (resource == assignedPod || resource == unschedulablePod)
}
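
Match treats a registered wildcard as matching everything, otherwise requires the resources to match (with Pod absorbing the internal assignedPod/unschedulablePod) and intersects the action masks. A self-contained sketch with trimmed mirrors of the types:

package main

import "fmt"

type EventResource string

type ActionType int64

const (
	Add ActionType = 1 << iota
	Update
	Delete
	All = Add | Update | Delete
)

const (
	Pod              EventResource = "Pod"
	assignedPod      EventResource = "AssignedPod"
	unschedulablePod EventResource = "UnschedulablePod"
	WildCard         EventResource = "*"
)

type ClusterEvent struct {
	Resource   EventResource
	ActionType ActionType
}

func (ce ClusterEvent) IsWildCard() bool {
	return ce.Resource == WildCard && ce.ActionType == All
}

func (r EventResource) match(in EventResource) bool {
	return r == WildCard || r == in ||
		r == Pod && (in == assignedPod || in == unschedulablePod)
}

func (ce ClusterEvent) Match(in ClusterEvent) bool {
	return ce.IsWildCard() || ce.Resource.match(in.Resource) && ce.ActionType&in.ActionType != 0
}

func main() {
	registered := ClusterEvent{Resource: Pod, ActionType: Update}
	fmt.Println(registered.Match(ClusterEvent{Resource: assignedPod, ActionType: Update})) // true: Pod absorbs assignedPod
	fmt.Println(registered.Match(ClusterEvent{Resource: assignedPod, ActionType: Delete})) // false: actions don't overlap
	fmt.Println(ClusterEvent{Resource: WildCard, ActionType: All}.Match(registered))       // true: wildcard matches all
}
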
func UnrollWildCardResource() []ClusterEventWithHint {


@@ -365,11 +365,11 @@ func (sched *Scheduler) handleBindingCycleError(
// It's intentional to "defer" this operation; otherwise MoveAllToActiveOrBackoffQueue() would
// add this event to in-flight events and thus move the assumed pod to backoffQ anyway if the plugins don't have an appropriate QueueingHint.
if status.IsRejected() {
defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.AssignedPodDelete, assumedPod, nil, func(pod *v1.Pod) bool {
defer sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, assumedPod, nil, func(pod *v1.Pod) bool {
return assumedPod.UID != pod.UID
})
} else {
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.AssignedPodDelete, assumedPod, nil, nil)
sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, framework.EventAssignedPodDelete, assumedPod, nil, nil)
}
}


@@ -547,8 +547,8 @@ func buildExtenders(logger klog.Logger, extenders []schedulerapi.Extender, profi
type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time)
func unionedGVKs(queueingHintsPerProfile internalqueue.QueueingHintMapPerProfile) map[framework.GVK]framework.ActionType {
gvkMap := make(map[framework.GVK]framework.ActionType)
func unionedGVKs(queueingHintsPerProfile internalqueue.QueueingHintMapPerProfile) map[framework.EventResource]framework.ActionType {
gvkMap := make(map[framework.EventResource]framework.ActionType)
for _, queueingHints := range queueingHintsPerProfile {
for evt := range queueingHints {
if _, ok := gvkMap[evt.Resource]; ok {


@@ -780,7 +780,7 @@ func Test_UnionedGVKs(t *testing.T) {
tests := []struct {
name string
plugins schedulerapi.PluginSet
want map[framework.GVK]framework.ActionType
want map[framework.EventResource]framework.ActionType
enableInPlacePodVerticalScaling bool
enableSchedulerQueueingHints bool
}{
@@ -794,7 +794,7 @@ func Test_UnionedGVKs(t *testing.T) {
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Pod: framework.All,
framework.Node: framework.All,
framework.CSINode: framework.All,
@@ -817,7 +817,7 @@ func Test_UnionedGVKs(t *testing.T) {
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Node: framework.Add | framework.UpdateNodeTaint, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
},
},
@@ -831,7 +831,7 @@ func Test_UnionedGVKs(t *testing.T) {
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Pod: framework.Add,
},
},
@@ -846,7 +846,7 @@ func Test_UnionedGVKs(t *testing.T) {
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Pod: framework.Add,
framework.Node: framework.Add | framework.UpdateNodeTaint, // When Node/Add is registered, Node/UpdateNodeTaint is automatically registered.
},
@@ -861,12 +861,12 @@ func Test_UnionedGVKs(t *testing.T) {
},
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[framework.GVK]framework.ActionType{},
want: map[framework.EventResource]framework.ActionType{},
},
{
name: "plugins with default profile (No feature gate enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Pod: framework.Add | framework.UpdatePodLabel | framework.Delete,
framework.Node: framework.Add | framework.UpdateNodeAllocatable | framework.UpdateNodeLabel | framework.UpdateNodeTaint | framework.Delete,
framework.CSINode: framework.All - framework.Delete,
@@ -880,7 +880,7 @@ func Test_UnionedGVKs(t *testing.T) {
{
name: "plugins with default profile (InPlacePodVerticalScaling: enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Pod: framework.Add | framework.UpdatePodLabel | framework.UpdatePodScaleDown | framework.Delete,
framework.Node: framework.Add | framework.UpdateNodeAllocatable | framework.UpdateNodeLabel | framework.UpdateNodeTaint | framework.Delete,
framework.CSINode: framework.All - framework.Delete,
@@ -895,7 +895,7 @@ func Test_UnionedGVKs(t *testing.T) {
{
name: "plugins with default profile (queueingHint/InPlacePodVerticalScaling: enabled)",
plugins: schedulerapi.PluginSet{Enabled: defaults.PluginsV1.MultiPoint.Enabled},
want: map[framework.GVK]framework.ActionType{
want: map[framework.EventResource]framework.ActionType{
framework.Pod: framework.Add | framework.UpdatePodLabel | framework.UpdatePodScaleDown | framework.UpdatePodTolerations | framework.UpdatePodSchedulingGatesEliminated | framework.Delete,
framework.Node: framework.Add | framework.UpdateNodeAllocatable | framework.UpdateNodeLabel | framework.UpdateNodeTaint | framework.Delete,
framework.CSINode: framework.All - framework.Delete,


@@ -191,6 +191,13 @@ func TestSchedulingGates(t *testing.T) {
// TestCoreResourceEnqueue verifies that Pods failed by in-tree default plugins can be
// moved properly upon their registered events.
func TestCoreResourceEnqueue(t *testing.T) {
// These resources are intentionally unexported from the framework
// because they're only used internally for the metric labels/logging.
// We need to declare them here to use them in the test
// because this test checks the metric labels.
var assignedPod framework.EventResource = "AssignedPod"
var unschedulablePod framework.EventResource = "UnschedulablePod"
tests := []struct {
name string
// initialNodes is the list of Nodes to be created at first.
@@ -235,7 +242,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAllocatableChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
},
@@ -267,8 +274,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
return nil, fmt.Errorf("failed to remove taints off the node: %w", err)
}
return map[framework.ClusterEvent]uint64{
framework.NodeAdd: 1,
framework.NodeTaintChange: 1}, nil
{Resource: framework.Node, ActionType: framework.Add}: 1,
{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
},
@@ -288,7 +295,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node1").Label("group", "b").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -308,7 +315,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("node1").Label("group", "a").Label("node", "fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
enableSchedulingQueueHint: []bool{true},
@@ -329,7 +336,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, st.MakeNode().Name("fake-node2").Label("group", "b").Obj(), metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -347,7 +354,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Container("image").Toleration("taint-key").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the pod: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PodTolerationChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: unschedulablePod, ActionType: framework.UpdatePodTolerations}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -366,7 +373,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the Node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeTaintChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
@@ -385,7 +392,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, st.MakeNode().Name("fake-node2").Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj(), metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create the Node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -402,7 +409,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Container("image").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the pod: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PodRequestScaledDown: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: unschedulablePod, ActionType: framework.UpdatePodScaleDown}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -422,7 +429,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
return nil, fmt.Errorf("failed to delete pod1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.AssignedPodDelete: 1}, nil
return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
},
@@ -442,7 +449,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create a new node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -461,7 +468,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update fake-node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAllocatableChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -480,7 +487,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "4", v1.ResourceMemory: "4000"}).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update fake-node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAllocatableChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
enableSchedulingQueueHint: []bool{true},
@@ -500,7 +507,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "5"}).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update fake-node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAllocatableChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
@@ -524,7 +531,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(testCtx.Ctx, st.MakeNode().Name("fake-node1").Capacity(map[v1.ResourceName]string{v1.ResourcePods: "3"}).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update fake-node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAllocatableChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -540,7 +547,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("key", "val").Container("image").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the pod: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PodLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: unschedulablePod, ActionType: framework.UpdatePodLabel}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
// This behaviour is only true when enabling QHint
@@ -599,7 +606,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("anti2", "anti2").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pod1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PodLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: assignedPod, ActionType: framework.UpdatePodLabel}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -621,7 +628,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("aaa", "bbb").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pod1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PodLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: assignedPod, ActionType: framework.UpdatePodLabel}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -641,7 +648,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node").Label("zone", "zone1").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pod1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
@@ -664,7 +671,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
return nil, fmt.Errorf("failed to delete Pod: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.AssignedPodDelete: 1}, nil
return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
},
wantRequeuedPods: sets.New("pod3"),
enableSchedulingQueueHint: []bool{true},
@@ -685,7 +692,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create a new node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -702,7 +709,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node").Unschedulable(false).Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeSpecUnschedulableChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
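
Note the mapping in the hunk above: the old NodeSpecUnschedulableChange event is folded into UpdateNodeTaint, likely because the scheduler mirrors the node's .spec.unschedulable field via the node.kubernetes.io/unschedulable taint and can therefore treat both changes the same way.
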
@@ -719,7 +726,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create a new node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
},
@@ -735,7 +742,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create a new node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
// This test case is valid only when QHint is enabled
@@ -762,7 +769,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
return nil, fmt.Errorf("failed to create Pod %q: %w", pod.Name, err)
}
return map[framework.ClusterEvent]uint64{framework.AssignedPodAdd: 1}, nil
return map[framework.ClusterEvent]uint64{framework.EventAssignedPodAdd: 1}, nil
},
wantRequeuedPods: sets.New("pod3"),
enableSchedulingQueueHint: []bool{true},
@@ -786,7 +793,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Update(testCtx.Ctx, st.MakePod().Name("pod1").Label("key3", "val").Container("image").Node("fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update the pod: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.AssignedPodUpdate: 1}, nil
return map[framework.ClusterEvent]uint64{framework.EventAssignedPodUpdate: 1}, nil
},
wantRequeuedPods: sets.New("pod3"),
enableSchedulingQueueHint: []bool{true},
@@ -809,7 +816,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
return nil, fmt.Errorf("failed to delete Pod: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.AssignedPodDelete: 1}, nil
return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
},
wantRequeuedPods: sets.New("pod3"),
enableSchedulingQueueHint: []bool{true},
@@ -836,7 +843,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create a new node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod3"),
enableSchedulingQueueHint: []bool{true},
@@ -863,7 +870,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeLabelChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
},
wantRequeuedPods: sets.New("pod4"),
enableSchedulingQueueHint: []bool{true},
@@ -889,7 +896,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node2", metav1.DeleteOptions{}); err != nil {
return nil, fmt.Errorf("failed to update node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeDelete: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Delete}: 1}, nil
},
wantRequeuedPods: sets.New("pod4"),
enableSchedulingQueueHint: []bool{true},
@@ -915,7 +922,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node2", metav1.DeleteOptions{}); err != nil {
return nil, fmt.Errorf("failed to update node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeDelete: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Delete}: 1}, nil
},
wantRequeuedPods: sets.New("pod3", "pod4"),
enableSchedulingQueueHint: []bool{false},
@@ -943,7 +950,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update node: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.NodeTaintChange: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: 1}, nil
},
wantRequeuedPods: sets.New("pod4"),
},
@@ -990,7 +997,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Create(testCtx.Ctx, pv2, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create pv2: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PvAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -1045,7 +1052,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv2, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pv2: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PvUpdate: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -1096,7 +1103,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc2, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to add pvc2: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PvcAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -1153,7 +1160,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Update(testCtx.Ctx, pvc2, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pvc2: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PvcUpdate: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -1201,7 +1208,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Create(testCtx.Ctx, sc1, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create sc1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.StorageClassAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -1257,7 +1264,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv2, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pv2: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PvUpdate: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
enableSchedulingQueueHint: []bool{true},
@@ -1288,7 +1295,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc2, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to add pvc1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.PvcAdd: 1}, nil
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
@@ -1324,7 +1331,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
return nil, fmt.Errorf("failed to delete pod1: %w", err)
}
return map[framework.ClusterEvent]uint64{framework.AssignedPodDelete: 1}, nil
return map[framework.ClusterEvent]uint64{framework.EventAssignedPodDelete: 1}, nil
},
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
@@ -1428,14 +1435,14 @@ func TestCoreResourceEnqueue(t *testing.T) {
if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
for e, count := range wantTriggeredEvents {
vec, err := testutil.GetHistogramVecFromGatherer(legacyregistry.DefaultGatherer, "scheduler_event_handling_duration_seconds", map[string]string{
"event": string(e.Label),
"event": string(e.Label()),
})
if err != nil {
return false, err
}
if vec.GetAggregatedSampleCount() != count {
t.Logf("Expected %d sample for event %s, got %d", count, e.Label, vec.GetAggregatedSampleCount())
t.Logf("Expected %d sample for event %s, got %d", count, e.Label(), vec.GetAggregatedSampleCount())
return false, nil
}
}

@@ -167,13 +167,13 @@ var (
},
{
label: eventLabelName,
values: clusterEventsToLabels(schedframework.AllEvents),
values: schedframework.AllClusterEventLabels(),
},
},
"scheduler_event_handling_duration_seconds": {
{
label: eventLabelName,
values: clusterEventsToLabels(schedframework.AllEvents),
values: schedframework.AllClusterEventLabels(),
},
},
},
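
The next hunk deletes the local clusterEventsToLabels helper in favor of schedframework.AllClusterEventLabels(), centralizing the label list in the framework package. A plausible sketch of that replacement — the registry variable and the exact body are assumptions:

// allClusterEvents is an assumed internal registry of every event the
// scheduler knows about; the real name and population are not shown here.
var allClusterEvents []ClusterEvent

// AllClusterEventLabels collects each event's label so callers no longer
// convert []ClusterEvent to []string themselves (hypothetical sketch).
func AllClusterEventLabels() []string {
	labels := make([]string, 0, len(allClusterEvents))
	for _, ev := range allClusterEvents {
		labels = append(labels, ev.Label())
	}
	return labels
}
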
@@ -204,14 +204,6 @@ var (
}
)

func clusterEventsToLabels(events []schedframework.ClusterEvent) []string {
labels := make([]string, 0, len(events))
for _, event := range events {
labels = append(labels, event.Label)
}
return labels
}

// testCase defines a set of test cases that intends to test the performance of
// similar workloads of varying sizes with shared overall settings such as
// feature gates and metrics collected.
@@ -1256,7 +1248,7 @@ func compareMetricWithThreshold(items []DataItem, threshold float64, metricSelec
}

func checkEmptyInFlightEvents() error {
labels := append(clusterEventsToLabels(schedframework.AllEvents), metrics.PodPoppedInFlightEvent)
labels := append(schedframework.AllClusterEventLabels(), metrics.PodPoppedInFlightEvent)
for _, label := range labels {
value, err := testutil.GetGaugeMetricValue(metrics.InFlightEvents.WithLabelValues(label))
if err != nil {