Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-04 04:08:16 +00:00
	Merge pull request #112146 from kerthcet/feat/move-schedulerError-to-api
Move constant schedulerError in scheduler to v1 package
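In short: the scheduler-private constant SchedulerError is deleted, and every call site in the scheduling and binding cycles switches to the exported v1.PodReasonSchedulerError, so the reason recorded on the PodScheduled condition becomes part of the public k8s.io/api/core/v1 API (see the consumer sketch after the last hunk). The final hunk adds the constant alongside PodReasonUnschedulable in the v1 types.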
@@ -45,8 +45,6 @@ import (
 )
 
 const (
-	// SchedulerError is the reason recorded for events when an error occurs during scheduling a pod.
-	SchedulerError = "SchedulerError"
 	// Percentage of plugin metrics to be sampled.
 	pluginMetricsSamplePercent = 10
 	// minFeasibleNodesToFind is the minimum number of nodes that would be scored
@@ -151,7 +149,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 			nominatingInfo = clearNominatedNode
 			klog.ErrorS(err, "Error selecting node for pod", "pod", klog.KObj(pod))
 			metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
-			reason = SchedulerError
+			reason = v1.PodReasonSchedulerError
 		}
 		sched.FailureHandler(ctx, fwk, podInfo, err, reason, nominatingInfo)
 		return ScheduleResult{}, nil
@@ -170,7 +168,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 		// This relies on the fact that Error will check if the pod has been bound
 		// to a node and if so will not add it back to the unscheduled pods queue
 		// (otherwise this would cause an infinite loop).
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, err, SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, err, v1.PodReasonSchedulerError, clearNominatedNode)
 		return ScheduleResult{}, nil
 	}
 
@@ -182,7 +180,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 		if forgetErr := sched.Cache.ForgetPod(assumedPod); forgetErr != nil {
 			klog.ErrorS(forgetErr, "Scheduler cache ForgetPod failed")
 		}
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, sts.AsError(), SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, sts.AsError(), v1.PodReasonSchedulerError, clearNominatedNode)
 		return ScheduleResult{}, nil
 	}
 
@@ -195,7 +193,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 			reason = v1.PodReasonUnschedulable
 		} else {
 			metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
-			reason = SchedulerError
+			reason = v1.PodReasonSchedulerError
 		}
 		// One of the plugins returned status different than success or wait.
 		fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
@@ -228,7 +226,7 @@ func (sched *Scheduler) bindingCycle(ctx context.Context, state *framework.Cycle
 			reason = v1.PodReasonUnschedulable
 		} else {
 			metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
-			reason = SchedulerError
+			reason = v1.PodReasonSchedulerError
 		}
 		// trigger un-reserve plugins to clean up state associated with the reserved Pod
 		fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
@@ -263,7 +261,7 @@ func (sched *Scheduler) bindingCycle(ctx context.Context, state *framework.Cycle
 			// TODO(#103853): de-duplicate the logic.
 			sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(internalqueue.AssignedPodDelete, nil)
 		}
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, preBindStatus.AsError(), SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, preBindStatus.AsError(), v1.PodReasonSchedulerError, clearNominatedNode)
 		return
 	}
 
@@ -280,7 +278,7 @@ func (sched *Scheduler) bindingCycle(ctx context.Context, state *framework.Cycle
 			// TODO(#103853): de-duplicate the logic.
 			sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(internalqueue.AssignedPodDelete, nil)
 		}
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, fmt.Errorf("binding rejected: %w", err), SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, fmt.Errorf("binding rejected: %w", err), v1.PodReasonSchedulerError, clearNominatedNode)
 		return
 	}
 	// Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2.
@@ -2665,6 +2665,10 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+
+	// PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens
+	// during scheduling, for example due to nodeAffinity parsing errors.
+	PodReasonSchedulerError = "SchedulerError"
 )
 
 // PodCondition contains details for the current condition of this pod.
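With the constant exported from k8s.io/api/core/v1, components outside the scheduler can match this failure reason without importing scheduler internals. A minimal sketch of such a consumer, assuming only the v1 package; the helper failedDueToSchedulerError is hypothetical and not part of this PR:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// failedDueToSchedulerError reports whether the Pod's PodScheduled condition
// carries the reason the scheduler's FailureHandler records on internal errors.
// Hypothetical illustration, not code from this PR.
func failedDueToSchedulerError(pod *v1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.PodScheduled &&
			cond.Status == v1.ConditionFalse &&
			cond.Reason == v1.PodReasonSchedulerError {
			return true
		}
	}
	return false
}

func main() {
	// A Pod whose PodScheduled condition was set after an internal scheduling error.
	pod := &v1.Pod{Status: v1.PodStatus{Conditions: []v1.PodCondition{{
		Type:   v1.PodScheduled,
		Status: v1.ConditionFalse,
		Reason: v1.PodReasonSchedulerError,
	}}}}
	fmt.Println(failedDueToSchedulerError(pod)) // true
}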