This commit contains the following:

1. Scheduler bug-fix + scheduler-focused E2E tests
2. Add cgroup v2 support for in-place pod resize
3. Enable full E2E pod resize test for containerd >= 1.6.9 and EventedPLEG related changes.

Co-Authored-By: Vinay Kulkarni <vskibum@gmail.com>

committed by vinay kulkarni
parent f2bd94a0de
commit 7db339dba2
@@ -38,10 +38,12 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/internal/heap"
@@ -683,14 +685,47 @@ func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) {
 	p.lock.Unlock()
 }
 
+// isPodResourcesResizedDown returns true if a pod CPU and/or memory resize request has been
+// admitted by kubelet, is 'InProgress', and results in a net sizing down of updated resources.
+// It returns false if either CPU or memory resource is net resized up, or if no resize is in progress.
+func isPodResourcesResizedDown(pod *v1.Pod) bool {
+	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
+		// TODO(vinaykul,wangchen615,InPlacePodVerticalScaling): Fix this to determine when a
+		// pod is truly resized down (might need oldPod if we cannot determine from Status alone)
+		if pod.Status.Resize == v1.PodResizeStatusInProgress {
+			return true
+		}
+	}
+	return false
+}
+
 // AssignedPodUpdated is called when a bound pod is updated. Change of labels
 // may make pending pods with matching affinity terms schedulable.
 func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) {
 	p.lock.Lock()
-	p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodUpdate)
+	if isPodResourcesResizedDown(pod) {
+		p.moveAllToActiveOrBackoffQueue(AssignedPodUpdate, nil)
+	} else {
+		p.movePodsToActiveOrBackoffQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod), AssignedPodUpdate)
+	}
 	p.lock.Unlock()
 }
 
+// NOTE: this function assumes a lock has been acquired in the caller.
+// moveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
+// This function adds all pods and then signals the condition variable to ensure that
+// if Pop() is waiting for an item, it receives the signal after all the pods are in the
+// queue and the head is the highest priority pod.
+func (p *PriorityQueue) moveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) {
+	unschedulablePods := make([]*framework.QueuedPodInfo, 0, len(p.unschedulablePods.podInfoMap))
+	for _, pInfo := range p.unschedulablePods.podInfoMap {
+		if preCheck == nil || preCheck(pInfo.Pod) {
+			unschedulablePods = append(unschedulablePods, pInfo)
+		}
+	}
+	p.movePodsToActiveOrBackoffQueue(unschedulablePods, event)
+}
+
 // MoveAllToActiveOrBackoffQueue moves all pods from unschedulablePods to activeQ or backoffQ.
 // This function adds all pods and then signals the condition variable to ensure that
 // if Pop() is waiting for an item, it receives the signal after all the pods are in the
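
Aside for reviewers: below is a minimal standalone sketch of the condition the new helper keys on, assuming k8s.io/api is on the module path. The feature-gate check is omitted, and resizedDown is a local stand-in for the unexported isPodResourcesResizedDown; per the TODO in the hunk above, any in-progress resize is currently treated as a potential downsize.

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    )

    // resizedDown mirrors the commit's isPodResourcesResizedDown minus the
    // feature-gate check: any in-progress resize counts as a potential downsize.
    func resizedDown(pod *v1.Pod) bool {
    	return pod.Status.Resize == v1.PodResizeStatusInProgress
    }

    func main() {
    	pod := &v1.Pod{Status: v1.PodStatus{Resize: v1.PodResizeStatusInProgress}}
    	fmt.Println(resizedDown(pod)) // true: all unschedulable pods get requeued
    	pod.Status.Resize = ""
    	fmt.Println(resizedDown(pod)) // false: only affinity-matching pods move
    }

The asymmetry matters for correctness: a pod resized down frees node resources, which can make any previously unschedulable pod fit, so AssignedPodUpdated must requeue everything rather than only pods with matching affinity terms.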
@@ -698,13 +733,7 @@ func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) {
 func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
-	unschedulablePods := make([]*framework.QueuedPodInfo, 0, len(p.unschedulablePods.podInfoMap))
-	for _, pInfo := range p.unschedulablePods.podInfoMap {
-		if preCheck == nil || preCheck(pInfo.Pod) {
-			unschedulablePods = append(unschedulablePods, pInfo)
-		}
-	}
-	p.movePodsToActiveOrBackoffQueue(unschedulablePods, event)
+	p.moveAllToActiveOrBackoffQueue(event, preCheck)
 }
 
 // NOTE: this function assumes lock has been acquired in caller