Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-10-30 01:42:48 +00:00).
Merge pull request #41870 from intelsdi-x/test-out-of-oir

Automatic merge from submit-queue (batch tested with PRs 31783, 41988, 42535, 42572, 41870).

Pods pending due to insufficient OIR should get scheduled once sufficient OIR becomes available. This appears to be a regression since v1.5.0 in scheduler behavior for opaque integer resources, reported in https://github.com/kubernetes/kubernetes/issues/41861.

- [x] Add failing e2e test to trigger the regression
- [x] Restore previous behavior (pods pending due to insufficient OIR get scheduled once sufficient OIR becomes available)
This commit is contained in:
@@ -468,6 +468,30 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s
 	return true, nil, nil
 }
 
+// Returns a *schedulercache.Resource that covers the largest width in each
+// resource dimension. Because init-containers run sequentially, we collect the
+// max in each dimension iteratively. In contrast, we sum the resource vectors
+// for regular containers since they run simultaneously.
+//
+// Example:
+//
+// Pod:
+//   InitContainers
+//     IC1:
+//       CPU: 2
+//       Memory: 1G
+//     IC2:
+//       CPU: 2
+//       Memory: 3G
+//   Containers
+//     C1:
+//       CPU: 2
+//       Memory: 1G
+//     C2:
+//       CPU: 1
+//       Memory: 1G
+//
+// Result: CPU: 3, Memory: 3G
 func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 	result := schedulercache.Resource{}
 	for _, container := range pod.Spec.Containers {
@@ -505,10 +529,8 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 		default:
 			if v1.IsOpaqueIntResourceName(rName) {
 				value := rQuantity.Value()
-				// Ensure the opaque resource map is initialized in the result.
-				result.AddOpaque(rName, int64(0))
 				if value > result.OpaqueIntResources[rName] {
-					result.OpaqueIntResources[rName] = value
+					result.SetOpaque(rName, value)
 				}
 			}
 		}
||||
@@ -83,11 +83,15 @@ func (r *Resource) ResourceList() v1.ResourceList {
 }
 
 func (r *Resource) AddOpaque(name v1.ResourceName, quantity int64) {
+	r.SetOpaque(name, r.OpaqueIntResources[name]+quantity)
+}
+
+func (r *Resource) SetOpaque(name v1.ResourceName, quantity int64) {
 	// Lazily allocate opaque integer resource map.
 	if r.OpaqueIntResources == nil {
 		r.OpaqueIntResources = map[v1.ResourceName]int64{}
 	}
-	r.OpaqueIntResources[name] += quantity
+	r.OpaqueIntResources[name] = quantity
 }
 
 // NewNodeInfo returns a ready to use empty NodeInfo object.
@@ -333,7 +337,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error {
 			n.allowedPodNumber = int(rQuant.Value())
 		default:
 			if v1.IsOpaqueIntResourceName(rName) {
-				n.allocatableResource.AddOpaque(rName, rQuant.Value())
+				n.allocatableResource.SetOpaque(rName, rQuant.Value())
 			}
 		}
 	}
Reference in New Issue
Block a user