Revert "Pods pending due to insufficient OIR should get scheduled once sufficient OIR becomes available."

Author: Dawn Chen
Date: 2017-03-06 14:27:17 -08:00
Committed by: GitHub
Parent: 3a1db2f76b
Commit: 60758f3fff
4 changed files with 37 additions and 132 deletions


@@ -468,30 +468,6 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *s
	return true, nil, nil
}
// Returns a *schedulercache.Resource that covers the largest width in each
// resource dimension. Because init-containers run sequentially, we collect the
// max in each dimension iteratively. In contrast, we sum the resource vectors
// for regular containers since they run simultaneously.
//
// Example:
//
// Pod:
// InitContainers
// IC1:
// CPU: 2
// Memory: 1G
// IC2:
// CPU: 2
// Memory: 3G
// Containers
// C1:
// CPU: 2
// Memory: 1G
// C2:
// CPU: 1
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
	result := schedulercache.Resource{}
	for _, container := range pod.Spec.Containers {
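
The comment removed in the hunk above describes how the scheduler derives a pod's effective resource request: take the elementwise maximum across init containers (they run one at a time) and the elementwise sum across regular containers (they run concurrently), then keep the larger of the two in each dimension. As a rough illustration only, here is a minimal Go sketch of that rule for CPU and memory; the containerRequest, podSpec, and effectiveRequest names are invented for this example and are not the v1 or schedulercache types used in the real code.

// Hypothetical, simplified types for illustration; not the real
// v1.Pod or schedulercache.Resource structures.
type containerRequest struct {
	MilliCPU int64
	Memory   int64 // bytes
}

type podSpec struct {
	InitContainers []containerRequest
	Containers     []containerRequest
}

func max64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

// effectiveRequest applies the rule from the removed comment:
// sum across regular containers, max across init containers,
// then the larger of the two in each resource dimension.
func effectiveRequest(p podSpec) containerRequest {
	var result containerRequest

	// Regular containers run simultaneously, so their requests add up.
	for _, c := range p.Containers {
		result.MilliCPU += c.MilliCPU
		result.Memory += c.Memory
	}

	// Init containers run sequentially, so only the widest one counts.
	for _, ic := range p.InitContainers {
		result.MilliCPU = max64(result.MilliCPU, ic.MilliCPU)
		result.Memory = max64(result.Memory, ic.Memory)
	}
	return result
}

With the pod from the worked example in the comment (init containers of 2 CPU/1G and 2 CPU/3G, regular containers of 2 CPU/1G and 1 CPU/1G), this sketch yields 3 CPU and 3G of memory, matching the stated result.
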
@@ -529,8 +505,10 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
		default:
			if v1.IsOpaqueIntResourceName(rName) {
				value := rQuantity.Value()
				// Ensure the opaque resource map is initialized in the result.
				result.AddOpaque(rName, int64(0))
				if value > result.OpaqueIntResources[rName] {
					result.SetOpaque(rName, value)
					result.OpaqueIntResources[rName] = value
				}
			}
		}
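
The second hunk sits in the branch of GetResourceRequest that handles opaque integer resources (OIRs). Both variants visible in the diff follow the same max-per-resource-name rule: keep a map keyed by resource name and overwrite an entry only when a larger value is seen, while making sure the map exists before the first write. A minimal sketch of that pattern, using a plain map and an invented maxOpaque helper rather than the schedulercache.Resource methods:

// maxOpaque records value under name, keeping only the largest value
// seen so far. The map is lazily initialized so the first write does
// not panic on a nil map, mirroring the "ensure the map is initialized"
// concern visible in the diff. Hypothetical helper for illustration only.
func maxOpaque(opaque map[string]int64, name string, value int64) map[string]int64 {
	if opaque == nil {
		opaque = map[string]int64{}
	}
	if value > opaque[name] {
		opaque[name] = value
	}
	return opaque
}

Calling maxOpaque(nil, "example.com/foo", 3) and then maxOpaque with the same name and the value 2 leaves the entry at 3, which is the largest-wins behavior the init-container loop relies on.
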