Merge pull request #47179 from ddysher/local-isolation-fix
Automatic merge from submit-queue (batch tested with PRs 47883, 47179, 46966, 47982, 47945)
Fix local isolation for pod requesting only overlay or scratch
**What this PR does / why we need it**:
Fix the overlay resource predicate for pods that request only overlay or scratch storage. For example, the following pod can pass the predicate even if the node's overlay storage is only 512Gi:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod
spec:
  containers:
  - name: nginx
    image: nginx
    resources:
      requests:
        storage.kubernetes.io/overlay: 1024Gi
```
Similarly, the following pod will also pass the predicate:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    emptyDir:
      sizeLimit: 1024Gi
```
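To make the failure mode concrete, here is a minimal standalone Go sketch of the early-return guard in `PodFitsResources` (the `resource` struct and the two helper functions are simplified stand-ins for illustration only, not the scheduler's actual code; the real one-line change is in the diff below):
```go
package main

import "fmt"

// Simplified stand-in for schedulercache.Resource; field names follow the
// diff below, but this is an illustration, not the scheduler's actual type.
type resource struct {
	MilliCPU       int64
	Memory         int64
	NvidiaGPU      int64
	StorageOverlay int64
	StorageScratch int64
}

// Before this PR: a pod requesting only overlay/scratch storage looks
// "empty", so PodFitsResources returns early and never checks storage.
func skipsFitChecksBefore(r resource) bool {
	return r.MilliCPU == 0 && r.Memory == 0 && r.NvidiaGPU == 0
}

// After this PR: the storage fields are part of the guard, so such a pod
// falls through to the real fit checks.
func skipsFitChecksAfter(r resource) bool {
	return r.MilliCPU == 0 && r.Memory == 0 && r.NvidiaGPU == 0 &&
		r.StorageOverlay == 0 && r.StorageScratch == 0
}

func main() {
	// The 1024Gi overlay-only request from the example above.
	overlayOnly := resource{StorageOverlay: 1024}
	fmt.Println(skipsFitChecksBefore(overlayOnly)) // true: bug, predicate passes
	fmt.Println(skipsFitChecksAfter(overlayOnly))  // false: storage is checked
}
```
Once the storage fields are part of the guard, storage-only requests no longer short-circuit the predicate and are checked against the node's allocatable scratch and overlay space.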
**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes https://github.com/kubernetes/kubernetes/issues/47798
**Special notes for your reviewer**:
**Release note**:
```release-note
```
@jingxu97 @vishh @dashpole
```diff
@@ -584,7 +584,7 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
 		// We couldn't parse metadata - fallback to computing it.
 		podRequest = GetResourceRequest(pod)
 	}
-	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && len(podRequest.OpaqueIntResources) == 0 {
+	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.OpaqueIntResources) == 0 {
 		return len(predicateFails) == 0, predicateFails, nil
 	}
```
```diff
@@ -435,7 +435,7 @@ func TestPodFitsResources(t *testing.T) {
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10, StorageOverlay: 20})),
 			fits: false,
-			test: "due to init container scratch disk",
+			test: "due to container scratch disk",
 			reasons: []algorithm.PredicateFailureReason{
 				NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 1, 20, 20),
@@ -453,7 +453,17 @@ func TestPodFitsResources(t *testing.T) {
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
 			fits: false,
-			test: "request exceeds allocatable",
+			test: "request exceeds allocatable overlay storage resource",
+			reasons: []algorithm.PredicateFailureReason{
+				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
+			},
+		},
+		{
+			pod: newResourcePod(schedulercache.Resource{StorageOverlay: 18}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+			fits: false,
+			test: "request exceeds allocatable overlay storage resource",
 			reasons: []algorithm.PredicateFailureReason{
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
 			},
@@ -470,6 +480,18 @@ func TestPodFitsResources(t *testing.T) {
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
 			},
 		},
+		{
+			pod: newResourcePod(schedulercache.Resource{}),
+			emptyDirLimit: 25,
+			storageMedium: v1.StorageMediumDefault,
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+			fits: false,
+			test: "storage scratchrequest exceeds allocatable",
+			reasons: []algorithm.PredicateFailureReason{
+				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
+			},
+		},
 		{
 			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
 			emptyDirLimit: 15,
@@ -477,10 +499,7 @@ func TestPodFitsResources(t *testing.T) {
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
 			fits: true,
-			test: "storage scratchrequest exceeds allocatable",
-			reasons: []algorithm.PredicateFailureReason{
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
-			},
+			test: "pod fit with memory medium",
 		},
 	}
```
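The `emptyDirLimit`/`storageMedium` test cases above encode the intended accounting: a default-medium emptyDir's sizeLimit is disk-backed and counts toward the pod's scratch request, while a memory-medium emptyDir does not. A small sketch of that rule (the `scratchRequest` helper is hypothetical, written only to mirror the test expectations, assuming a node with 20 scratch allocatable and 5 already in use, as in the tests):
```go
package main

import "fmt"

// scratchRequest is a hypothetical helper mirroring the test expectations
// above: default-medium emptyDir sizeLimits count toward scratch storage,
// memory-medium ones do not.
func scratchRequest(overlay, emptyDirLimit int64, memoryMedium bool) int64 {
	total := overlay
	if !memoryMedium {
		total += emptyDirLimit
	}
	return total
}

func main() {
	allocatable, used := int64(20), int64(5) // node values from the tests

	// emptyDirLimit 25 on the default medium: 25 > 20-5, so the pod must not fit.
	fmt.Println(scratchRequest(0, 25, false) <= allocatable-used) // false

	// emptyDirLimit 15 on the memory medium: only the 10 overlay counts, so it fits.
	fmt.Println(scratchRequest(10, 15, true) <= allocatable-used) // true
}
```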