Local storage does not manage overlay any more
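This change collapses the scheduler's two local-storage resources into one. The Resource fields StorageScratch and StorageOverlay (and the v1.ResourceStorageScratch / v1.ResourceStorageOverlay names keyed off them) are replaced by a single EphemeralStorage field keyed by v1.ResourceEphemeralStorage; the scheduler stops summing emptyDir volume sizes itself; and PodFitsResources shrinks to one allocatable check for ephemeral storage.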
@@ -504,14 +504,6 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 		result.Add(container.Resources.Requests)
 	}
 
-	// Account for storage requested by emptydir volumes
-	// If the storage medium is memory, should exclude the size
-	for _, vol := range pod.Spec.Volumes {
-		if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory {
-			result.StorageScratch += vol.EmptyDir.SizeLimit.Value()
-		}
-	}
-
 	// take max_resource(sum_pod, any_init_container)
 	for _, container := range pod.Spec.InitContainers {
 		for rName, rQuantity := range container.Resources.Requests {
@@ -520,6 +512,10 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 			if mem := rQuantity.Value(); mem > result.Memory {
 				result.Memory = mem
 			}
+		case v1.ResourceEphemeralStorage:
+			if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage {
+				result.EphemeralStorage = ephemeralStorage
+			}
 		case v1.ResourceCPU:
 			if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
 				result.MilliCPU = cpu
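Combined with the next hunk (which deletes the overlay case), the per-resource maximization over init containers ends up handling ephemeral storage alongside CPU, memory, and GPU. A sketch of the resulting switch; the memory and GPU case labels are inferred from the unchanged context lines, not shown in the diff itself:

	switch rName {
	case v1.ResourceMemory:
		if mem := rQuantity.Value(); mem > result.Memory {
			result.Memory = mem
		}
	case v1.ResourceEphemeralStorage:
		if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage {
			result.EphemeralStorage = ephemeralStorage
		}
	case v1.ResourceCPU:
		if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
			result.MilliCPU = cpu
		}
	case v1.ResourceNvidiaGPU:
		if gpu := rQuantity.Value(); gpu > result.NvidiaGPU {
			result.NvidiaGPU = gpu
		}
	default:
		if v1helper.IsExtendedResourceName(rName) {
			value := rQuantity.Value()
			// extended-resource max handling continues below (elided in the diff)
			_ = value
		}
	}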
@@ -528,10 +524,6 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 			if gpu := rQuantity.Value(); gpu > result.NvidiaGPU {
 				result.NvidiaGPU = gpu
 			}
-		case v1.ResourceStorageOverlay:
-			if overlay := rQuantity.Value(); overlay > result.StorageOverlay {
-				result.StorageOverlay = overlay
-			}
 		default:
 			if v1helper.IsExtendedResourceName(rName) {
 				value := rQuantity.Value()
@@ -572,7 +564,7 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo
 		// We couldn't parse metadata - fallback to computing it.
 		podRequest = GetResourceRequest(pod)
 	}
-	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.ExtendedResources) == 0 {
+	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.EphemeralStorage == 0 && len(podRequest.ExtendedResources) == 0 {
 		return len(predicateFails) == 0, predicateFails, nil
 	}
 
@@ -587,20 +579,8 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo
 		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU))
 	}
 
-	scratchSpaceRequest := podRequest.StorageScratch
-	if allocatable.StorageOverlay == 0 {
-		scratchSpaceRequest += podRequest.StorageOverlay
-		//scratchSpaceRequest += nodeInfo.RequestedResource().StorageOverlay
-		nodeScratchRequest := nodeInfo.RequestedResource().StorageOverlay + nodeInfo.RequestedResource().StorageScratch
-		if allocatable.StorageScratch < scratchSpaceRequest+nodeScratchRequest {
-			predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageScratch, scratchSpaceRequest, nodeScratchRequest, allocatable.StorageScratch))
-		}
-
-	} else if allocatable.StorageScratch < scratchSpaceRequest+nodeInfo.RequestedResource().StorageScratch {
-		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageScratch, scratchSpaceRequest, nodeInfo.RequestedResource().StorageScratch, allocatable.StorageScratch))
-	}
-	if allocatable.StorageOverlay > 0 && allocatable.StorageOverlay < podRequest.StorageOverlay+nodeInfo.RequestedResource().StorageOverlay {
-		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageOverlay, podRequest.StorageOverlay, nodeInfo.RequestedResource().StorageOverlay, allocatable.StorageOverlay))
+	if allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage {
+		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))
 	}
 
 	for rName, rQuant := range podRequest.ExtendedResources {
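The three-way scratch/overlay branching above collapses into this single comparison. Worked through with the numbers the tests below use: a node with 20 allocatable ephemeral storage and 0 already requested rejects a pod requesting 25, yielding NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20).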
@@ -80,24 +80,24 @@ var (
 func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v1.NodeResources {
 	return v1.NodeResources{
 		Capacity: v1.ResourceList{
-			v1.ResourceCPU:            *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-			v1.ResourceMemory:         *resource.NewQuantity(memory, resource.BinarySI),
-			v1.ResourcePods:           *resource.NewQuantity(pods, resource.DecimalSI),
-			v1.ResourceNvidiaGPU:      *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
-			opaqueResourceA:           *resource.NewQuantity(opaqueA, resource.DecimalSI),
-			v1.ResourceStorageScratch: *resource.NewQuantity(storage, resource.BinarySI),
+			v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
+			v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
+			v1.ResourceNvidiaGPU:        *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
+			opaqueResourceA:             *resource.NewQuantity(opaqueA, resource.DecimalSI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
 		},
 	}
 }
 
 func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v1.ResourceList {
 	return v1.ResourceList{
-		v1.ResourceCPU:            *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-		v1.ResourceMemory:         *resource.NewQuantity(memory, resource.BinarySI),
-		v1.ResourcePods:           *resource.NewQuantity(pods, resource.DecimalSI),
-		v1.ResourceNvidiaGPU:      *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
-		opaqueResourceA:           *resource.NewQuantity(opaqueA, resource.DecimalSI),
-		v1.ResourceStorageScratch: *resource.NewQuantity(storage, resource.BinarySI),
+		v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+		v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
+		v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
+		v1.ResourceNvidiaGPU:        *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
+		opaqueResourceA:             *resource.NewQuantity(opaqueA, resource.DecimalSI),
+		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
 	}
 }
 
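The final helper parameter now feeds ephemeral storage rather than scratch space. The storage tests below build their node exactly this way (10 milli-CPU, 20 memory, 0 GPUs, 32 pods, 5 of the opaque resource, 20 ephemeral storage):

	node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5, 20).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5, 20)}}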
@@ -421,92 +421,52 @@ func TestPodFitsResources(t *testing.T) {
 	}
 
 	storagePodsTests := []struct {
-		pod           *v1.Pod
-		emptyDirLimit int64
-		storageMedium v1.StorageMedium
-		nodeInfo      *schedulercache.NodeInfo
-		fits          bool
-		test          string
-		reasons       []algorithm.PredicateFailureReason
+		pod      *v1.Pod
+		nodeInfo *schedulercache.NodeInfo
+		fits     bool
+		test     string
+		reasons  []algorithm.PredicateFailureReason
 	}{
 		{
-			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 1}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10, StorageOverlay: 20})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10})),
 			fits: false,
 			test: "due to container scratch disk",
 			reasons: []algorithm.PredicateFailureReason{
 				NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 1, 20, 20),
 			},
 		},
 		{
-			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
+			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 10})),
 			fits: true,
 			test: "pod fit",
 		},
 		{
-			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 18}),
+			pod: newResourcePod(schedulercache.Resource{EphemeralStorage: 25}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2})),
 			fits: false,
-			test: "request exceeds allocatable overlay storage resource",
+			test: "storage ephemeral local storage request exceeds allocatable",
 			reasons: []algorithm.PredicateFailureReason{
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
+				NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20),
 			},
 		},
 		{
-			pod: newResourcePod(schedulercache.Resource{StorageOverlay: 18}),
+			pod: newResourcePod(schedulercache.Resource{EphemeralStorage: 10}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
-			fits: false,
-			test: "request exceeds allocatable overlay storage resource",
-			reasons: []algorithm.PredicateFailureReason{
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
-			},
-		},
-		{
-			pod:           newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
-			emptyDirLimit: 15,
-			storageMedium: v1.StorageMediumDefault,
-			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
-			fits: false,
-			test: "storage scratchrequest exceeds allocatable",
-			reasons: []algorithm.PredicateFailureReason{
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
-			},
-		},
-		{
-			pod:           newResourcePod(schedulercache.Resource{}),
-			emptyDirLimit: 25,
-			storageMedium: v1.StorageMediumDefault,
-			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
-			fits: false,
-			test: "storage scratchrequest exceeds allocatable",
-			reasons: []algorithm.PredicateFailureReason{
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
-			},
-		},
-		{
-			pod:           newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
-			emptyDirLimit: 15,
-			storageMedium: v1.StorageMediumMemory,
-			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2})),
 			fits: true,
-			test: "pod fit with memory medium",
+			test: "pod fits",
 		},
 	}
 
 	for _, test := range storagePodsTests {
 		node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5, 20).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5, 20)}}
 		test.nodeInfo.SetNode(&node)
-		pod := addStorageLimit(test.pod, test.emptyDirLimit, test.storageMedium)
-		fits, reasons, err := PodFitsResources(pod, PredicateMetadata(pod, nil), test.nodeInfo)
+		fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
 		if err != nil {
 			t.Errorf("%s: unexpected error: %v", test.test, err)
 		}
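With emptyDirLimit and storageMedium gone from the table, the addStorageLimit wrapper drops out of the loop as well: each case now feeds test.pod straight into PodFitsResources. The one remaining failure case exercises the 25-versus-20 arithmetic noted above.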
@@ -63,11 +63,10 @@ type NodeInfo struct {
 
 // Resource is a collection of compute resource.
 type Resource struct {
-	MilliCPU       int64
-	Memory         int64
-	NvidiaGPU      int64
-	StorageScratch int64
-	StorageOverlay int64
+	MilliCPU         int64
+	Memory           int64
+	NvidiaGPU        int64
+	EphemeralStorage int64
 	// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
 	// explicitly as int, to avoid conversions and improve performance.
 	AllowedPodNumber  int
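Every consumer of the two removed fields is updated in the hunks that follow: Add, ResourceList, Clone, and NodeInfo.addPod all read and write the single EphemeralStorage field.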
@@ -97,10 +96,8 @@ func (r *Resource) Add(rl v1.ResourceList) {
 			r.NvidiaGPU += rQuant.Value()
 		case v1.ResourcePods:
 			r.AllowedPodNumber += int(rQuant.Value())
-		case v1.ResourceStorageScratch:
-			r.StorageScratch += rQuant.Value()
-		case v1.ResourceStorageOverlay:
-			r.StorageOverlay += rQuant.Value()
+		case v1.ResourceEphemeralStorage:
+			r.EphemeralStorage += rQuant.Value()
 		default:
 			if v1helper.IsExtendedResourceName(rName) {
 				r.AddExtended(rName, rQuant.Value())
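A minimal usage sketch of the updated accumulator, assuming the CPU and memory cases sit just above this hunk as in the unchanged code; the quantities are illustrative, not from the commit:

	r := &Resource{}
	r.Add(v1.ResourceList{
		v1.ResourceCPU:              *resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory:           *resource.NewQuantity(1<<30, resource.BinarySI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(2<<30, resource.BinarySI),
	})
	// r.MilliCPU == 500, r.Memory == 1<<30, r.EphemeralStorage == 2<<30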
@@ -111,12 +108,11 @@ func (r *Resource) Add(rl v1.ResourceList) {
 
 func (r *Resource) ResourceList() v1.ResourceList {
 	result := v1.ResourceList{
-		v1.ResourceCPU:            *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
-		v1.ResourceMemory:         *resource.NewQuantity(r.Memory, resource.BinarySI),
-		v1.ResourceNvidiaGPU:      *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
-		v1.ResourcePods:           *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
-		v1.ResourceStorageOverlay: *resource.NewQuantity(r.StorageOverlay, resource.BinarySI),
-		v1.ResourceStorageScratch: *resource.NewQuantity(r.StorageScratch, resource.BinarySI),
+		v1.ResourceCPU:              *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
+		v1.ResourceMemory:           *resource.NewQuantity(r.Memory, resource.BinarySI),
+		v1.ResourceNvidiaGPU:        *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
+		v1.ResourcePods:             *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
+		v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
 	}
 	for rName, rQuant := range r.ExtendedResources {
 		result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
@@ -130,8 +126,7 @@ func (r *Resource) Clone() *Resource {
 		Memory:           r.Memory,
 		NvidiaGPU:        r.NvidiaGPU,
 		AllowedPodNumber: r.AllowedPodNumber,
-		StorageOverlay:   r.StorageOverlay,
-		StorageScratch:   r.StorageScratch,
+		EphemeralStorage: r.EphemeralStorage,
 	}
 	if r.ExtendedResources != nil {
 		res.ExtendedResources = make(map[v1.ResourceName]int64)
@@ -304,8 +299,7 @@ func (n *NodeInfo) addPod(pod *v1.Pod) {
 	n.requestedResource.MilliCPU += res.MilliCPU
 	n.requestedResource.Memory += res.Memory
 	n.requestedResource.NvidiaGPU += res.NvidiaGPU
-	n.requestedResource.StorageOverlay += res.StorageOverlay
-	n.requestedResource.StorageScratch += res.StorageScratch
+	n.requestedResource.EphemeralStorage += res.EphemeralStorage
 	if n.requestedResource.ExtendedResources == nil && len(res.ExtendedResources) > 0 {
 		n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
 	}
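addPod therefore keeps one running ephemeral-storage total per node; that sum is what the predicate change above compares against allocatable.EphemeralStorage.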
@@ -392,14 +386,6 @@ func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) {
 		// No non-zero resources for GPUs or opaque resources.
 	}
 
-	// Account for storage requested by emptydir volumes
-	// If the storage medium is memory, should exclude the size
-	for _, vol := range pod.Spec.Volumes {
-		if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory {
-			res.StorageScratch += vol.EmptyDir.SizeLimit.Value()
-		}
-	}
-
 	return
 }
 
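After this hunk the scheduler no longer inspects pod volumes when computing a pod's footprint: only container and init-container requests count toward ephemeral storage. A pod that wants its local-disk usage reflected in scheduling therefore has to express it as a container-level request; a hypothetical sketch, using only the v1 API types already in play here:

	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						// picked up by Resource.Add via the v1.ResourceEphemeralStorage case
						v1.ResourceEphemeralStorage: *resource.NewQuantity(2<<30, resource.BinarySI),
					},
				},
			}},
		},
	}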