Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 19:58:17 +00:00)
Consolidate extended resources and hugepages in Scheduler.

The diff below collapses the scheduler's separate ExtendedResources and HugePages bookkeeping into a single ScalarResources map: a new IsScalarResourceName helper lands in both the internal (api) and external (v1) helper packages, and the scheduler predicates, scheduler cache, and their tests are updated to use the consolidated map and its AddScalar/SetScalar accessors.
@@ -265,6 +265,11 @@ func IsIntegerResourceName(str string) bool {
 	return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str))
 }
 
+// Extended and HugePages resources
+func IsScalarResourceName(name api.ResourceName) bool {
+	return IsExtendedResourceName(name) || IsHugePageResourceName(name)
+}
+
 // this function aims to check if the service's ClusterIP is set or not
 // the objective is not to perform validation here
 func IsServiceIPSet(service *api.Service) bool {

@@ -95,6 +95,11 @@ func IsOvercommitAllowed(name v1.ResourceName) bool {
 		!overcommitBlacklist.Has(string(name))
 }
 
+// Extended and Hugepages resources
+func IsScalarResourceName(name v1.ResourceName) bool {
+	return IsExtendedResourceName(name) || IsHugePageResourceName(name)
+}
+
 // this function aims to check if the service's ClusterIP is set or not
 // the objective is not to perform validation here
 func IsServiceIPSet(service *v1.Service) bool {

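The same consolidated check is added to both helper packages. Below is a minimal, runnable sketch of the idea; the two underlying name checks are deliberately simplified stand-ins rather than the exact helpers from the tree, and all names here are illustrative:

package main

import (
	"fmt"
	"strings"
)

// ResourceName stands in for the Kubernetes resource-name string type.
type ResourceName string

// isExtendedResourceName is a simplified stand-in: treat any fully
// qualified name outside the kubernetes.io domain as an extended resource.
func isExtendedResourceName(name ResourceName) bool {
	return strings.Contains(string(name), "/") &&
		!strings.HasPrefix(string(name), "kubernetes.io/")
}

// isHugePageResourceName is a simplified stand-in: hugepages resources are
// named by page size, e.g. hugepages-2Mi.
func isHugePageResourceName(name ResourceName) bool {
	return strings.HasPrefix(string(name), "hugepages-")
}

// isScalarResourceName mirrors the consolidated helper added above: a
// scalar resource is either an extended resource or a hugepages resource.
func isScalarResourceName(name ResourceName) bool {
	return isExtendedResourceName(name) || isHugePageResourceName(name)
}

func main() {
	for _, n := range []ResourceName{"example.com/foo", "hugepages-2Mi", "cpu"} {
		fmt.Printf("%-16s scalar=%t\n", n, isScalarResourceName(n))
	}
}
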
@@ -514,16 +514,10 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 				result.NvidiaGPU = gpu
 			}
 		default:
-			if v1helper.IsExtendedResourceName(rName) {
+			if v1helper.IsScalarResourceName(rName) {
 				value := rQuantity.Value()
-				if value > result.ExtendedResources[rName] {
-					result.SetExtended(rName, value)
-				}
-			}
-			if v1helper.IsHugePageResourceName(rName) {
-				value := rQuantity.Value()
-				if value > result.HugePages[rName] {
-					result.SetHugePages(rName, value)
+				if value > result.ScalarResources[rName] {
+					result.SetScalar(rName, value)
 				}
 			}
 		}

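This hunk is the max-taking pass of GetResourceRequest: init containers run sequentially, so a pod's effective request for each resource is the maximum over init containers rather than their sum, and with one map a single branch now covers both extended and hugepages resources. A rough sketch of that max merge, using plain string keys and an illustrative helper name of my own:

package main

import "fmt"

// mergeScalarMax folds one init container's scalar requests into the
// accumulated pod request, keeping the per-resource maximum. Simplified
// types stand in for the scheduler's Resource fields.
func mergeScalarMax(result, container map[string]int64) {
	for name, value := range container {
		if value > result[name] {
			result[name] = value
		}
	}
}

func main() {
	podRequest := map[string]int64{"example.com/foo": 2}
	initContainer := map[string]int64{"example.com/foo": 5, "hugepages-2Mi": 2 * 1024 * 1024}
	mergeScalarMax(podRequest, initContainer)
	fmt.Println(podRequest) // map[example.com/foo:5 hugepages-2Mi:2097152]
}
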
@@ -563,8 +557,7 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 		podRequest.Memory == 0 &&
 		podRequest.NvidiaGPU == 0 &&
 		podRequest.EphemeralStorage == 0 &&
-		len(podRequest.ExtendedResources) == 0 &&
-		len(podRequest.HugePages) == 0 {
+		len(podRequest.ScalarResources) == 0 {
 		return len(predicateFails) == 0, predicateFails, nil
 	}
 
@@ -583,15 +576,9 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))
 	}
 
-	for rName, rQuant := range podRequest.ExtendedResources {
-		if allocatable.ExtendedResources[rName] < rQuant+nodeInfo.RequestedResource().ExtendedResources[rName] {
-			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ExtendedResources[rName], nodeInfo.RequestedResource().ExtendedResources[rName], allocatable.ExtendedResources[rName]))
-		}
-	}
-
-	for rName, rQuant := range podRequest.HugePages {
-		if allocatable.HugePages[rName] < rQuant+nodeInfo.RequestedResource().HugePages[rName] {
-			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.HugePages[rName], nodeInfo.RequestedResource().HugePages[rName], allocatable.HugePages[rName]))
+	for rName, rQuant := range podRequest.ScalarResources {
+		if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {
+			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName]))
 		}
 	}
 
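With one map, the two fit-check loops collapse into one: a pod fits only if, for every requested scalar resource, the node's already-requested amount plus the new request stays within allocatable. A compact sketch of the same arithmetic, with an illustrative function name and simplified types:

package main

import "fmt"

// fitsScalars returns the scalar resources that would be over-committed if
// the pod were added; it mirrors the consolidated predicate loop above.
func fitsScalars(request, nodeRequested, allocatable map[string]int64) []string {
	var insufficient []string
	for name, quant := range request {
		if allocatable[name] < quant+nodeRequested[name] {
			insufficient = append(insufficient, name)
		}
	}
	return insufficient
}

func main() {
	request := map[string]int64{"example.com/foo": 3}
	nodeRequested := map[string]int64{"example.com/foo": 4}
	allocatable := map[string]int64{"example.com/foo": 5}
	fmt.Println(fitsScalars(request, nodeRequested, allocatable)) // [example.com/foo]
}
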
@@ -257,85 +257,85 @@ func TestPodFitsResources(t *testing.T) {
 			test: "equal edge case for init container",
 		},
 		{
-			pod:      newResourcePod(schedulercache.Resource{ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+			pod:      newResourcePod(schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
 			fits:     true,
 			test:     "opaque resource fits",
 		},
 		{
-			pod:      newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+			pod:      newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})),
 			fits:     true,
 			test:     "opaque resource fits for init container",
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
 			fits:    false,
 			test:    "opaque resource capacity enforced",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})),
 			fits:    false,
 			test:    "opaque resource capacity enforced for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
 			fits:    false,
 			test:    "opaque resource allocatable enforced",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})),
 			fits:    false,
 			test:    "opaque resource allocatable enforced for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
 			fits:    false,
 			test:    "opaque resource allocatable enforced for multiple containers",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
 			fits: true,
 			test: "opaque resource allocatable admits multiple init containers",
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 6}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})),
 			fits:    false,
 			test:    "opaque resource allocatable enforced for multiple init containers",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
 			fits:    false,

@@ -344,7 +344,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, ExtendedResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}),
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})),
 			fits:    false,

@@ -353,28 +353,28 @@ func TestPodFitsResources(t *testing.T) {
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
 			fits:    false,
 			test:    "hugepages resource capacity enforced",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
 			fits:    false,
 			test:    "hugepages resource capacity enforced for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
 		},
 		{
 			pod: newResourcePod(
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 3}},
-				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
 			nodeInfo: schedulercache.NewNodeInfo(
-				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
 			fits:    false,
 			test:    "hugepages resource allocatable enforced for multiple containers",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},

@@ -109,9 +109,9 @@ func TestAssumePodScheduled(t *testing.T) {
 		pods: []*v1.Pod{testPods[4]},
 		wNodeInfo: &NodeInfo{
 			requestedResource: &Resource{
-				MilliCPU:          100,
-				Memory:            500,
-				ExtendedResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
+				MilliCPU:        100,
+				Memory:          500,
+				ScalarResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
 			},
 			nonzeroRequest: &Resource{
 				MilliCPU: 100,

@@ -125,9 +125,9 @@ func TestAssumePodScheduled(t *testing.T) {
 		pods: []*v1.Pod{testPods[4], testPods[5]},
 		wNodeInfo: &NodeInfo{
 			requestedResource: &Resource{
-				MilliCPU:          300,
-				Memory:            1524,
-				ExtendedResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
+				MilliCPU:        300,
+				Memory:          1524,
+				ScalarResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
 			},
 			nonzeroRequest: &Resource{
 				MilliCPU: 300,

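The test fixtures move wholesale to ScalarResources; the resource names themselves are unchanged. The opaque-integer keys above use the since-deprecated pod.alpha.kubernetes.io/opaque-int-resource- prefix, while the hugepages cases use the hugepages-<size> form. A hypothetical fixture in the same style, with a stand-in type so the snippet is self-contained:

package main

import "fmt"

// ResourceName stands in for v1.ResourceName.
type ResourceName string

func main() {
	// One opaque-integer (extended) resource and one hugepages resource,
	// both held in a single ScalarResources-style map.
	requested := map[ResourceName]int64{
		"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3,
		"hugepages-2Mi": 2 * 1024 * 1024, // one 2Mi page, in bytes
	}
	fmt.Println(requested)
}
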
@@ -70,9 +70,9 @@ type Resource struct {
 	EphemeralStorage int64
 	// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
 	// explicitly as int, to avoid conversions and improve performance.
-	AllowedPodNumber  int
-	ExtendedResources map[v1.ResourceName]int64
-	HugePages         map[v1.ResourceName]int64
+	AllowedPodNumber int
+	// ScalarResources
+	ScalarResources map[v1.ResourceName]int64
 }
 
 // New creates a Resource from ResourceList

@@ -101,11 +101,8 @@ func (r *Resource) Add(rl v1.ResourceList) {
 		case v1.ResourceEphemeralStorage:
 			r.EphemeralStorage += rQuant.Value()
 		default:
-			if v1helper.IsExtendedResourceName(rName) {
-				r.AddExtended(rName, rQuant.Value())
-			}
-			if v1helper.IsHugePageResourceName(rName) {
-				r.AddHugePages(rName, rQuant.Value())
+			if v1helper.IsScalarResourceName(rName) {
+				r.AddScalar(rName, rQuant.Value())
 			}
 		}
 	}

@@ -119,11 +116,12 @@ func (r *Resource) ResourceList() v1.ResourceList {
 		v1.ResourcePods:             *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
 		v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
 	}
-	for rName, rQuant := range r.ExtendedResources {
-		result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
-	}
-	for rName, rQuant := range r.HugePages {
-		result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
+	for rName, rQuant := range r.ScalarResources {
+		if v1helper.IsHugePageResourceName(rName) {
+			result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
+		} else {
+			result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
+		}
 	}
 	return result
 }

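Folding the two maps together means ResourceList can no longer pick the quantity format by which map a name came from: it dispatches per name instead, keeping BinarySI for hugepages (byte quantities such as 2Mi) and DecimalSI for extended resources (plain counts). A sketch of that dispatch using the real apimachinery resource package; quantityFor and the prefix check are my simplified stand-ins:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// quantityFor mirrors the per-name dispatch above: hugepages quantities are
// byte counts and render in binary notation (2Mi), while extended resources
// are plain integer counts. The prefix check is a simplified stand-in for
// IsHugePageResourceName.
func quantityFor(name string, value int64) resource.Quantity {
	if strings.HasPrefix(name, "hugepages-") {
		return *resource.NewQuantity(value, resource.BinarySI)
	}
	return *resource.NewQuantity(value, resource.DecimalSI)
}

func main() {
	q1 := quantityFor("hugepages-2Mi", 2*1024*1024)
	q2 := quantityFor("example.com/foo", 3)
	fmt.Println(q1.String(), q2.String()) // 2Mi 3
}
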
@@ -136,43 +134,25 @@ func (r *Resource) Clone() *Resource {
 		AllowedPodNumber: r.AllowedPodNumber,
 		EphemeralStorage: r.EphemeralStorage,
 	}
-	if r.ExtendedResources != nil {
-		res.ExtendedResources = make(map[v1.ResourceName]int64)
-		for k, v := range r.ExtendedResources {
-			res.ExtendedResources[k] = v
-		}
-	}
-	if r.HugePages != nil {
-		res.HugePages = make(map[v1.ResourceName]int64)
-		for k, v := range r.HugePages {
-			res.HugePages[k] = v
+	if r.ScalarResources != nil {
+		res.ScalarResources = make(map[v1.ResourceName]int64)
+		for k, v := range r.ScalarResources {
+			res.ScalarResources[k] = v
 		}
 	}
 	return res
 }
 
-func (r *Resource) AddExtended(name v1.ResourceName, quantity int64) {
-	r.SetExtended(name, r.ExtendedResources[name]+quantity)
+func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
+	r.SetScalar(name, r.ScalarResources[name]+quantity)
 }
 
-func (r *Resource) SetExtended(name v1.ResourceName, quantity int64) {
-	// Lazily allocate opaque integer resource map.
-	if r.ExtendedResources == nil {
-		r.ExtendedResources = map[v1.ResourceName]int64{}
+func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
+	// Lazily allocate scalar resource map.
+	if r.ScalarResources == nil {
+		r.ScalarResources = map[v1.ResourceName]int64{}
 	}
-	r.ExtendedResources[name] = quantity
-}
-
-func (r *Resource) AddHugePages(name v1.ResourceName, quantity int64) {
-	r.SetHugePages(name, r.HugePages[name]+quantity)
-}
-
-func (r *Resource) SetHugePages(name v1.ResourceName, quantity int64) {
-	// Lazily allocate hugepages resource map.
-	if r.HugePages == nil {
-		r.HugePages = map[v1.ResourceName]int64{}
-	}
-	r.HugePages[name] = quantity
+	r.ScalarResources[name] = quantity
 }
 
 // NewNodeInfo returns a ready to use empty NodeInfo object.

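SetScalar lazily allocates the map so that a zero-value Resource stays allocation-free until a scalar resource actually appears; AddScalar builds on it, relying on the zero value a Go map lookup returns for missing keys. A standalone sketch of the pattern with a pared-down type:

package main

import "fmt"

// Resource is a pared-down version of the scheduler cache type above.
type Resource struct {
	ScalarResources map[string]int64
}

// SetScalar lazily allocates the map: a nil map is readable (lookups return
// zero), but writing to one panics, so allocation must precede the store.
func (r *Resource) SetScalar(name string, quantity int64) {
	if r.ScalarResources == nil {
		r.ScalarResources = map[string]int64{}
	}
	r.ScalarResources[name] = quantity
}

// AddScalar accumulates: reading a missing key from the (possibly nil) map
// yields 0, so the first Add behaves like Set.
func (r *Resource) AddScalar(name string, quantity int64) {
	r.SetScalar(name, r.ScalarResources[name]+quantity)
}

func main() {
	var r Resource // zero value: ScalarResources is nil, nothing allocated yet
	r.AddScalar("example.com/foo", 2)
	r.AddScalar("example.com/foo", 3)
	fmt.Println(r.ScalarResources["example.com/foo"]) // 5
}
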
@@ -326,17 +306,11 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
 	n.requestedResource.Memory += res.Memory
 	n.requestedResource.NvidiaGPU += res.NvidiaGPU
 	n.requestedResource.EphemeralStorage += res.EphemeralStorage
-	if n.requestedResource.ExtendedResources == nil && len(res.ExtendedResources) > 0 {
-		n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
+	if n.requestedResource.ScalarResources == nil && len(res.ScalarResources) > 0 {
+		n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
 	}
-	for rName, rQuant := range res.ExtendedResources {
-		n.requestedResource.ExtendedResources[rName] += rQuant
-	}
-	if n.requestedResource.HugePages == nil && len(res.HugePages) > 0 {
-		n.requestedResource.HugePages = map[v1.ResourceName]int64{}
-	}
-	for rName, rQuant := range res.HugePages {
-		n.requestedResource.HugePages[rName] += rQuant
+	for rName, rQuant := range res.ScalarResources {
+		n.requestedResource.ScalarResources[rName] += rQuant
 	}
 	n.nonzeroRequest.MilliCPU += non0_cpu
 	n.nonzeroRequest.Memory += non0_mem

@@ -387,17 +361,11 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
 			n.requestedResource.MilliCPU -= res.MilliCPU
 			n.requestedResource.Memory -= res.Memory
 			n.requestedResource.NvidiaGPU -= res.NvidiaGPU
-			if len(res.ExtendedResources) > 0 && n.requestedResource.ExtendedResources == nil {
-				n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{}
+			if len(res.ScalarResources) > 0 && n.requestedResource.ScalarResources == nil {
+				n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
 			}
-			for rName, rQuant := range res.ExtendedResources {
-				n.requestedResource.ExtendedResources[rName] -= rQuant
-			}
-			if len(res.HugePages) > 0 && n.requestedResource.HugePages == nil {
-				n.requestedResource.HugePages = map[v1.ResourceName]int64{}
-			}
-			for rName, rQuant := range res.HugePages {
-				n.requestedResource.HugePages[rName] -= rQuant
+			for rName, rQuant := range res.ScalarResources {
+				n.requestedResource.ScalarResources[rName] -= rQuant
 			}
 			n.nonzeroRequest.MilliCPU -= non0_cpu
 			n.nonzeroRequest.Memory -= non0_mem

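AddPod and RemovePod remain exact mirrors: the same single loop adds or subtracts each scalar quantity, so removing every pod returns the per-resource tally to zero. A small sketch of that symmetry, with an illustrative helper name of my own:

package main

import "fmt"

// accumulate applies a pod's scalar requests to a node's running tally with
// the given sign (+1 on add, -1 on remove), mirroring the two loops above.
func accumulate(tally, podRes map[string]int64, sign int64) {
	for name, quant := range podRes {
		tally[name] += sign * quant
	}
}

func main() {
	tally := map[string]int64{}
	pod := map[string]int64{"example.com/foo": 2, "hugepages-2Mi": 2 * 1024 * 1024}
	accumulate(tally, pod, +1) // AddPod
	accumulate(tally, pod, -1) // RemovePod
	fmt.Println(tally) // map[example.com/foo:0 hugepages-2Mi:0]
}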