Mirror of https://github.com/optim-enterprises-bv/kubernetes.git — synced 2025-10-31 10:18:13 +00:00
			
		
		
		
	Support opaque integer resource accounting.
- Prevents kubelet from overwriting capacity during sync.
- Handles opaque integer resources in the scheduler.
  - Adds scheduler predicate tests for opaque resources.
- Validates opaque integer resources:
  - Ensures supplied opaque integer quantities in node capacity,
    node allocatable, pod request and pod limit are integers.
  - Adds tests for new validation logic (node update and pod spec).
- Adds e2e tests for opaque integer resources.
			
			
This commit is contained in:
		| @@ -430,19 +430,53 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo * | ||||
| func GetResourceRequest(pod *api.Pod) *schedulercache.Resource { | ||||
| 	result := schedulercache.Resource{} | ||||
| 	for _, container := range pod.Spec.Containers { | ||||
| 		requests := container.Resources.Requests | ||||
| 		result.Memory += requests.Memory().Value() | ||||
| 		result.MilliCPU += requests.Cpu().MilliValue() | ||||
| 		result.NvidiaGPU += requests.NvidiaGPU().Value() | ||||
| 		for rName, rQuantity := range container.Resources.Requests { | ||||
| 			switch rName { | ||||
| 			case api.ResourceMemory: | ||||
| 				result.Memory += rQuantity.Value() | ||||
| 			case api.ResourceCPU: | ||||
| 				result.MilliCPU += rQuantity.MilliValue() | ||||
| 			case api.ResourceNvidiaGPU: | ||||
| 				result.NvidiaGPU += rQuantity.Value() | ||||
| 			default: | ||||
| 				if api.IsOpaqueIntResourceName(rName) { | ||||
| 					// Lazily allocate this map only if required. | ||||
| 					if result.OpaqueIntResources == nil { | ||||
| 						result.OpaqueIntResources = map[api.ResourceName]int64{} | ||||
| 					} | ||||
| 					result.OpaqueIntResources[rName] += rQuantity.Value() | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	// take max_resource(sum_pod, any_init_container) | ||||
| 	for _, container := range pod.Spec.InitContainers { | ||||
| 		requests := container.Resources.Requests | ||||
| 		if mem := requests.Memory().Value(); mem > result.Memory { | ||||
| 			result.Memory = mem | ||||
| 		} | ||||
| 		if cpu := requests.Cpu().MilliValue(); cpu > result.MilliCPU { | ||||
| 			result.MilliCPU = cpu | ||||
| 		for rName, rQuantity := range container.Resources.Requests { | ||||
| 			switch rName { | ||||
| 			case api.ResourceMemory: | ||||
| 				if mem := rQuantity.Value(); mem > result.Memory { | ||||
| 					result.Memory = mem | ||||
| 				} | ||||
| 			case api.ResourceCPU: | ||||
| 				if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU { | ||||
| 					result.MilliCPU = cpu | ||||
| 				} | ||||
| 			case api.ResourceNvidiaGPU: | ||||
| 				if gpu := rQuantity.Value(); gpu > result.NvidiaGPU { | ||||
| 					result.NvidiaGPU = gpu | ||||
| 				} | ||||
| 			default: | ||||
| 				if api.IsOpaqueIntResourceName(rName) { | ||||
| 					// Lazily allocate this map only if required. | ||||
| 					if result.OpaqueIntResources == nil { | ||||
| 						result.OpaqueIntResources = map[api.ResourceName]int64{} | ||||
| 					} | ||||
| 					value := rQuantity.Value() | ||||
| 					if value > result.OpaqueIntResources[rName] { | ||||
| 						result.OpaqueIntResources[rName] = value | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return &result | ||||
| @@ -471,7 +505,7 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N | ||||
| 		// We couldn't parse metadata - fallback to computing it. | ||||
| 		podRequest = GetResourceRequest(pod) | ||||
| 	} | ||||
| 	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 { | ||||
| 	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && len(podRequest.OpaqueIntResources) == 0 { | ||||
| 		return len(predicateFails) == 0, predicateFails, nil | ||||
| 	} | ||||
|  | ||||
| @@ -485,6 +519,12 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N | ||||
| 	if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU { | ||||
| 		predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)) | ||||
| 	} | ||||
| 	for rName, rQuant := range podRequest.OpaqueIntResources { | ||||
| 		if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] { | ||||
| 			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.OpaqueIntResources[rName], nodeInfo.RequestedResource().OpaqueIntResources[rName], allocatable.OpaqueIntResources[rName])) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if glog.V(10) { | ||||
| 		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is | ||||
| 		// not logged. There is visible performance gain from it. | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Connor Doyle
					Connor Doyle