	Respond to review comments.
@@ -34,7 +34,7 @@ func calculateScore(requested int64, capacity int64, node string) int {
 		return 0
 	}
 	if requested > capacity {
-		glog.Infof("Combined requested resources %d from existing pods exceeds capacity %d on minion: %s",
+		glog.Infof("Combined requested resources %d from existing pods exceeds capacity %d on node %s",
 			requested, capacity, node)
 		return 0
 	}
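For context, the hunk above sits inside calculateScore, which turns the requested/capacity totals into a 0-10 priority. A minimal sketch of how the full function plausibly reads, assuming the least-requested scoring formula; the zero-capacity guard and the final return line are assumptions, not part of this diff:

// Sketch, not the diffed source: the closing formula is assumed.
func calculateScore(requested int64, capacity int64, node string) int {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		glog.Infof("Combined requested resources %d from existing pods exceeds capacity %d on node %s",
			requested, capacity, node)
		return 0
	}
	// Assumed: score by the fraction of capacity left free, scaled to 0-10.
	return int(((capacity - requested) * 10) / capacity)
}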
@@ -52,7 +52,7 @@ const defaultMemoryLimit int64 = 60 * 1024 * 1024  // 60 MB
 
 // TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
 // as an additional argument here) rather than using constants
-func toNonzeroLimits(limits *api.ResourceList) (int64, int64) {
+func getNonzeroLimits(limits *api.ResourceList) (int64, int64) {
 	var out_millicpu, out_memory int64
 	// Override if un-set, but not if explicitly set to zero
 	if (*limits.Cpu() == resource.Quantity{}) {
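Only the function name changes here. Based on the visible context lines and the defaultMemoryLimit constant in the hunk header, the renamed helper plausibly continues as below; the defaultMilliCpuLimit constant and the else branches are assumptions, not shown in this diff:

// Sketch of the assumed remainder of the helper.
func getNonzeroLimits(limits *api.ResourceList) (int64, int64) {
	var out_millicpu, out_memory int64
	// Override if un-set, but not if explicitly set to zero
	if (*limits.Cpu() == resource.Quantity{}) {
		out_millicpu = defaultMilliCpuLimit // assumed sibling of defaultMemoryLimit
	} else {
		out_millicpu = limits.Cpu().MilliValue()
	}
	// Override if un-set, but not if explicitly set to zero
	if (*limits.Memory() == resource.Quantity{}) {
		out_memory = defaultMemoryLimit
	} else {
		out_memory = limits.Memory().Value()
	}
	return out_millicpu, out_memory
}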
@@ -79,7 +79,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+			cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 			totalMilliCPU += cpu
 			totalMemory += memory
 		}
@@ -87,7 +87,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, minions.
 	for _, container := range pod.Spec.Containers {
-		cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+		cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 		totalMilliCPU += cpu
 		totalMemory += memory
 	}
@@ -195,7 +195,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 	score := int(0)
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+			cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 			totalMilliCPU += cpu
 			totalMemory += memory
 		}
@@ -203,7 +203,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, minions.
 	for _, container := range pod.Spec.Containers {
-		cpu, memory := toNonzeroLimits(&container.Resources.Limits)
+		cpu, memory := getNonzeroLimits(&container.Resources.Limits)
 		totalMilliCPU += cpu
 		totalMemory += memory
 	}
 
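The four hunks above are pure call-site renames. In calculateBalancedResourceAllocation the accumulated totals feed a balance score rather than a free-capacity score; a sketch of the likely shape follows, assuming a CPU/memory-fraction formulation (capacityMilliCPU, capacityMemory, and the exact formula are assumptions, not shown in this diff):

// Sketch: prefer nodes whose CPU and memory utilization fractions
// end up close to each other after placing the pod.
cpuFraction := float64(totalMilliCPU) / float64(capacityMilliCPU)
memoryFraction := float64(totalMemory) / float64(capacityMemory)
diff := math.Abs(cpuFraction - memoryFraction)
score = int(10 - diff*10)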
@@ -84,7 +84,7 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith
 		}
 		result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
 		glog.V(10).Infof(
-			"%v -> %v: ServiceSpreadPriority, Sore: (%d)", pod.Name, minion.Name, int(fScore),
+			"%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
 		)
 	}
 	return result, nil
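The only change in this last hunk fixes the "Sore" typo in the V(10) log line. For reference, fScore is plausibly computed from per-minion counts of pods belonging to the same service, along the lines of the sketch below (counts and maxCount are assumed names, not shown in this diff):

// Sketch: fewer matching service pods on a minion -> higher spread score.
fScore := 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))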