Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-10-31 02:08:13 +00:00)
	kuberuntime: refactor linux resources for better reuse
Separate the CPU/memory request/limit -> Linux resource conversion into its own function for better reuse. Elsewhere in the kuberuntime package we will want to leverage this requests/limits to Linux resource conversion.

Signed-off-by: Eric Ernst <eric_ernst@apple.com>
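As an aside (not part of the diff below), here is a minimal, self-contained sketch of the conversion this commit factors out: CPU request -> cpu.shares, CPU limit -> CFS quota/period, memory limit -> bytes. The linuxResources struct, the toLinuxResources helper, and the constants are illustrative stand-ins; the real code fills runtimeapi.LinuxContainerResources through the milliCPUToShares/milliCPUToQuota helpers and additionally clamps to minimum share and quota values, which this sketch omits. The 1024 shares per CPU and 100000µs period do match the kubelet defaults, and with the inputs used here the output matches the first test case added in this commit.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// linuxResources is a stand-in for the CRI runtimeapi.LinuxContainerResources
// fields the kubelet populates in this code path.
type linuxResources struct {
	CpuShares          int64
	CpuQuota           int64
	CpuPeriod          int64
	MemoryLimitInBytes int64
}

// toLinuxResources sketches the requests/limits -> Linux resources conversion.
// It skips the minimum-shares and minimum-quota clamps the real helpers apply.
func toLinuxResources(cpuReq, cpuLim, memLim *resource.Quantity) *linuxResources {
	const (
		sharesPerCPU = 1024   // cgroup cpu.shares granted per whole CPU
		milliPerCPU  = 1000   // milli-CPUs per CPU
		quotaPeriod  = 100000 // default CFS period in microseconds (100ms)
	)
	r := &linuxResources{CpuPeriod: quotaPeriod}

	// If a request is not specified but a limit is, the request defaults to the limit.
	reqMilli := cpuReq.MilliValue()
	if cpuReq.IsZero() && !cpuLim.IsZero() {
		reqMilli = cpuLim.MilliValue()
	}
	r.CpuShares = reqMilli * sharesPerCPU / milliPerCPU
	r.CpuQuota = cpuLim.MilliValue() * quotaPeriod / milliPerCPU
	r.MemoryLimitInBytes = memLim.Value()
	return r
}

func main() {
	cpuReq := resource.MustParse("1")
	cpuLim := resource.MustParse("2")
	memLim := resource.MustParse("128Mi")
	fmt.Printf("%+v\n", toLinuxResources(&cpuReq, &cpuLim, &memLim))
	// Output: &{CpuShares:1024 CpuQuota:200000 CpuPeriod:100000 MemoryLimitInBytes:134217728}
}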
@@ -26,6 +26,7 @@ import (
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 	cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 	"k8s.io/klog/v2"
@@ -62,42 +63,10 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	}
 
 	// set linux container resources
-	var cpuShares int64
-	cpuRequest := container.Resources.Requests.Cpu()
-	cpuLimit := container.Resources.Limits.Cpu()
-	memoryLimit := container.Resources.Limits.Memory().Value()
-	memoryRequest := container.Resources.Requests.Memory().Value()
-	oomScoreAdj := int64(qos.GetContainerOOMScoreAdjust(pod, container,
-		int64(m.machineInfo.MemoryCapacity)))
-	// If request is not specified, but limit is, we want request to default to limit.
-	// API server does this for new containers, but we repeat this logic in Kubelet
-	// for containers running on existing Kubernetes clusters.
-	if cpuRequest.IsZero() && !cpuLimit.IsZero() {
-		cpuShares = milliCPUToShares(cpuLimit.MilliValue())
-	} else {
-		// if cpuRequest.Amount is nil, then milliCPUToShares will return the minimal number
-		// of CPU shares.
-		cpuShares = milliCPUToShares(cpuRequest.MilliValue())
-	}
-	lc.Resources.CpuShares = cpuShares
-	if memoryLimit != 0 {
-		lc.Resources.MemoryLimitInBytes = memoryLimit
-	}
-	// Set OOM score of the container based on qos policy. Processes in lower-priority pods should
-	// be killed first if the system runs out of memory.
-	lc.Resources.OomScoreAdj = oomScoreAdj
-
-	if m.cpuCFSQuota {
-		// if cpuLimit.Amount is nil, then the appropriate default value is returned
-		// to allow full usage of cpu resource.
-		cpuPeriod := int64(quotaPeriod)
-		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
-			cpuPeriod = int64(m.cpuCFSQuotaPeriod.Duration / time.Microsecond)
-		}
-		cpuQuota := milliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
-		lc.Resources.CpuQuota = cpuQuota
-		lc.Resources.CpuPeriod = cpuPeriod
-	}
+	lc.Resources = m.calculateLinuxResources(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
+
+	lc.Resources.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
+		int64(m.machineInfo.MemoryCapacity)))
 
 	lc.Resources.HugepageLimits = GetHugepageLimitsFromResources(container.Resources)
 
@@ -121,7 +90,8 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	// Set memory.min and memory.high to enforce MemoryQoS
 	if enforceMemoryQoS {
 		unified := map[string]string{}
+		memoryRequest := container.Resources.Requests.Memory().Value()
+		memoryLimit := container.Resources.Limits.Memory().Value()
 		if memoryRequest != 0 {
 			unified[cm.MemoryMin] = strconv.FormatInt(memoryRequest, 10)
 		}
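This hunk only moves the memoryRequest/memoryLimit locals into the MemoryQoS branch where they are actually used. For context, a tiny runnable sketch of what the shown context lines do with the request, using a literal "memory.min" key as a stand-in for cm.MemoryMin:

package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// memory.min is derived from the container's memory request, as in the
	// enforceMemoryQoS branch above; "memory.min" stands in for cm.MemoryMin.
	memoryRequest := resource.MustParse("128Mi").Value()
	unified := map[string]string{}
	if memoryRequest != 0 {
		unified["memory.min"] = strconv.FormatInt(memoryRequest, 10)
	}
	fmt.Println(unified) // map[memory.min:134217728]
}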
@@ -158,6 +128,43 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	return lc
 }
 
+// calculateLinuxResources will create the linuxContainerResources type based on the provided CPU and memory resource requests, limits
+func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity) *runtimeapi.LinuxContainerResources {
+	resources := runtimeapi.LinuxContainerResources{}
+	var cpuShares int64
+
+	memLimit := memoryLimit.Value()
+
+	// If request is not specified, but limit is, we want request to default to limit.
+	// API server does this for new containers, but we repeat this logic in Kubelet
+	// for containers running on existing Kubernetes clusters.
+	if cpuRequest.IsZero() && !cpuLimit.IsZero() {
+		cpuShares = milliCPUToShares(cpuLimit.MilliValue())
+	} else {
+		// if cpuRequest.Amount is nil, then milliCPUToShares will return the minimal number
+		// of CPU shares.
+		cpuShares = milliCPUToShares(cpuRequest.MilliValue())
+	}
+	resources.CpuShares = cpuShares
+	if memLimit != 0 {
+		resources.MemoryLimitInBytes = memLimit
+	}
+
+	if m.cpuCFSQuota {
+		// if cpuLimit.Amount is nil, then the appropriate default value is returned
+		// to allow full usage of cpu resource.
+		cpuPeriod := int64(quotaPeriod)
+		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
+			cpuPeriod = int64(m.cpuCFSQuotaPeriod.Duration / time.Microsecond)
+		}
+		cpuQuota := milliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
+		resources.CpuQuota = cpuQuota
+		resources.CpuPeriod = cpuPeriod
+	}
+
+	return &resources
+}
+
 // GetHugepageLimitsFromResources returns limits of each hugepages from resources.
 func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtimeapi.HugepageLimit {
 	var hugepageLimits []*runtimeapi.HugepageLimit
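In calculateLinuxResources above, the CFS period defaults to quotaPeriod and, when the CPUCFSQuotaPeriod feature gate is enabled, is taken from m.cpuCFSQuotaPeriod. A quick runnable check of the Duration-to-microseconds conversion used there; the 100ms value is only an assumed example, matching the kubelet default:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The kubelet stores the CFS quota period as a time.Duration; dividing by
	// time.Microsecond yields the integer microsecond value handed to the CRI.
	period := 100 * time.Millisecond
	fmt.Println(int64(period / time.Microsecond)) // 100000
}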
@@ -223,6 +223,50 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
 	}
 }
 
+func TestCalculateLinuxResources(t *testing.T) {
+	_, _, m, err := createTestRuntimeManager()
+	m.cpuCFSQuota = true
+
+	assert.NoError(t, err)
+
+	tests := []struct {
+		name     string
+		cpuReq   resource.Quantity
+		cpuLim   resource.Quantity
+		memLim   resource.Quantity
+		expected *runtimeapi.LinuxContainerResources
+	}{
+		{
+			name:   "Request128MBLimit256MB",
+			cpuReq: resource.MustParse("1"),
+			cpuLim: resource.MustParse("2"),
+			memLim: resource.MustParse("128Mi"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           200000,
+				CpuShares:          1024,
+				MemoryLimitInBytes: 134217728,
+			},
+		},
+		{
+			name:   "RequestNoMemory",
+			cpuReq: resource.MustParse("2"),
+			cpuLim: resource.MustParse("8"),
+			memLim: resource.MustParse("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           800000,
+				CpuShares:          2048,
+				MemoryLimitInBytes: 0,
+			},
+		},
+	}
+	for _, test := range tests {
+		linuxContainerResources := m.calculateLinuxResources(&test.cpuReq, &test.cpuLim, &test.memLim)
+		assert.Equal(t, test.expected, linuxContainerResources)
+	}
+}
+
 func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
 	_, _, m, err := createTestRuntimeManager()
 	assert.NoError(t, err)
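For reference, the expected values in TestCalculateLinuxResources follow directly from the conversions shown earlier, assuming the kubelet defaults of 1024 CPU shares per CPU and a 100000µs CFS period (a sketch of the arithmetic, not the actual helpers):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Second test case's CPU values (2 CPU request, 8 CPU limit) and the first
	// test case's 128Mi memory limit.
	cpuReq := resource.MustParse("2")
	cpuLim := resource.MustParse("8")
	memLim := resource.MustParse("128Mi")

	fmt.Println(cpuReq.MilliValue() * 1024 / 1000)   // 2048      -> CpuShares
	fmt.Println(cpuLim.MilliValue() * 100000 / 1000) // 800000    -> CpuQuota
	fmt.Println(memLim.Value())                      // 134217728 -> MemoryLimitInBytes
}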