From ae5247afc18d4bbdf65ea7b0b0bca14bb04431ec Mon Sep 17 00:00:00 2001
From: Natasha Sarkar
Date: Fri, 18 Jul 2025 21:22:41 +0000
Subject: [PATCH] address feedback

HandlePodReconcile now aggregates requests from the allocated pods
(via UpdatePodFromAllocation) rather than from the pod specs, so the
desired resources of an in-flight resize cannot skew the shrink check
that triggers retries of pending resizes. The test helper parameter
formerly named sourcesReady is renamed to enableResizing to better
describe its role in these tests.
---
 pkg/kubelet/kubelet.go                  |  9 ++-
 pkg/kubelet/kubelet_node_status_test.go |  4 +-
 pkg/kubelet/kubelet_test.go             | 89 +++++++++++++++++++------
 pkg/kubelet/kubelet_volumes_test.go     |  4 +-
 4 files changed, 81 insertions(+), 25 deletions(-)

diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 9c963017102..a82bbfd68f5 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -2824,8 +2824,13 @@ func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
 			UseStatusResources:    true,
 			SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
 		}
-		oldRequest := resourcehelper.PodRequests(oldPod, opts)
-		newRequest := resourcehelper.PodRequests(pod, opts)
+
+		// Ignore desired resources when aggregating the requests.
+		allocatedOldPod, _ := kl.allocationManager.UpdatePodFromAllocation(oldPod)
+		allocatedPod, _ := kl.allocationManager.UpdatePodFromAllocation(pod)
+
+		oldRequest := resourcehelper.PodRequests(allocatedOldPod, opts)
+		newRequest := resourcehelper.PodRequests(allocatedPod, opts)
 
 		// If cpu or memory requests shrank, then retry the pending resizes.
 		retryPendingResizes = newRequest.Memory().Cmp(*oldRequest.Memory()) < 0 ||
diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go
index b423d7e8fc1..86f03304701 100644
--- a/pkg/kubelet/kubelet_node_status_test.go
+++ b/pkg/kubelet/kubelet_node_status_test.go
@@ -213,7 +213,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 			}
 			inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
 			testKubelet := newTestKubeletWithImageList(
-				t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/, true /* localStorageCapacityIsolation */, false /*excludePodAdmitHandlers*/, false /*sourcesReady*/)
+				t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/, true /* localStorageCapacityIsolation */, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
 			defer testKubelet.Cleanup()
 			kubelet := testKubelet.kubelet
 			kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
@@ -1588,7 +1588,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
 	// generate one more in inputImageList than we configure the Kubelet to report
 	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
 	testKubelet := newTestKubeletWithImageList(
-		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*sourcesReady*/)
+		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index f56d3ec428a..b10cbfbe988 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -177,7 +177,7 @@ func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
 			Size: 456,
 		},
 	}
-	return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*sourcesReady*/)
+	return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
 }
 
 func newTestKubeletExcludeAdmitHandlers(t *testing.T, controllerAttachDetachEnabled, sourcesReady bool) *TestKubelet {
@@ -203,7 +203,7 @@ func newTestKubeletWithImageList(
 	initFakeVolumePlugin bool,
 	localStorageCapacityIsolation bool,
 	excludeAdmitHandlers bool,
-	sourcesReady bool,
+	enableResizing bool,
 ) *TestKubelet {
 	logger, _ := ktesting.NewTestContext(t)
 
@@ -329,7 +329,7 @@ func newTestKubeletWithImageList(
 		func(pod *v1.Pod) { kubelet.HandlePodSyncs([]*v1.Pod{pod}) },
 		kubelet.GetActivePods,
 		kubelet.podManager.GetPodByUID,
-		config.NewSourcesReady(func(_ sets.Set[string]) bool { return sourcesReady }),
+		config.NewSourcesReady(func(_ sets.Set[string]) bool { return enableResizing }),
 	)
 	kubelet.allocationManager.SetContainerRuntime(fakeRuntime)
 	volumeStatsAggPeriod := time.Second * 10
@@ -1074,7 +1074,7 @@ func TestHandleMemExceeded(t *testing.T) {
 // Tests that we handle result of interface UpdatePluginResources correctly
 // by setting corresponding status in status map.
 func TestHandlePluginResources(t *testing.T) {
-	testKubelet := newTestKubeletExcludeAdmitHandlers(t, false /* controllerAttachDetachEnabled */, false /*sourcesReady*/)
+	testKubelet := newTestKubeletExcludeAdmitHandlers(t, false /* controllerAttachDetachEnabled */, false /*enableResizing*/)
 	defer testKubelet.Cleanup()
 	kl := testKubelet.kubelet
 
@@ -4232,16 +4232,20 @@ func TestHandlePodReconcile_RetryPendingResizes(t *testing.T) {
 	}
 
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
-	testKubelet := newTestKubeletExcludeAdmitHandlers(t, false /* controllerAttachDetachEnabled */, true /*sourcesReady*/)
+	testKubelet := newTestKubeletExcludeAdmitHandlers(t, false /* controllerAttachDetachEnabled */, true /*enableResizing*/)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 
-	cpu500m := resource.MustParse("500m")
-	cpu1000m := resource.MustParse("1")
-	mem500M := resource.MustParse("500Mi")
-	mem1000M := resource.MustParse("1Gi")
+	lowCPU := resource.MustParse("500m")
+	highCPU := resource.MustParse("1")
+	lowMem := resource.MustParse("500Mi")
+	highMem := resource.MustParse("1Gi")
 
-	makePodWithRequests := func(name string, requests v1.ResourceList) *v1.Pod {
+	// Set desired resources to some huge value to verify that they are ignored in the aggregate check.
+	enormousCPU := resource.MustParse("2000m")
+	enormousMem := resource.MustParse("2000Mi")
+
+	makePodWithResources := func(name string, requests v1.ResourceList, status v1.ResourceList) *v1.Pod {
 		return &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: name,
@@ -4258,11 +4262,54 @@ func TestHandlePodReconcile_RetryPendingResizes(t *testing.T) {
 					},
 				},
 			},
+			Status: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						Name: "c1",
+						Resources: &v1.ResourceRequirements{
+							Requests: status,
+						},
+					},
+				},
+			},
 		}
 	}
 
-	pendingResizeAllocated := makePodWithRequests("pod-pending-resize", v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M})
-	pendingResizeDesired := makePodWithRequests("pod-pending-resize", v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M})
+	pendingResizeAllocated := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pod-pending-resize",
+			UID:  types.UID("pod-pending-resize"),
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "c1",
+					Image: "i1",
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem},
+					},
+				},
+			},
+		},
+	}
+
+	pendingResizeDesired := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pod-pending-resize",
+			UID:  types.UID("pod-pending-resize"),
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "c1",
+					Image: "i1",
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem},
+					},
+				},
+			},
+		},
+	}
 
 	testCases := []struct {
 		name                     string
@@ -4272,20 +4319,20 @@ func TestHandlePodReconcile_RetryPendingResizes(t *testing.T) {
 	}{
 		{
 			name:                     "requests are increasing",
-			oldPod:                   makePodWithRequests("updated-pod", v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}),
-			newPod:                   makePodWithRequests("updated-pod", v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}),
+			oldPod:                   makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
+			newPod:                   makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: enormousCPU, v1.ResourceMemory: enormousMem}, v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem}),
 			shouldRetryPendingResize: false,
 		},
 		{
 			name:                     "requests are unchanged",
-			oldPod:                   makePodWithRequests("updated-pod", v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}),
-			newPod:                   makePodWithRequests("updated-pod", v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}),
+			oldPod:                   makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
+			newPod:                   makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: enormousCPU, v1.ResourceMemory: enormousMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
 			shouldRetryPendingResize: false,
 		},
 		{
 			name:                     "requests are decreasing",
-			oldPod:                   makePodWithRequests("updated-pod", v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}),
-			newPod:                   makePodWithRequests("updated-pod", v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}),
+			oldPod:                   makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}, v1.ResourceList{v1.ResourceCPU: highCPU, v1.ResourceMemory: highMem}),
+			newPod:                   makePodWithResources("updated-pod", v1.ResourceList{v1.ResourceCPU: enormousCPU, v1.ResourceMemory: enormousMem}, v1.ResourceList{v1.ResourceCPU: lowCPU, v1.ResourceMemory: lowMem}),
 			shouldRetryPendingResize: true,
 		},
 	}
@@ -4297,7 +4344,11 @@ func TestHandlePodReconcile_RetryPendingResizes(t *testing.T) {
 			kubelet.allocationManager.AddPodAdmitHandlers(lifecycle.PodAdmitHandlers{handler})
 
 			require.NoError(t, kubelet.allocationManager.SetAllocatedResources(pendingResizeAllocated))
-			require.NoError(t, kubelet.allocationManager.SetActuatedResources(pendingResizeAllocated, nil))
+			require.NoError(t, kubelet.allocationManager.SetAllocatedResources(tc.oldPod))
+
+			// We only expect status resources to change in HandlePodReconcile.
+			tc.oldPod.Spec = tc.newPod.Spec
+
 			kubelet.podManager.AddPod(pendingResizeDesired)
 			kubelet.podManager.AddPod(tc.oldPod)
 			kubelet.allocationManager.PushPendingResize(pendingResizeDesired.UID)
diff --git a/pkg/kubelet/kubelet_volumes_test.go b/pkg/kubelet/kubelet_volumes_test.go
index ef4bae385df..9ba12c2f31a 100644
--- a/pkg/kubelet/kubelet_volumes_test.go
+++ b/pkg/kubelet/kubelet_volumes_test.go
@@ -220,7 +220,7 @@ func TestPodVolumeDeadlineAttachAndMount(t *testing.T) {
 	}
 
 	testKubelet := newTestKubeletWithImageList(t, nil /*imageList*/, false, /* controllerAttachDetachEnabled */
-		false /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*sourcesReady*/)
+		false /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 
@@ -282,7 +282,7 @@ func TestPodVolumeDeadlineUnmount(t *testing.T) {
 	}
 
 	testKubelet := newTestKubeletWithImageList(t, nil /*imageList*/, false, /* controllerAttachDetachEnabled */
-		true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*sourcesReady*/)
+		true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/, false /*excludePodAdmitHandlers*/, false /*enableResizing*/)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
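
Note for reviewers: the substance of the kubelet.go hunk is that HandlePodReconcile now compares aggregate requests computed from the allocated pods (UpdatePodFromAllocation), so the desired spec resources of an in-flight resize never enter the shrink check; the test exercises this by giving the new pod enormous desired requests that must be ignored. Below is a minimal standalone sketch of that check, under simplified assumptions: podResources and shouldRetryPendingResizes are illustrative stand-ins, not kubelet API.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// podResources is a simplified stand-in for a pod's aggregated
// allocated requests (illustrative only).
type podResources struct {
	cpu resource.Quantity
	mem resource.Quantity
}

// shouldRetryPendingResizes mirrors the retryPendingResizes condition:
// it compares allocated requests only, and reports true when CPU or
// memory shrank, since only a shrink frees capacity for pending resizes.
func shouldRetryPendingResizes(oldAllocated, newAllocated podResources) bool {
	return newAllocated.mem.Cmp(oldAllocated.mem) < 0 ||
		newAllocated.cpu.Cmp(oldAllocated.cpu) < 0
}

func main() {
	// Allocated CPU shrank from 1 to 500m; memory is unchanged. The desired
	// (spec) requests may be enormous while a resize is pending, but they
	// never enter this comparison.
	oldAllocated := podResources{cpu: resource.MustParse("1"), mem: resource.MustParse("1Gi")}
	newAllocated := podResources{cpu: resource.MustParse("500m"), mem: resource.MustParse("1Gi")}
	fmt.Println(shouldRetryPendingResizes(oldAllocated, newAllocated)) // true
}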