Merge pull request #128269 from tallclair/allocated
[FG:InPlacePodVerticalScaling] Rework handling of allocated resources
@@ -2391,6 +2391,13 @@ func TestPodResourceAllocationReset(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	kubelet.statusManager = status.NewFakeManager()
 
+	// fakePodWorkers trigger syncPodFn synchronously on update, but entering
+	// kubelet.SyncPod while holding the podResizeMutex can lead to deadlock.
+	kubelet.podWorkers.(*fakePodWorkers).syncPodFn =
+		func(_ context.Context, _ kubetypes.SyncPodType, _, _ *v1.Pod, _ *kubecontainer.PodStatus) (bool, error) {
+			return false, nil
+		}
+
 	nodes := []*v1.Node{
 		{
 			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -2447,8 +2454,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			name: "Having both memory and cpu, resource allocation not exists",
 			pod:  podWithUIDNameNsSpec("1", "pod1", "foo", *cpu500mMem500MPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"1": map[string]v1.ResourceList{
-					cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources.Requests,
+				"1": map[string]v1.ResourceRequirements{
+					cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
 				},
 			},
 		},
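Note: the same two-line substitution repeats in every test case of this table. Each container entry in the expected allocation map now stores the container's full v1.ResourceRequirements (requests and limits) instead of only the requests ResourceList. Below is a minimal sketch of the shape difference, using a locally defined map type as a stand-in for the state.PodResourceAllocation type referenced above (illustrative only, not the kubelet's actual definition):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podResourceAllocation is an illustrative stand-in for state.PodResourceAllocation:
// allocations are keyed by pod UID, then by container name.
// Old value type: v1.ResourceList (requests only).
// New value type: v1.ResourceRequirements (requests and limits together).
type podResourceAllocation map[string]map[string]v1.ResourceRequirements

func main() {
	alloc := podResourceAllocation{
		"1": {
			"c1": {
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("500m"),
					v1.ResourceMemory: resource.MustParse("500Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("500m"),
					v1.ResourceMemory: resource.MustParse("500Mi"),
				},
			},
		},
	}
	// Requests are still available, and limits no longer have to be tracked separately.
	c1 := alloc["1"]["c1"]
	fmt.Println(c1.Requests.Cpu(), c1.Limits.Memory())
}

Keeping limits alongside requests is consistent with the resize checks exercised further down in TestIsPodResizeInProgress, which compare both.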
@@ -2457,8 +2464,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"2": map[string]v1.ResourceList{
-					cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources.Requests,
+				"2": map[string]v1.ResourceRequirements{
+					cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2467,8 +2474,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("3", "pod3", "foo", *cpu500mMem500MPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu800mMem800MPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"3": map[string]v1.ResourceList{
-					cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources.Requests,
+				"3": map[string]v1.ResourceRequirements{
+					cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2476,8 +2483,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			name: "Only has cpu, resource allocation not exists",
 			pod:  podWithUIDNameNsSpec("4", "pod5", "foo", *cpu500mPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"4": map[string]v1.ResourceList{
-					cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources.Requests,
+				"4": map[string]v1.ResourceRequirements{
+					cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2486,8 +2493,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"5": map[string]v1.ResourceList{
-					cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources.Requests,
+				"5": map[string]v1.ResourceRequirements{
+					cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2496,8 +2503,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("6", "pod6", "foo", *cpu500mPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu800mPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"6": map[string]v1.ResourceList{
-					cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources.Requests,
+				"6": map[string]v1.ResourceRequirements{
+					cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2505,8 +2512,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			name: "Only has memory, resource allocation not exists",
 			pod:  podWithUIDNameNsSpec("7", "pod7", "foo", *mem500MPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"7": map[string]v1.ResourceList{
-					mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources.Requests,
+				"7": map[string]v1.ResourceRequirements{
+					mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2515,8 +2522,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"8": map[string]v1.ResourceList{
-					mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources.Requests,
+				"8": map[string]v1.ResourceRequirements{
+					mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2525,8 +2532,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("9", "pod9", "foo", *mem500MPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("9", "pod9", "foo", *mem800MPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"9": map[string]v1.ResourceList{
-					mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources.Requests,
+				"9": map[string]v1.ResourceRequirements{
+					mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2534,8 +2541,8 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			name: "No CPU and memory, resource allocation not exists",
 			pod:  podWithUIDNameNsSpec("10", "pod10", "foo", *emptyPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"10": map[string]v1.ResourceList{
-					emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources.Requests,
+				"10": map[string]v1.ResourceRequirements{
+					emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
 				},
 			},
 		},
@@ -2544,27 +2551,29 @@ func TestPodResourceAllocationReset(t *testing.T) {
 			pod:                   podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
 			existingPodAllocation: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
 			expectedPodResourceAllocation: state.PodResourceAllocation{
-				"11": map[string]v1.ResourceList{
-					emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources.Requests,
+				"11": map[string]v1.ResourceRequirements{
+					emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
 				},
 			},
 		},
 	}
 	for _, tc := range tests {
-		if tc.existingPodAllocation != nil {
-			// when kubelet restarts, AllocatedResources has already existed before adding pod
-			err := kubelet.statusManager.SetPodAllocation(tc.existingPodAllocation)
-			if err != nil {
-				t.Fatalf("failed to set pod allocation: %v", err)
+		t.Run(tc.name, func(t *testing.T) {
+			if tc.existingPodAllocation != nil {
+				// when kubelet restarts, AllocatedResources has already existed before adding pod
+				err := kubelet.statusManager.SetPodAllocation(tc.existingPodAllocation)
+				if err != nil {
+					t.Fatalf("failed to set pod allocation: %v", err)
+				}
 			}
-		}
-		kubelet.HandlePodAdditions([]*v1.Pod{tc.pod})
+			kubelet.HandlePodAdditions([]*v1.Pod{tc.pod})
 
-		allocatedResources, found := kubelet.statusManager.GetContainerResourceAllocation(string(tc.pod.UID), tc.pod.Spec.Containers[0].Name)
-		if !found {
-			t.Fatalf("resource allocation should exist: (pod: %#v, container: %s)", tc.pod, tc.pod.Spec.Containers[0].Name)
-		}
-		assert.Equal(t, tc.expectedPodResourceAllocation[string(tc.pod.UID)][tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name)
+			allocatedResources, found := kubelet.statusManager.GetContainerResourceAllocation(string(tc.pod.UID), tc.pod.Spec.Containers[0].Name)
+			if !found {
+				t.Fatalf("resource allocation should exist: (pod: %#v, container: %s)", tc.pod, tc.pod.Spec.Containers[0].Name)
+			}
+			assert.Equal(t, tc.expectedPodResourceAllocation[string(tc.pod.UID)][tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name)
+		})
 	}
 }
 
@@ -2573,7 +2582,6 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	testKubelet := newTestKubelet(t, false)
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
-	kubelet.statusManager = status.NewFakeManager()
 
 	cpu500m := resource.MustParse("500m")
 	cpu1000m := resource.MustParse("1")
@@ -2715,14 +2723,24 @@ func TestHandlePodResourcesResize(t *testing.T) {
 	}
 
 	for _, tt := range tests {
-		tt.pod.Spec.Containers[0].Resources.Requests = tt.newRequests
-		tt.pod.Status.ContainerStatuses[0].AllocatedResources = v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}
-		kubelet.handlePodResourcesResize(tt.pod)
-		updatedPod, found := kubelet.podManager.GetPodByName(tt.pod.Namespace, tt.pod.Name)
-		assert.True(t, found, "expected to find pod %s", tt.pod.Name)
-		assert.Equal(t, tt.expectedAllocations, updatedPod.Status.ContainerStatuses[0].AllocatedResources, tt.name)
-		assert.Equal(t, tt.expectedResize, updatedPod.Status.Resize, tt.name)
-		testKubelet.fakeKubeClient.ClearActions()
+		t.Run(tt.name, func(t *testing.T) {
+			kubelet.statusManager = status.NewFakeManager()
+			require.NoError(t, kubelet.statusManager.SetPodAllocation(tt.pod))
+
+			pod := tt.pod.DeepCopy()
+			pod.Spec.Containers[0].Resources.Requests = tt.newRequests
+			updatedPod, err := kubelet.handlePodResourcesResize(pod)
+			require.NoError(t, err)
+			assert.Equal(t, tt.expectedAllocations, updatedPod.Spec.Containers[0].Resources.Requests, "updated pod spec resources")
+
+			alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(pod.UID), pod.Spec.Containers[0].Name)
+			require.True(t, found, "container allocation")
+			assert.Equal(t, tt.expectedAllocations, alloc.Requests, "stored container allocation")
+
+			resizeStatus, found := kubelet.statusManager.GetPodResizeStatus(string(pod.UID))
+			require.True(t, found, "pod resize status")
+			assert.Equal(t, tt.expectedResize, resizeStatus)
+		})
 	}
 }
 
@@ -3274,3 +3292,132 @@ func TestSyncPodSpans(t *testing.T) {
 		assert.Equalf(t, span.Parent.SpanID(), rootSpan.SpanContext.SpanID(), "runtime service span %s %s should be child of root span", span.Name, span.Parent.SpanID())
 	}
 }
+
+func TestIsPodResizeInProgress(t *testing.T) {
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:       "12345",
+			Name:      "test",
+			Namespace: "default",
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{{
+				Name: "c1",
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{
+						v1.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI),
+					},
+					Limits: v1.ResourceList{
+						v1.ResourceCPU:    *resource.NewMilliQuantity(300, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI),
+					},
+				},
+			}, {
+				Name: "c2",
+				Resources: v1.ResourceRequirements{
+					Requests: v1.ResourceList{
+						v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI),
+					},
+					Limits: v1.ResourceList{
+						v1.ResourceCPU:    *resource.NewMilliQuantity(700, resource.DecimalSI),
+						v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI),
+					},
+				},
+			}},
+		},
+	}
+	steadyStateC1Status := &kubecontainer.Status{
+		Name:  "c1",
+		State: kubecontainer.ContainerStateRunning,
+		Resources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
+			MemoryLimit: resource.NewQuantity(400, resource.DecimalSI),
+		},
+	}
+	resizeMemC1Status := &kubecontainer.Status{
+		Name:  "c1",
+		State: kubecontainer.ContainerStateRunning,
+		Resources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
+			MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
+		},
+	}
+	resizeCPUReqC1Status := &kubecontainer.Status{
+		Name:  "c1",
+		State: kubecontainer.ContainerStateRunning,
+		Resources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(200, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
+			MemoryLimit: resource.NewQuantity(400, resource.DecimalSI),
+		},
+	}
+	resizeCPULimitC1Status := &kubecontainer.Status{
+		Name:  "c1",
+		State: kubecontainer.ContainerStateRunning,
+		Resources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(600, resource.DecimalSI),
+			MemoryLimit: resource.NewQuantity(400, resource.DecimalSI),
+		},
+	}
+	steadyStateC2Status := &kubecontainer.Status{
+		Name:  "c2",
+		State: kubecontainer.ContainerStateRunning,
+		Resources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(500, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(700, resource.DecimalSI),
+			MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
+		},
+	}
+	mkPodStatus := func(containerStatuses ...*kubecontainer.Status) *kubecontainer.PodStatus {
+		return &kubecontainer.PodStatus{
+			ID:                pod.UID,
+			Name:              pod.Name,
+			Namespace:         pod.Namespace,
+			ContainerStatuses: containerStatuses,
+		}
+	}
+	tests := []struct {
+		name         string
+		status       *kubecontainer.PodStatus
+		expectResize bool
+	}{{
+		name:         "steady state",
+		status:       mkPodStatus(steadyStateC1Status, steadyStateC2Status),
+		expectResize: false,
+	}, {
+		name: "terminated container",
+		status: mkPodStatus(&kubecontainer.Status{
+			Name:      "c1",
+			State:     kubecontainer.ContainerStateExited,
+			Resources: resizeMemC1Status.Resources,
+		}, steadyStateC2Status),
+		expectResize: false,
+	}, {
+		name:         "missing container",
+		status:       mkPodStatus(steadyStateC2Status),
+		expectResize: false,
+	}, {
+		name:         "resizing memory limit",
+		status:       mkPodStatus(resizeMemC1Status, steadyStateC2Status),
+		expectResize: true,
+	}, {
+		name:         "resizing cpu request",
+		status:       mkPodStatus(resizeCPUReqC1Status, steadyStateC2Status),
+		expectResize: true,
+	}, {
+		name:         "resizing cpu limit",
+		status:       mkPodStatus(resizeCPULimitC1Status, steadyStateC2Status),
+		expectResize: true,
+	}}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			assert.Equal(t, test.expectResize, isPodResizeInProgress(pod, test.status))
+		})
+	}
+}
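Note: the new TestIsPodResizeInProgress exercises isPodResizeInProgress without its implementation appearing in this diff. As a rough, illustrative approximation of what the cases verify (not the kubelet's actual code): a resize is treated as in progress when any running container's runtime-reported resources differ from the resources in the pod spec, while exited or unreported containers are ignored. The sketch below uses local stand-in types instead of the kubecontainer package, and it only compares the CPU request and the CPU/memory limits that the fixtures populate:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Minimal stand-ins for kubecontainer.Status / ContainerResources, for illustration only.
type containerResources struct {
	CPURequest, CPULimit, MemoryLimit *resource.Quantity
}

type containerStatus struct {
	Name      string
	Running   bool
	Resources *containerResources
}

// isPodResizeInProgressSketch mirrors the behavior the test cases check: any running
// container whose reported resources differ from its spec counts as an in-progress
// resize; exited or missing containers do not. Unset-field subtleties are ignored here.
func isPodResizeInProgressSketch(pod *v1.Pod, statuses []containerStatus) bool {
	byName := map[string]containerStatus{}
	for _, s := range statuses {
		byName[s.Name] = s
	}
	for _, c := range pod.Spec.Containers {
		s, ok := byName[c.Name]
		if !ok || !s.Running || s.Resources == nil {
			continue // "missing container" and "terminated container" cases: no resize
		}
		if !quantityEqual(s.Resources.CPURequest, c.Resources.Requests.Cpu()) ||
			!quantityEqual(s.Resources.CPULimit, c.Resources.Limits.Cpu()) ||
			!quantityEqual(s.Resources.MemoryLimit, c.Resources.Limits.Memory()) {
			return true
		}
	}
	return false
}

func quantityEqual(a, b *resource.Quantity) bool {
	if a == nil || b == nil {
		return a == b
	}
	return a.Cmp(*b) == 0
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Name: "c1",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
			Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("300m"), v1.ResourceMemory: resource.MustParse("400")},
		},
	}}}}
	running := []containerStatus{{
		Name:    "c1",
		Running: true,
		Resources: &containerResources{
			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
			MemoryLimit: resource.NewQuantity(800, resource.DecimalSI), // differs from the 400 limit in the spec
		},
	}}
	fmt.Println(isPodResizeInProgressSketch(pod, running)) // true: memory limit still being resized
}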