Mirror of https://github.com/optim-enterprises-bv/kubernetes.git

	Merge pull request #128377 from tallclair/allocated-status-2
[FG:InPlacePodVerticalScaling] Implement AllocatedResources status changes for Beta
pkg/api/pod/util.go:

@@ -831,18 +831,29 @@ func dropDisabledPodStatusFields(podStatus, oldPodStatus *api.PodStatus, podSpec
 	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
-		// Drop Resize, AllocatedResources, and Resources fields
-		dropResourcesFields := func(csl []api.ContainerStatus) {
+		// Drop Resize and Resources fields
+		dropResourcesField := func(csl []api.ContainerStatus) {
 			for i := range csl {
-				csl[i].AllocatedResources = nil
 				csl[i].Resources = nil
 			}
 		}
-		dropResourcesFields(podStatus.ContainerStatuses)
-		dropResourcesFields(podStatus.InitContainerStatuses)
-		dropResourcesFields(podStatus.EphemeralContainerStatuses)
+		dropResourcesField(podStatus.ContainerStatuses)
+		dropResourcesField(podStatus.InitContainerStatuses)
+		dropResourcesField(podStatus.EphemeralContainerStatuses)
 		podStatus.Resize = ""
 	}
+	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) ||
+		!utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+		// Drop AllocatedResources field
+		dropAllocatedResourcesField := func(csl []api.ContainerStatus) {
+			for i := range csl {
+				csl[i].AllocatedResources = nil
+			}
+		}
+		dropAllocatedResourcesField(podStatus.ContainerStatuses)
+		dropAllocatedResourcesField(podStatus.InitContainerStatuses)
+		dropAllocatedResourcesField(podStatus.EphemeralContainerStatuses)
+	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
 		podStatus.ResourceClaimStatuses = nil
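To make the gating easier to follow: after this change, status.Resize and the per-container Resources field are still dropped whenever InPlacePodVerticalScaling is off (and not already in use), while AllocatedResources now survives only when both gates are on. A minimal, self-contained sketch of that rule, with stand-in types instead of the real api package and ignoring the already-in-use escape hatch for brevity:

package main

import "fmt"

// ContainerStatus is a stand-in for api.ContainerStatus; the resource fields
// are simple pointers here instead of the real ResourceRequirements/ResourceList.
type ContainerStatus struct {
	Name               string
	Resources          *string
	AllocatedResources *string
}

// dropDisabledStatusFields mirrors the gating above: Resources needs only the
// InPlacePodVerticalScaling gate, AllocatedResources now needs both gates.
func dropDisabledStatusFields(statuses []ContainerStatus, ippvs, allocatedStatus bool) {
	for i := range statuses {
		if !ippvs {
			statuses[i].Resources = nil
		}
		if !ippvs || !allocatedStatus {
			statuses[i].AllocatedResources = nil
		}
	}
}

func main() {
	res, alloc := "cpu:100m", "cpu:100m"
	statuses := []ContainerStatus{{Name: "c1", Resources: &res, AllocatedResources: &alloc}}
	dropDisabledStatusFields(statuses, true, false) // IPPVS on, AllocatedStatus off
	fmt.Printf("Resources kept: %t, AllocatedResources kept: %t\n",
		statuses[0].Resources != nil, statuses[0].AllocatedResources != nil)
	// Output: Resources kept: true, AllocatedResources kept: false
}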
@@ -1289,26 +1300,17 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
 	}
 
 	for i, c := range newPod.Spec.Containers {
+		if c.Name != oldPod.Spec.Containers[i].Name {
+			return // Update is invalid (container mismatch): let validation handle it.
+		}
 		if c.Resources.Requests == nil {
 			continue
 		}
 		if cmp.Equal(oldPod.Spec.Containers[i].Resources, c.Resources) {
 			continue
 		}
-		findContainerStatus := func(css []api.ContainerStatus, cName string) (api.ContainerStatus, bool) {
-			for i := range css {
-				if css[i].Name == cName {
-					return css[i], true
-				}
-			}
-			return api.ContainerStatus{}, false
-		}
-		if cs, ok := findContainerStatus(newPod.Status.ContainerStatuses, c.Name); ok {
-			if !cmp.Equal(c.Resources.Requests, cs.AllocatedResources) {
-				newPod.Status.Resize = api.PodResizeStatusProposed
-				break
-			}
-		}
+		newPod.Status.Resize = api.PodResizeStatusProposed
+		return
 	}
 }
 
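The new logic proposes a resize from a pure spec-to-spec comparison and bails out on container mismatches, instead of consulting status.AllocatedResources as the old code did. A simplified, self-contained sketch of the decision (returning a bool here, where the real function instead sets newPod.Status.Resize):

package main

import (
	"fmt"
	"reflect"
)

// Resources and Container are simplified stand-ins for the api package types.
type Resources struct {
	Requests map[string]string
	Limits   map[string]string
}

type Container struct {
	Name      string
	Resources Resources
}

// proposeResize mirrors the rewritten MarkPodProposedForResize: any request
// or limit change between old and new spec proposes a resize; shape changes
// are left to validation.
func proposeResize(oldContainers, newContainers []Container) bool {
	if len(oldContainers) != len(newContainers) {
		return false // container count changed: invalid update, left to validation
	}
	for i, c := range newContainers {
		if c.Name != oldContainers[i].Name {
			return false // container mismatch: invalid update, left to validation
		}
		if c.Resources.Requests == nil {
			continue
		}
		if !reflect.DeepEqual(oldContainers[i].Resources, c.Resources) {
			return true
		}
	}
	return false
}

func main() {
	oldSpec := []Container{{Name: "c1", Resources: Resources{Requests: map[string]string{"cpu": "100m"}}}}
	newSpec := []Container{{Name: "c1", Resources: Resources{Requests: map[string]string{"cpu": "200m"}}}}
	fmt.Println(proposeResize(oldSpec, newSpec)) // true: requests changed in the spec
}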
pkg/api/pod/util_test.go:

@@ -2638,56 +2638,69 @@ func TestDropInPlacePodVerticalScaling(t *testing.T) {
 		},
 	}
 
-	for _, enabled := range []bool{true, false} {
-		for _, oldPodInfo := range podInfo {
-			for _, newPodInfo := range podInfo {
-				oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
-				newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
-				if newPod == nil {
-					continue
-				}
-
-				t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
-					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, enabled)
-
-					var oldPodSpec *api.PodSpec
-					var oldPodStatus *api.PodStatus
-					if oldPod != nil {
-						oldPodSpec = &oldPod.Spec
-						oldPodStatus = &oldPod.Status
-					}
-					dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
-					dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
-
-					// old pod should never be changed
-					if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
-						t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
-					}
-
-					switch {
-					case enabled || oldPodHasInPlaceVerticalScaling:
-						// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
-						}
-					case newPodHasInPlaceVerticalScaling:
-						// new pod should be changed
-						if reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod was not changed")
-						}
-						// new pod should not have ResizePolicy
-						if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
-							t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
-						}
-					default:
-						// new pod should not need to be changed
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
-						}
-					}
-				})
-			}
-		}
-	}
+	for _, ippvsEnabled := range []bool{true, false} {
+		t.Run(fmt.Sprintf("InPlacePodVerticalScaling=%t", ippvsEnabled), func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, ippvsEnabled)
+
+			for _, allocatedStatusEnabled := range []bool{true, false} {
+				t.Run(fmt.Sprintf("AllocatedStatus=%t", allocatedStatusEnabled), func(t *testing.T) {
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, allocatedStatusEnabled)
+
+					for _, oldPodInfo := range podInfo {
+						for _, newPodInfo := range podInfo {
+							oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
+							newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
+							if newPod == nil {
+								continue
+							}
+
+							t.Run(fmt.Sprintf("old pod %v, new pod %v", oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
+								var oldPodSpec *api.PodSpec
+								var oldPodStatus *api.PodStatus
+								if oldPod != nil {
+									oldPodSpec = &oldPod.Spec
+									oldPodStatus = &oldPod.Status
+								}
+								dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
+								dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
+
+								// old pod should never be changed
+								if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
+									t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
+								}
+
+								switch {
+								case ippvsEnabled || oldPodHasInPlaceVerticalScaling:
+									// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
+									expected := newPodInfo.pod()
+									if !ippvsEnabled || !allocatedStatusEnabled {
+										expected.Status.ContainerStatuses[0].AllocatedResources = nil
+									}
+									if !reflect.DeepEqual(newPod, expected) {
+										t.Errorf("new pod changed: %v", cmp.Diff(newPod, expected))
+									}
+								case newPodHasInPlaceVerticalScaling:
+									// new pod should be changed
+									if reflect.DeepEqual(newPod, newPodInfo.pod()) {
+										t.Errorf("new pod was not changed")
+									}
+									// new pod should not have ResizePolicy
+									if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
+										t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
+									}
+								default:
+									// new pod should not need to be changed
+									if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
+										t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
+									}
+								}
+							})
+						}
+					}
+
+				})
+			}
+		})
+	}
 }
 
@@ -2796,439 +2809,277 @@ func TestDropSidecarContainers(t *testing.T) {
 
 func TestMarkPodProposedForResize(t *testing.T) {
 	testCases := []struct {
-		desc        string
-		newPod      *api.Pod
-		oldPod      *api.Pod
-		expectedPod *api.Pod
+		desc                 string
+		newPodSpec           api.PodSpec
+		oldPodSpec           api.PodSpec
+		expectProposedResize bool
 	}{
 		{
 			desc: "nil requests",
-			newPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-			},
-			oldPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-			},
-			expectedPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-			},
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+					},
+				},
+			},
+			expectProposedResize: false,
 		},
 		{
 			desc: "resources unchanged",
-			newPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-			},
-			oldPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-			},
-			expectedPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:  "c1",
-							Image: "image",
-						},
-					},
-				},
-			},
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: false,
 		},
 		{
-			desc: "resize desired",
-			newPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-						{
-							Name:  "c2",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-						{
-							Name:               "c2",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						},
-					},
-				},
-			},
-			oldPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-						{
-							Name:  "c2",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-						{
-							Name:               "c2",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						},
-					},
-				},
-			},
-			expectedPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-						{
-							Name:  "c2",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					Resize: api.PodResizeStatusProposed,
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-						{
-							Name:               "c2",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						},
-					},
-				},
-			},
+			desc: "requests resized",
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: true,
+		},
+		{
+			desc: "limits resized",
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: true,
 		},
 		{
 			desc: "the number of containers in the pod has increased; no action should be taken.",
-			newPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-						{
-							Name:  "c2",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-						{
-							Name:               "c2",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						},
-					},
-				},
-			},
-			oldPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-					},
-				},
-			},
-			expectedPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-						{
-							Name:  "c2",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-						{
-							Name:               "c2",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						},
-					},
-				},
-			},
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: false,
 		},
 		{
 			desc: "the number of containers in the pod has decreased; no action should be taken.",
-			newPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-					},
-				},
-			},
-			oldPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-						{
-							Name:  "c2",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-						{
-							Name:               "c2",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						},
-					},
-				},
-			},
-			expectedPod: &api.Pod{
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  "c1",
-							Image: "image",
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-								Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-							},
-						},
-					},
-				},
-				Status: api.PodStatus{
-					ContainerStatuses: []api.ContainerStatus{
-						{
-							Name:               "c1",
-							Image:              "image",
-							AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
-						},
-					},
-				},
-			},
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: false,
+		},
+		{
+			desc: "containers reordered",
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c2",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: false,
 		},
 	}
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			MarkPodProposedForResize(tc.oldPod, tc.newPod)
-			if diff := cmp.Diff(tc.expectedPod, tc.newPod); diff != "" {
-				t.Errorf("unexpected pod spec (-want, +got):\n%s", diff)
-			}
+			newPod := &api.Pod{Spec: tc.newPodSpec}
+			newPodUnchanged := newPod.DeepCopy()
+			oldPod := &api.Pod{Spec: tc.oldPodSpec}
+			MarkPodProposedForResize(oldPod, newPod)
+			if tc.expectProposedResize {
+				assert.Equal(t, api.PodResizeStatusProposed, newPod.Status.Resize)
+			} else {
+				assert.Equal(t, api.PodResizeStatus(""), newPod.Status.Resize)
+			}
+			newPod.Status.Resize = newPodUnchanged.Status.Resize // Only field that might have changed.
+			assert.Equal(t, newPodUnchanged, newPod, "No fields other than .status.resize should be modified")
 		})
 	}
 }
 
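The rewritten runner also pins down side effects: it snapshots newPod with DeepCopy before the call, restores the one field the call may legitimately change, and asserts deep equality for everything else. A tiny self-contained sketch of that snapshot-and-restore assertion pattern with stand-in types:

package main

import (
	"fmt"
	"reflect"
)

// Pod is a flat stand-in for api.Pod; with nested pointers a real deep copy
// would be required.
type Pod struct {
	Name   string
	Status struct{ Resize string }
}

func (p *Pod) DeepCopy() *Pod { c := *p; return &c }

func main() {
	pod := &Pod{Name: "p"}
	snapshot := pod.DeepCopy()

	pod.Status.Resize = "Proposed" // the mutation under test

	// Restore the one field that is allowed to change; everything else must
	// still be identical to the snapshot.
	pod.Status.Resize = snapshot.Status.Resize
	fmt.Println("no other fields modified:", reflect.DeepEqual(snapshot, pod))
}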
pkg/features/kube_features.go:

@@ -257,6 +257,19 @@ const (
 	// deletion ordering.
 	HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
 
+	// owner: @vinaykul,@tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables In-Place Pod Vertical Scaling
+	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
+
+	// owner: @tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables the AllocatedResources field in container status. This feature requires
+	// InPlacePodVerticalScaling also be enabled.
+	InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"
+
 	// owner: @trierra
 	//
 	// Disables the Portworx in-tree driver.
@@ -741,12 +754,6 @@ const (
 	// Initial implementation focused on ReadWriteOncePod volumes.
 	SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
 
-	// owner: @vinaykul
-	// kep: http://kep.k8s.io/1287
-	//
-	// Enables In-Place Pod Vertical Scaling
-	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
-
 	// owner: @Sh4d1,@RyanAoh,@rikatz
 	// kep: http://kep.k8s.io/1860
 	// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
pkg/features/versioned_kube_features.go:

@@ -384,6 +384,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
 	},
 
+	InPlacePodVerticalScalingAllocatedStatus: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
 	InTreePluginPortworxUnregister: {
 		{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
 	},
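The new gate is registered with a versioned spec, i.e. a per-release history of (default, maturity) entries rather than a single value. A hedged sketch of the underlying idea using hypothetical local types (not the real featuregate API):

package main

import "fmt"

// spec is a simplified stand-in for a versioned feature-gate entry; the real
// code uses a comparable version type rather than a string.
type spec struct {
	version    string
	defaultOn  bool
	preRelease string
}

var gates = map[string][]spec{
	"InPlacePodVerticalScalingAllocatedStatus": {
		{version: "1.32", defaultOn: false, preRelease: "Alpha"},
	},
}

func main() {
	for name, history := range gates {
		latest := history[len(history)-1]
		fmt.Printf("%s: since %s, default=%t (%s)\n", name, latest.version, latest.defaultOn, latest.preRelease)
	}
}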
pkg/kubelet/cm/helpers_linux.go:

@@ -117,18 +117,17 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
-	inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.InPlacePodVerticalScaling)
-	// sum requests and limits.
-	reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled,
+func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
+	reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
+		// pod is already configured to the allocated resources, and we explicitly don't want to use
+		// the actual resources if we're instantiating a resize.
+		UseStatusResources: false,
 	})
 	// track if limits were applied for each resource.
 	memoryLimitsDeclared := true
 	cpuLimitsDeclared := true
 
-	limits := resource.PodLimits(pod, resource.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled,
+	limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{
 		ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) {
 			if res.Cpu().IsZero() {
 				cpuLimitsDeclared = false
@@ -164,7 +163,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
 	}
 
 	// determine the qos class
-	qosClass := v1qos.GetPodQOS(pod)
+	qosClass := v1qos.GetPodQOS(allocatedPod)
 
 	// build the result
 	result := &ResourceConfig{}
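ResourceConfigForPod now takes the allocated pod and passes UseStatusResources: false, so cgroup values are computed from the allocated spec and never fall back to runtime-reported status while a resize is being instantiated. A simplified sketch of what such an option means (stand-in types and milliCPU ints instead of resource.Quantity; not the real resource helpers):

package main

import "fmt"

type container struct {
	specCPURequestMilli   int64
	statusCPURequestMilli int64 // 0 = unknown / not reported
}

type options struct{ UseStatusResources bool }

// podCPURequestMilli sums CPU requests, preferring the runtime-reported
// value only when UseStatusResources is set and a value is available.
func podCPURequestMilli(containers []container, opts options) int64 {
	var sum int64
	for _, c := range containers {
		req := c.specCPURequestMilli
		if opts.UseStatusResources && c.statusCPURequestMilli != 0 {
			req = c.statusCPURequestMilli
		}
		sum += req
	}
	return sum
}

func main() {
	pod := []container{{specCPURequestMilli: 200, statusCPURequestMilli: 100}}
	fmt.Println(podCPURequestMilli(pod, options{UseStatusResources: false})) // 200: allocated spec wins
	fmt.Println(podCPURequestMilli(pod, options{UseStatusResources: true}))  // 100: reported status wins
}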
pkg/kubelet/kubelet.go:

@@ -114,6 +114,7 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/userns"
 	"k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/util/manager"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
@@ -2795,6 +2796,14 @@ func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus) {
 func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod) (*v1.Pod, error) {
 	allocatedPod, updated := kl.statusManager.UpdatePodFromAllocation(pod)
 	if !updated {
+		// Unless a resize is in-progress, clear the resize status.
+		resizeStatus, _ := kl.statusManager.GetPodResizeStatus(string(pod.UID))
+		if resizeStatus != v1.PodResizeStatusInProgress {
+			if err := kl.statusManager.SetPodResizeStatus(pod.UID, ""); err != nil {
+				klog.ErrorS(err, "Failed to clear resize status", "pod", format.Pod(pod))
+			}
+		}
+
 		// Pod is not resizing, nothing more to do here.
 		return allocatedPod, nil
 	}
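The effect of the added block: when the pod has no pending allocation change, any leftover resize status other than InProgress is cleared so it cannot linger after the resize condition is gone. A self-contained sketch with a hypothetical in-memory status manager standing in for kl.statusManager:

package main

import "fmt"

const inProgress = "InProgress"

type statusManager struct {
	resizeStatus map[string]string
}

func (m *statusManager) get(uid string) string { return m.resizeStatus[uid] }

func (m *statusManager) set(uid, status string) error {
	if status == "" {
		delete(m.resizeStatus, uid)
	} else {
		m.resizeStatus[uid] = status
	}
	return nil
}

// clearStaleResizeStatus mirrors the added block: clear anything that is not
// an in-progress resize.
func clearStaleResizeStatus(m *statusManager, uid string) {
	if m.get(uid) != inProgress {
		if err := m.set(uid, ""); err != nil {
			fmt.Println("failed to clear resize status:", err)
		}
	}
}

func main() {
	m := &statusManager{resizeStatus: map[string]string{"pod-a": "Proposed", "pod-b": inProgress}}
	clearStaleResizeStatus(m, "pod-a") // cleared: no resize pending
	clearStaleResizeStatus(m, "pod-b") // kept: resize still running
	fmt.Println(m.resizeStatus)
}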
pkg/kubelet/kubelet_pods.go:

@@ -33,7 +33,6 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -61,6 +60,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	utilfs "k8s.io/kubernetes/pkg/util/filesystem"
 	utilkernel "k8s.io/kubernetes/pkg/util/kernel"
 	utilpod "k8s.io/kubernetes/pkg/util/pod"
@@ -1743,46 +1743,79 @@ func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.Pod
 	}
 }
 
-func deleteCustomResourceFromResourceRequirements(target *v1.ResourceRequirements) {
-	for resource := range target.Limits {
-		if resource != v1.ResourceCPU && resource != v1.ResourceMemory && resource != v1.ResourceEphemeralStorage {
-			delete(target.Limits, resource)
-		}
-	}
-	for resource := range target.Requests {
-		if resource != v1.ResourceCPU && resource != v1.ResourceMemory && resource != v1.ResourceEphemeralStorage {
-			delete(target.Requests, resource)
-		}
-	}
-}
-
-func (kl *Kubelet) determinePodResizeStatus(pod *v1.Pod, podStatus *v1.PodStatus) v1.PodResizeStatus {
-	var podResizeStatus v1.PodResizeStatus
-	specStatusDiffer := false
-	for _, c := range pod.Spec.Containers {
-		if cs, ok := podutil.GetContainerStatus(podStatus.ContainerStatuses, c.Name); ok {
-			cResourceCopy := c.Resources.DeepCopy()
-			// for both requests and limits, we only compare the cpu, memory and ephemeralstorage
-			// which are included in convertToAPIContainerStatuses
-			deleteCustomResourceFromResourceRequirements(cResourceCopy)
-			csResourceCopy := cs.Resources.DeepCopy()
-			if csResourceCopy != nil && !cmp.Equal(*cResourceCopy, *csResourceCopy) {
-				specStatusDiffer = true
-				break
-			}
-		}
-	}
-	if !specStatusDiffer {
-		// Clear last resize state from checkpoint
-		if err := kl.statusManager.SetPodResizeStatus(pod.UID, ""); err != nil {
-			klog.ErrorS(err, "SetPodResizeStatus failed", "pod", pod.Name)
-		}
-	} else {
-		if resizeStatus, found := kl.statusManager.GetPodResizeStatus(string(pod.UID)); found {
-			podResizeStatus = resizeStatus
-		}
-	}
-	return podResizeStatus
-}
+func (kl *Kubelet) determinePodResizeStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus, podIsTerminal bool) v1.PodResizeStatus {
+	if kubetypes.IsStaticPod(allocatedPod) {
+		return ""
+	}
+
+	// If pod is terminal, clear the resize status.
+	if podIsTerminal {
+		if err := kl.statusManager.SetPodResizeStatus(allocatedPod.UID, ""); err != nil {
+			klog.ErrorS(err, "SetPodResizeStatus failed for terminal pod", "pod", format.Pod(allocatedPod))
+		}
+		return ""
+	}
+
+	resizeStatus, _ := kl.statusManager.GetPodResizeStatus(string(allocatedPod.UID))
+	// If the resize was in-progress and the actual resources match the allocated resources, mark
+	// the resize as complete by clearing the resize status.
+	if resizeStatus == v1.PodResizeStatusInProgress &&
+		allocatedResourcesMatchStatus(allocatedPod, podStatus) {
+		if err := kl.statusManager.SetPodResizeStatus(allocatedPod.UID, ""); err != nil {
+			klog.ErrorS(err, "SetPodResizeStatus failed", "pod", format.Pod(allocatedPod))
+		}
+		return ""
+	}
+	return resizeStatus
+}
+
+// allocatedResourcesMatchStatus tests whether the resizeable resources in the pod spec match the
+// resources reported in the status.
+func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
+	for _, c := range allocatedPod.Spec.Containers {
+		if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil {
+			if cs.State != kubecontainer.ContainerStateRunning {
+				// If the container isn't running, it isn't resizing.
+				continue
+			}
+
+			cpuReq, hasCPUReq := c.Resources.Requests[v1.ResourceCPU]
+			cpuLim, hasCPULim := c.Resources.Limits[v1.ResourceCPU]
+			memLim, hasMemLim := c.Resources.Limits[v1.ResourceMemory]
+
+			if cs.Resources == nil {
+				if hasCPUReq || hasCPULim || hasMemLim {
+					// Container status is missing Resources information, but the container does
+					// have resizable resources configured.
+					klog.ErrorS(nil, "Missing runtime resources information for resizing container",
+						"pod", format.Pod(allocatedPod), "container", c.Name)
+					return false // We don't want to clear resize status with insufficient information.
+				} else {
+					// No resizable resources configured; this might be ok.
+					continue
+				}
+			}
+
+			// Only compare resizeable resources, and only compare resources that are explicitly configured.
+			if hasCPUReq {
+				if !cpuReq.Equal(*cs.Resources.CPURequest) {
+					return false
+				}
+			}
+			if hasCPULim {
+				if !cpuLim.Equal(*cs.Resources.CPULimit) {
+					return false
+				}
+			}
+			if hasMemLim {
+				if !memLim.Equal(*cs.Resources.MemoryLimit) {
+					return false
+				}
+			}
+		}
+	}
+
+	return true
+}
 
 // generateAPIPodStatus creates the final API pod status for a pod, given the
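Aside: the new allocatedResourcesMatchStatus helper only compares resources that are explicitly configured, and refuses to report a match when the runtime omits a value for a configured resource. A self-contained sketch of that comparison rule, using simplified stand-in structs (desired and actual are not the kubelet's types), follows:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// desired holds the allocated (spec) values; nil means "not configured".
// actual holds what the runtime reported; nil means "not reported".
type desired struct{ cpuReq, cpuLim, memLim *resource.Quantity }
type actual struct{ cpuReq, cpuLim, memLim *resource.Quantity }

// match mirrors the rule above: compare a resource only when it is
// explicitly configured, and treat a missing runtime report for a
// configured resource as a non-match.
func match(d desired, a actual) bool {
	check := func(want, got *resource.Quantity) bool {
		if want == nil {
			return true // not configured; nothing to compare
		}
		return got != nil && want.Cmp(*got) == 0
	}
	return check(d.cpuReq, a.cpuReq) && check(d.cpuLim, a.cpuLim) && check(d.memLim, a.memLim)
}

func main() {
	q := resource.MustParse("100m")
	q2 := resource.MustParse("200m")
	fmt.Println(match(desired{cpuReq: &q}, actual{cpuReq: &q}))  // true
	fmt.Println(match(desired{cpuReq: &q}, actual{cpuReq: &q2})) // false
	fmt.Println(match(desired{}, actual{}))                      // true: nothing configured
}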
@@ -1796,7 +1829,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	}
 	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
 	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		s.Resize = kl.determinePodResizeStatus(pod, s)
+		s.Resize = kl.determinePodResizeStatus(pod, podStatus, podIsTerminal)
 	}
 	// calculate the next phase and preserve reason
 	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
@@ -2076,103 +2109,63 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 	}
 
 	convertContainerStatusResources := func(cName string, status *v1.ContainerStatus, cStatus *kubecontainer.Status, oldStatuses map[string]v1.ContainerStatus) *v1.ResourceRequirements {
-		var requests, limits v1.ResourceList
 		// oldStatus should always exist if container is running
 		oldStatus, oldStatusFound := oldStatuses[cName]
-		// Initialize limits/requests from container's spec upon transition to Running state
-		// For cpu & memory, values queried from runtime via CRI always supercedes spec values
-		// For ephemeral-storage, a running container's status.limit/request equals spec.limit/request
-		determineResource := func(rName v1.ResourceName, v1ContainerResource, oldStatusResource, resource v1.ResourceList) {
-			if oldStatusFound {
-				if oldStatus.State.Running == nil || status.ContainerID != oldStatus.ContainerID {
-					if r, exists := v1ContainerResource[rName]; exists {
-						resource[rName] = r.DeepCopy()
-					}
-				} else {
-					if oldStatusResource != nil {
-						if r, exists := oldStatusResource[rName]; exists {
-							resource[rName] = r.DeepCopy()
-						}
-					}
-				}
-			}
-		}
-		container := kubecontainer.GetContainerSpec(pod, cName)
+
+		// If the new status is missing resources, then if the container is running and previous
+		// status was also running, preserve the resources previously reported.
+		preserveOldResourcesValue := func(rName v1.ResourceName, oldStatusResource, resource v1.ResourceList) {
+			if cStatus.State == kubecontainer.ContainerStateRunning &&
+				oldStatusFound && oldStatus.State.Running != nil &&
+				status.ContainerID == oldStatus.ContainerID &&
+				oldStatusResource != nil {
+				if r, exists := oldStatusResource[rName]; exists {
+					resource[rName] = r.DeepCopy()
+				}
+			}
+		}
+
 		// Always set the status to the latest allocated resources, even if it differs from the
 		// allocation used by the current sync loop.
 		alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName)
-		if found {
-			status.AllocatedResources = alloc.Requests
-		} else if !(container.Resources.Requests == nil && container.Resources.Limits == nil) {
-			// This case is expected for ephemeral containers.
-			if oldStatusFound {
-				status.AllocatedResources = oldStatus.AllocatedResources
-			}
-		}
+		if !found {
+			// This case is expected for non-resizable containers (ephemeral & non-restartable init containers).
+			// Don't set status.Resources in this case.
+			return nil
+		}
+		if cStatus.State != kubecontainer.ContainerStateRunning {
+			// If the container isn't running, just use the allocated resources.
+			return &alloc
+		}
 		if oldStatus.Resources == nil {
 			oldStatus.Resources = &v1.ResourceRequirements{}
 		}
 
-		convertCustomResources := func(inResources, outResources v1.ResourceList) {
-			for resourceName, resourceQuantity := range inResources {
-				if resourceName == v1.ResourceCPU || resourceName == v1.ResourceMemory ||
-					resourceName == v1.ResourceStorage || resourceName == v1.ResourceEphemeralStorage {
-					continue
-				}
-				outResources[resourceName] = resourceQuantity.DeepCopy()
-			}
-		}
-
-		// Convert Limits
-		if alloc.Limits != nil {
-			limits = make(v1.ResourceList)
+		// Status resources default to the allocated resources.
+		// For non-running containers this will be the reported values.
+		// For non-resizable resources, these values will also be used.
+		resources := alloc
+		if resources.Limits != nil {
 			if cStatus.Resources != nil && cStatus.Resources.CPULimit != nil {
-				limits[v1.ResourceCPU] = cStatus.Resources.CPULimit.DeepCopy()
+				resources.Limits[v1.ResourceCPU] = cStatus.Resources.CPULimit.DeepCopy()
 			} else {
-				determineResource(v1.ResourceCPU, alloc.Limits, oldStatus.Resources.Limits, limits)
+				preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Limits, resources.Limits)
 			}
 			if cStatus.Resources != nil && cStatus.Resources.MemoryLimit != nil {
-				limits[v1.ResourceMemory] = cStatus.Resources.MemoryLimit.DeepCopy()
+				resources.Limits[v1.ResourceMemory] = cStatus.Resources.MemoryLimit.DeepCopy()
 			} else {
-				determineResource(v1.ResourceMemory, alloc.Limits, oldStatus.Resources.Limits, limits)
+				preserveOldResourcesValue(v1.ResourceMemory, oldStatus.Resources.Limits, resources.Limits)
 			}
-			if ephemeralStorage, found := alloc.Limits[v1.ResourceEphemeralStorage]; found {
-				limits[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
-			}
-			if storage, found := alloc.Limits[v1.ResourceStorage]; found {
-				limits[v1.ResourceStorage] = storage.DeepCopy()
-			}
-			convertCustomResources(alloc.Limits, limits)
 		}
-		// Convert Requests
-		if alloc.Requests != nil {
-			requests = make(v1.ResourceList)
+		if resources.Requests != nil {
 			if cStatus.Resources != nil && cStatus.Resources.CPURequest != nil {
-				requests[v1.ResourceCPU] = cStatus.Resources.CPURequest.DeepCopy()
+				resources.Requests[v1.ResourceCPU] = cStatus.Resources.CPURequest.DeepCopy()
 			} else {
-				determineResource(v1.ResourceCPU, alloc.Requests, oldStatus.Resources.Requests, requests)
+				preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Requests, resources.Requests)
 			}
-			if memory, found := alloc.Requests[v1.ResourceMemory]; found {
-				requests[v1.ResourceMemory] = memory.DeepCopy()
-			}
-			if ephemeralStorage, found := alloc.Requests[v1.ResourceEphemeralStorage]; found {
-				requests[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
-			}
-			if storage, found := alloc.Requests[v1.ResourceStorage]; found {
-				requests[v1.ResourceStorage] = storage.DeepCopy()
-			}
-			convertCustomResources(alloc.Requests, requests)
 		}
 
-		resources := &v1.ResourceRequirements{
-			Limits:   limits,
-			Requests: requests,
-		}
-		return resources
+		return &resources
 	}
 
 	convertContainerStatusUser := func(cStatus *kubecontainer.Status) *v1.ContainerUser {
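Aside: the rewritten convertContainerStatusResources establishes a clear precedence for each resizable resource: a runtime-reported value wins; otherwise the previously reported value is preserved, but only for the same still-running container instance; otherwise the allocated value is used. A toy sketch of that precedence, with plain strings standing in for resource quantities:

package main

import "fmt"

// pick mirrors the precedence above for a single resizable resource.
// Empty string means "no value available" for that source.
func pick(runtime, previous, allocated string, sameRunningContainer bool) string {
	if runtime != "" {
		return runtime // values queried from the runtime take precedence
	}
	if sameRunningContainer && previous != "" {
		return previous // preserve what was last reported for this instance
	}
	return allocated // fall back to the allocated (checkpointed) value
}

func main() {
	fmt.Println(pick("150m", "100m", "200m", true)) // 150m: runtime report wins
	fmt.Println(pick("", "100m", "200m", true))     // 100m: preserve previous report
	fmt.Println(pick("", "100m", "200m", false))    // 200m: new container, use allocation
}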
@@ -2341,10 +2334,15 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 		}
 		status := convertContainerStatus(cStatus, oldStatusPtr)
 		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-			if status.State.Running != nil {
-				status.Resources = convertContainerStatusResources(cName, status, cStatus, oldStatuses)
+			status.Resources = convertContainerStatusResources(cName, status, cStatus, oldStatuses)
+
+			if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+				if alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName); found {
+					status.AllocatedResources = alloc.Requests
+				}
 			}
 		}
 
 		if utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
 			status.User = convertContainerStatusUser(cStatus)
 		}
 
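Aside: with this hunk, AllocatedResources is surfaced only when both the parent InPlacePodVerticalScaling gate and the new InPlacePodVerticalScalingAllocatedStatus gate are on. A trivial sketch of the nested gating, with a map standing in for the real feature-gate machinery:

package main

import "fmt"

// gates is a hypothetical stand-in for a feature-gate lookup.
type gates map[string]bool

// allocatedStatusEnabled mirrors the nested check above: the dedicated
// status gate only has an effect when the parent gate is also enabled.
func allocatedStatusEnabled(g gates) bool {
	return g["InPlacePodVerticalScaling"] && g["InPlacePodVerticalScalingAllocatedStatus"]
}

func main() {
	fmt.Println(allocatedStatusEnabled(gates{"InPlacePodVerticalScaling": true})) // false
	fmt.Println(allocatedStatusEnabled(gates{
		"InPlacePodVerticalScaling":                true,
		"InPlacePodVerticalScalingAllocatedStatus": true,
	})) // true
}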
@@ -4543,6 +4543,7 @@ func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
 
 func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+
 	nowTime := time.Now()
 	testContainerName := "ctr0"
 	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
@@ -4566,26 +4567,39 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 			ContainerStatuses: []v1.ContainerStatus{testContainerStatus},
 		},
 	}
-	testKubeContainerStatus := kubecontainer.Status{
-		Name:      testContainerName,
-		ID:        testContainerID,
-		Image:     "img",
-		ImageID:   "1234",
-		ImageRef:  "img1234",
-		State:     kubecontainer.ContainerStateRunning,
-		StartedAt: nowTime,
-	}
-	testPodStatus := &kubecontainer.PodStatus{
-		ID:                testPod.UID,
-		Name:              testPod.Name,
-		Namespace:         testPod.Namespace,
-		ContainerStatuses: []*kubecontainer.Status{&testKubeContainerStatus},
+
+	testPodStatus := func(state kubecontainer.State, resources *kubecontainer.ContainerResources) *kubecontainer.PodStatus {
+		cStatus := kubecontainer.Status{
+			Name:      testContainerName,
+			ID:        testContainerID,
+			Image:     "img",
+			ImageID:   "1234",
+			ImageRef:  "img1234",
+			State:     state,
+			Resources: resources,
+		}
+		switch state {
+		case kubecontainer.ContainerStateRunning:
+			cStatus.StartedAt = nowTime
+		case kubecontainer.ContainerStateExited:
+			cStatus.StartedAt = nowTime
+			cStatus.FinishedAt = nowTime
+		}
+		return &kubecontainer.PodStatus{
+			ID:                testPod.UID,
+			Name:              testPod.Name,
+			Namespace:         testPod.Namespace,
+			ContainerStatuses: []*kubecontainer.Status{&cStatus},
+		}
 	}
 
 	CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
 	CPU2AndMem2G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi")}
 	CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
 	CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
 	CPU1AndMem1GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
+	CPU1AndMem2GAndStorage2G := CPU1AndMem1GAndStorage2G.DeepCopy()
+	CPU1AndMem2GAndStorage2G[v1.ResourceMemory] = resource.MustParse("2Gi")
 	CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
 	CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
 	CPU2AndMem2GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
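Aside: the test now builds its runtime status through a factory closure instead of sharing one mutable fixture, so each case gets a fresh status parameterized by container state. A minimal sketch of that fixture-factory pattern with simplified stand-in types (not the kubecontainer types):

package main

import (
	"fmt"
	"time"
)

type containerStatus struct {
	state      string
	startedAt  time.Time
	finishedAt time.Time
}

func main() {
	now := time.Now()
	// makeStatus returns a fresh fixture per call, so one test case
	// cannot leak mutations into another.
	makeStatus := func(state string) containerStatus {
		cs := containerStatus{state: state}
		switch state {
		case "running":
			cs.startedAt = now
		case "exited":
			cs.startedAt = now
			cs.finishedAt = now
		}
		return cs
	}
	fmt.Println(makeStatus("running").startedAt.IsZero()) // false
	fmt.Println(makeStatus("created").startedAt.IsZero()) // true
}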
@@ -4611,258 +4625,314 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	idx := 0
 | 
						idx := 0
 | 
				
			||||||
	for tdesc, tc := range map[string]struct {
 | 
						for tdesc, tc := range map[string]struct {
 | 
				
			||||||
		Resources []v1.ResourceRequirements
 | 
							State              kubecontainer.State // Defaults to Running
 | 
				
			||||||
		OldStatus []v1.ContainerStatus
 | 
							Resources          v1.ResourceRequirements
 | 
				
			||||||
		Expected  []v1.ContainerStatus
 | 
							AllocatedResources *v1.ResourceRequirements // Defaults to Resources
 | 
				
			||||||
 | 
							OldStatus          v1.ContainerStatus
 | 
				
			||||||
 | 
							Expected           v1.ContainerStatus
 | 
				
			||||||
	}{
 | 
						}{
 | 
				
			||||||
		"GuaranteedQoSPod with CPU and memory CRI status": {
 | 
							"GuaranteedQoSPod with CPU and memory CRI status": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: CPU1AndMem1G,
 | 
				
			||||||
					AllocatedResources: CPU1AndMem1G,
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BurstableQoSPod with CPU and memory CRI status": {
 | 
							"BurstableQoSPod with CPU and memory CRI status": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: CPU1AndMem1G,
 | 
				
			||||||
					AllocatedResources: CPU1AndMem1G,
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"GuaranteedQoSPod with CPU and memory CRI status, with ephemeral storage": {
 | 
							"GuaranteedQoSPod with CPU and memory CRI status, with ephemeral storage": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
				
			||||||
					AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage": {
 | 
							"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
				
			||||||
					AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage, nil resources in OldStatus": {
 | 
							"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage, nil resources in OldStatus": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:    testContainerName,
 | 
				
			||||||
					Name:    testContainerName,
 | 
									Image:   "img",
 | 
				
			||||||
					Image:   "img",
 | 
									ImageID: "img1234",
 | 
				
			||||||
					ImageID: "img1234",
 | 
									State:   v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:   v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
				
			||||||
					AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BestEffortQoSPod": {
 | 
							"BestEffortQoSPod": {
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:        testContainerName,
 | 
				
			||||||
					Name:        testContainerName,
 | 
									ContainerID: testContainerID.String(),
 | 
				
			||||||
					ContainerID: testContainerID.String(),
 | 
									Image:       "img",
 | 
				
			||||||
					Image:       "img",
 | 
									ImageID:     "img1234",
 | 
				
			||||||
					ImageID:     "img1234",
 | 
									State:       v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:       v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									Resources:   &v1.ResourceRequirements{},
 | 
				
			||||||
					Resources:   &v1.ResourceRequirements{},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BestEffort QoSPod with extended resources": {
 | 
							"BestEffort QoSPod with extended resources": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(v1.ResourceList{})}},
 | 
								Resources: v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: addExtendedResource(v1.ResourceList{}),
 | 
				
			||||||
					AllocatedResources: addExtendedResource(v1.ResourceList{}),
 | 
									Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BurstableQoSPod with extended resources": {
 | 
							"BurstableQoSPod with extended resources": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G)}},
 | 
								Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: addExtendedResource(CPU1AndMem1G),
 | 
				
			||||||
					AllocatedResources: addExtendedResource(CPU1AndMem1G),
 | 
									Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"BurstableQoSPod with storage, ephemeral storage and extended resources": {
 | 
							"BurstableQoSPod with storage, ephemeral storage and extended resources": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)}},
 | 
								Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: addExtendedResource(CPU1AndMem1GAndStorage2G),
 | 
				
			||||||
					AllocatedResources: addExtendedResource(CPU1AndMem1GAndStorage2G),
 | 
									Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		"GuaranteedQoSPod with extended resources": {
 | 
							"GuaranteedQoSPod with extended resources": {
 | 
				
			||||||
			Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)}},
 | 
								Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
 | 
				
			||||||
			OldStatus: []v1.ContainerStatus{
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:      testContainerName,
 | 
				
			||||||
					Name:      testContainerName,
 | 
									Image:     "img",
 | 
				
			||||||
					Image:     "img",
 | 
									ImageID:   "img1234",
 | 
				
			||||||
					ImageID:   "img1234",
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
									Resources: &v1.ResourceRequirements{},
 | 
				
			||||||
					Resources: &v1.ResourceRequirements{},
 | 
					 | 
				
			||||||
				},
 | 
					 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			Expected: []v1.ContainerStatus{
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
				{
 | 
									Name:               testContainerName,
 | 
				
			||||||
					Name:               testContainerName,
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
					ContainerID:        testContainerID.String(),
 | 
									Image:              "img",
 | 
				
			||||||
					Image:              "img",
 | 
									ImageID:            "img1234",
 | 
				
			||||||
					ImageID:            "img1234",
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
									AllocatedResources: addExtendedResource(CPU1AndMem1G),
 | 
				
			||||||
					AllocatedResources: addExtendedResource(CPU1AndMem1G),
 | 
									Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
 | 
				
			||||||
					Resources:          &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
 | 
								},
 | 
				
			||||||
				},
 | 
							},
 | 
				
			||||||
 | 
							"newly created Pod": {
 | 
				
			||||||
 | 
								State:     kubecontainer.ContainerStateCreated,
 | 
				
			||||||
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								OldStatus: v1.ContainerStatus{},
 | 
				
			||||||
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
 | 
									Name:               testContainerName,
 | 
				
			||||||
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
 | 
									Image:              "img",
 | 
				
			||||||
 | 
									ImageID:            "img1234",
 | 
				
			||||||
 | 
									State:              v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
 | 
				
			||||||
 | 
									AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
				
			||||||
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
							},
 | 
				
			||||||
 | 
							"newly running Pod": {
 | 
				
			||||||
 | 
								Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
 | 
									Name:    testContainerName,
 | 
				
			||||||
 | 
									Image:   "img",
 | 
				
			||||||
 | 
									ImageID: "img1234",
 | 
				
			||||||
 | 
									State:   v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
 | 
									Name:               testContainerName,
 | 
				
			||||||
 | 
									ContainerID:        testContainerID.String(),
 | 
				
			||||||
 | 
									Image:              "img",
 | 
				
			||||||
 | 
									ImageID:            "img1234",
 | 
				
			||||||
 | 
									State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
 | 
				
			||||||
 | 
									AllocatedResources: CPU1AndMem1GAndStorage2G,
 | 
				
			||||||
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
							},
 | 
				
			||||||
 | 
							"newly terminated Pod": {
 | 
				
			||||||
 | 
								State: kubecontainer.ContainerStateExited,
 | 
				
			||||||
 | 
								// Actual resources were different, but they should be ignored once the container is terminated.
 | 
				
			||||||
 | 
								Resources:          v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								AllocatedResources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
 | 
				
			||||||
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
 | 
									Name:      testContainerName,
 | 
				
			||||||
 | 
									Image:     "img",
 | 
				
			||||||
 | 
									ImageID:   "img1234",
 | 
				
			||||||
 | 
									State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
 | 
				
			||||||
 | 
									Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
								Expected: v1.ContainerStatus{
 | 
				
			||||||
 | 
									Name:        testContainerName,
 | 
				
			||||||
 | 
									ContainerID: testContainerID.String(),
 | 
				
			||||||
 | 
									Image:       "img",
 | 
				
			||||||
 | 
									ImageID:     "img1234",
 | 
				
			||||||
 | 
									State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{
 | 
				
			||||||
 | 
										ContainerID: testContainerID.String(),
 | 
				
			||||||
 | 
										StartedAt:   metav1.NewTime(nowTime),
 | 
				
			||||||
 | 
										FinishedAt:  metav1.NewTime(nowTime),
 | 
				
			||||||
 | 
									}},
 | 
				
			||||||
 | 
									AllocatedResources: CPU2AndMem2GAndStorage2G,
 | 
				
			||||||
 | 
									Resources:          &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
							},
 | 
				
			||||||
 | 
							"resizing Pod": {
 | 
				
			||||||
 | 
								Resources:          v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
 | 
				
			||||||
 | 
								AllocatedResources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
 | 
				
			||||||
 | 
								OldStatus: v1.ContainerStatus{
 | 
				
			||||||
 | 
									Name:      testContainerName,
 | 
				
			||||||
 | 
+					Image:     "img",
+					ImageID:   "img1234",
+					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
+					Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
+				},
+				Expected: v1.ContainerStatus{
+					Name:               testContainerName,
+					ContainerID:        testContainerID.String(),
+					Image:              "img",
+					ImageID:            "img1234",
+					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
+					AllocatedResources: CPU2AndMem2GAndStorage2G,
+					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem2GAndStorage2G},
+				},
 		},
 	} {
-		tPod := testPod.DeepCopy()
-		tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
-		for i := range tPod.Spec.Containers {
-			if tc.Resources != nil {
-				tPod.Spec.Containers[i].Resources = tc.Resources[i]
+		t.Run(tdesc, func(t *testing.T) {
+			tPod := testPod.DeepCopy()
+			tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
+
+			if tc.AllocatedResources != nil {
+				tPod.Spec.Containers[0].Resources = *tc.AllocatedResources
+			} else {
+				tPod.Spec.Containers[0].Resources = tc.Resources
 			}
 			kubelet.statusManager.SetPodAllocation(tPod)
-			if tc.Resources != nil {
-				tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
-				testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
-					MemoryLimit: tc.Resources[i].Limits.Memory(),
-					CPULimit:    tc.Resources[i].Limits.Cpu(),
-					CPURequest:  tc.Resources[i].Requests.Cpu(),
-				}
+			resources := &kubecontainer.ContainerResources{
+				MemoryLimit: tc.Resources.Limits.Memory(),
+				CPULimit:    tc.Resources.Limits.Cpu(),
+				CPURequest:  tc.Resources.Requests.Cpu(),
 			}
-		}
-
-		t.Logf("TestCase: %q", tdesc)
-		cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
-		assert.Equal(t, tc.Expected, cStatuses)
+			state := kubecontainer.ContainerStateRunning
+			if tc.State != "" {
+				state = tc.State
+			}
+			podStatus := testPodStatus(state, resources)
+
+			for _, enableAllocatedStatus := range []bool{true, false} {
+				t.Run(fmt.Sprintf("AllocatedStatus=%t", enableAllocatedStatus), func(t *testing.T) {
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, enableAllocatedStatus)
+
+					expected := tc.Expected
+					if !enableAllocatedStatus {
+						expected = *expected.DeepCopy()
+						expected.AllocatedResources = nil
+					}
+
+					cStatuses := kubelet.convertToAPIContainerStatuses(tPod, podStatus, []v1.ContainerStatus{tc.OldStatus}, tPod.Spec.Containers, false, false)
+					assert.Equal(t, expected, cStatuses[0])
+				})
+			}
+		})
 	}
 }
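The rewritten test above moves each table entry into its own t.Run sub-test and then exercises both settings of the InPlacePodVerticalScalingAllocatedStatus gate in a nested loop. A minimal sketch of that per-gate pattern, assuming an in-tree test file (the gate name, helpers, and assertion come from the diff; the surrounding names are illustrative):

    for _, enabled := range []bool{true, false} {
        t.Run(fmt.Sprintf("AllocatedStatus=%t", enabled), func(t *testing.T) {
            // SetFeatureGateDuringTest restores the previous gate value when this sub-test ends.
            featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
                features.InPlacePodVerticalScalingAllocatedStatus, enabled)

            expected := tc.Expected
            if !enabled {
                // With the gate off, AllocatedResources is dropped from the status,
                // so the expectation must drop it too.
                expected = *expected.DeepCopy()
                expected.AllocatedResources = nil
            }
            // ... run the conversion and assert.Equal(t, expected, got) ...
        })
    }

The DeepCopy before clearing the field is the important detail: both gate iterations share the same table entry, so mutating tc.Expected in place would leak state from one sub-test into the next.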
@@ -6422,3 +6492,155 @@ func TestResolveRecursiveReadOnly(t *testing.T) {
 		}
 	}
 }
+
+func TestAllocatedResourcesMatchStatus(t *testing.T) {
+	tests := []struct {
+		name               string
+		allocatedResources v1.ResourceRequirements
+		statusResources    *kubecontainer.ContainerResources
+		statusTerminated   bool
+		expectMatch        bool
+	}{{
+		name: "guaranteed pod: match",
+		allocatedResources: v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+			Limits: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+		},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(100, resource.DecimalSI),
+			MemoryLimit: resource.NewScaledQuantity(100, 6),
+		},
+		expectMatch: true,
+	}, {
+		name: "guaranteed pod: cpu request mismatch",
+		allocatedResources: v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+			Limits: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+		},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(50, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(100, resource.DecimalSI),
+			MemoryLimit: resource.NewScaledQuantity(100, 6),
+		},
+		expectMatch: false,
+	}, {
+		name: "guaranteed pod: cpu limit mismatch",
+		allocatedResources: v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+			Limits: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+		},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(50, resource.DecimalSI),
+			MemoryLimit: resource.NewScaledQuantity(100, 6),
+		},
+		expectMatch: false,
+	}, {
+		name: "guaranteed pod: memory limit mismatch",
+		allocatedResources: v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+			Limits: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+		},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(100, resource.DecimalSI),
+			MemoryLimit: resource.NewScaledQuantity(50, 6),
+		},
+		expectMatch: false,
+	}, {
+		name: "guaranteed pod: terminated mismatch",
+		allocatedResources: v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+			Limits: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("100m"),
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+		},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
+			CPULimit:    resource.NewMilliQuantity(100, resource.DecimalSI),
+			MemoryLimit: resource.NewScaledQuantity(50, 6),
+		},
+		statusTerminated: true,
+		expectMatch:      true,
+	}, {
+		name: "burstable: no cpu request",
+		allocatedResources: v1.ResourceRequirements{
+			Requests: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("100M"),
+			},
+		},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
+		},
+		expectMatch: true,
+	}, {
+		name:               "best effort",
+		allocatedResources: v1.ResourceRequirements{},
+		statusResources: &kubecontainer.ContainerResources{
+			CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
+		},
+		expectMatch: true,
+	}}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			allocatedPod := v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{{
+						Name:      "c",
+						Resources: test.allocatedResources,
+					}},
+				},
+			}
+			state := kubecontainer.ContainerStateRunning
+			if test.statusTerminated {
+				state = kubecontainer.ContainerStateExited
+			}
+			podStatus := &kubecontainer.PodStatus{
+				Name: "test",
+				ContainerStatuses: []*kubecontainer.Status{
+					{
+						Name:      "c",
+						State:     state,
+						Resources: test.statusResources,
+					},
+				},
+			}
+
+			match := allocatedResourcesMatchStatus(&allocatedPod, podStatus)
+			assert.Equal(t, test.expectMatch, match)
+		})
+	}
+}
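A detail worth noticing in the "match" cases above: the allocated side expresses memory as resource.MustParse("100M") while the status side uses resource.NewScaledQuantity(100, 6). Both denote 100×10^6 bytes, so whatever comparison backs allocatedResourcesMatchStatus must compare quantity values, not string or struct representations. A self-contained sketch of the distinction:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        a := resource.MustParse("100M")          // parsed from its decimal string form
        b := *resource.NewScaledQuantity(100, 6) // constructed as 100 x 10^6

        // Semantic comparison: Cmp returns 0 when the values are equal,
        // regardless of how each quantity was written.
        fmt.Println(a.Cmp(b) == 0) // true

        // String forms may or may not coincide; they are not a reliable equality test.
        fmt.Println(a.String(), b.String())
    }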
@@ -365,7 +365,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
 	}
 
 	opts := resourcehelper.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		UseStatusResources: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
 	}
 	requests := resourcehelper.PodRequests(pod, opts)
 	limits := resourcehelper.PodLimits(pod, opts)
@@ -906,7 +906,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 		usageFgEnabled  corev1.ResourceList
 		usageFgDisabled corev1.ResourceList
 	}{
-		"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for memory resource": {
+		"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for memory resource": {
 			pod: &api.Pod{
 				Spec: api.PodSpec{
 					Containers: []api.Container{
@@ -925,8 +925,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 				Status: api.PodStatus{
 					ContainerStatuses: []api.ContainerStatus{
 						{
-							AllocatedResources: api.ResourceList{
-								api.ResourceMemory: resource.MustParse("150Mi"),
+							Resources: &api.ResourceRequirements{
+								Requests: api.ResourceList{
+									api.ResourceMemory: resource.MustParse("150Mi"),
+								},
 							},
 						},
 					},
@@ -947,7 +949,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
 			},
 		},
-		"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU resource": {
+		"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU resource": {
 			pod: &api.Pod{
 				Spec: api.PodSpec{
 					Containers: []api.Container{
@@ -966,8 +968,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 				Status: api.PodStatus{
 					ContainerStatuses: []api.ContainerStatus{
 						{
-							AllocatedResources: api.ResourceList{
-								api.ResourceCPU: resource.MustParse("150m"),
+							Resources: &api.ResourceRequirements{
+								Requests: api.ResourceList{
+									api.ResourceCPU: resource.MustParse("150m"),
+								},
 							},
 						},
 					},
@@ -988,7 +992,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
 			},
 		},
-		"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU and memory resource": {
+		"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU and memory resource": {
 			pod: &api.Pod{
 				Spec: api.PodSpec{
 					Containers: []api.Container{
@@ -1009,9 +1013,11 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 				Status: api.PodStatus{
 					ContainerStatuses: []api.ContainerStatus{
 						{
-							AllocatedResources: api.ResourceList{
-								api.ResourceCPU:    resource.MustParse("150m"),
-								api.ResourceMemory: resource.MustParse("250Mi"),
+							Resources: &api.ResourceRequirements{
+								Requests: api.ResourceList{
+									api.ResourceCPU:    resource.MustParse("150m"),
+									api.ResourceMemory: resource.MustParse("250Mi"),
+								},
 							},
 						},
 					},
@@ -1038,7 +1044,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
 				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
 			},
 		},
-		"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources==nil) for CPU and memory resource": {
+		"verify Max(Container.Spec.Requests, ContainerStatus.Resources==nil) for CPU and memory resource": {
 			pod: &api.Pod{
 				Spec: api.PodSpec{
 					Containers: []api.Container{
@@ -91,7 +91,7 @@ type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) ActionType
 // extractPodScaleDown interprets the update of a pod and returns PodRequestScaledDown event if any pod's resource request(s) is scaled down.
 func extractPodScaleDown(newPod, oldPod *v1.Pod) ActionType {
 	opt := resource.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
 	}
 	newPodRequests := resource.PodRequests(newPod, opt)
 	oldPodRequests := resource.PodRequests(oldPod, opt)
@@ -330,11 +330,11 @@ func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod
 
 	// the other pod was scheduled, so modification or deletion may free up some resources.
 	originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{}
-	originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}))
-	modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}))
+	originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
+	modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
 
 	// check whether the resource request of the modified pod is less than the original pod.
-	podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling})
+	podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})
 	for rName, rValue := range podRequests {
 		if rValue.IsZero() {
 			// We only care about the resources requested by the pod we are trying to schedule.
@@ -118,7 +118,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger kl
 func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 {
 
 	opts := resourcehelper.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
 	}
 	if !r.useRequested {
 		opts.NonMissingContainerRequests = v1.ResourceList{
@@ -1054,11 +1054,11 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
 
 func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
 	requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
 	})
 
 	non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
 		NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
 			v1.ResourceCPU:    *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
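For context on the unchanged NonMissingContainerRequests field visible in this hunk: the scheduler computes a second, "non-zero" view of pod requests in which a container that specifies no CPU or memory request is charged a default (DefaultMilliCPURequest / DefaultMemoryRequest above), so best-effort containers still carry some scheduling weight. A rough sketch of the substitution this option implies; the helper name is illustrative, not the actual implementation:

    // Illustrative only: fall back to a default when a request is absent or zero.
    func nonMissingRequest(reqs v1.ResourceList, name v1.ResourceName, def resource.Quantity) resource.Quantity {
        if q, ok := reqs[name]; ok && !q.IsZero() {
            return q
        }
        return def
    }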
@@ -1533,9 +1533,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 	restartAlways := v1.ContainerRestartPolicyAlways
 
 	preparePod := func(pod v1.Pod,
-		requests, allocatedResources,
-		initRequests, initAllocatedResources,
-		sidecarRequests, sidecarAllocatedResources *v1.ResourceList,
+		requests, statusResources,
+		initRequests, initStatusResources,
+		sidecarRequests, sidecarStatusResources *v1.ResourceList,
 		resizeStatus v1.PodResizeStatus) v1.Pod {
 
 		if requests != nil {
@@ -1545,11 +1545,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 				Resources: v1.ResourceRequirements{Requests: *requests},
 			})
 		}
-		if allocatedResources != nil {
+		if statusResources != nil {
 			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
 				v1.ContainerStatus{
-					Name:               "c1",
-					AllocatedResources: *allocatedResources,
+					Name: "c1",
+					Resources: &v1.ResourceRequirements{
+						Requests: *statusResources,
+					},
 				})
 		}
@@ -1561,11 +1563,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			},
 			)
 		}
-		if initAllocatedResources != nil {
+		if initStatusResources != nil {
 			pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
 				v1.ContainerStatus{
-					Name:               "i1",
-					AllocatedResources: *initAllocatedResources,
+					Name: "i1",
+					Resources: &v1.ResourceRequirements{
+						Requests: *initStatusResources,
+					},
 				})
 		}
@@ -1578,11 +1582,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			},
 			)
 		}
-		if sidecarAllocatedResources != nil {
+		if sidecarStatusResources != nil {
 			pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
 				v1.ContainerStatus{
-					Name:               "s1",
-					AllocatedResources: *sidecarAllocatedResources,
+					Name: "s1",
+					Resources: &v1.ResourceRequirements{
+						Requests: *sidecarStatusResources,
+					},
 				})
 		}
@@ -1591,74 +1597,74 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 	}
 
 	tests := []struct {
-		name                      string
-		requests                  v1.ResourceList
-		allocatedResources        v1.ResourceList
-		initRequests              *v1.ResourceList
-		initAllocatedResources    *v1.ResourceList
-		sidecarRequests           *v1.ResourceList
-		sidecarAllocatedResources *v1.ResourceList
-		resizeStatus              v1.PodResizeStatus
-		expectedResource          Resource
-		expectedNon0CPU           int64
-		expectedNon0Mem           int64
+		name                   string
+		requests               v1.ResourceList
+		statusResources        v1.ResourceList
+		initRequests           *v1.ResourceList
+		initStatusResources    *v1.ResourceList
+		sidecarRequests        *v1.ResourceList
+		sidecarStatusResources *v1.ResourceList
+		resizeStatus           v1.PodResizeStatus
+		expectedResource       Resource
+		expectedNon0CPU        int64
+		expectedNon0Mem        int64
 	}{
 		{
-			name:               "Pod with no pending resize",
-			requests:           v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			resizeStatus:       "",
-			expectedResource:   Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
-			expectedNon0CPU:    cpu500m.MilliValue(),
-			expectedNon0Mem:    mem500M.Value(),
+			name:             "Pod with no pending resize",
+			requests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			statusResources:  v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			resizeStatus:     "",
+			expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
+			expectedNon0CPU:  cpu500m.MilliValue(),
+			expectedNon0Mem:  mem500M.Value(),
 		},
 		{
-			name:               "Pod with resize in progress",
-			requests:           v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			resizeStatus:       v1.PodResizeStatusInProgress,
-			expectedResource:   Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
-			expectedNon0CPU:    cpu500m.MilliValue(),
-			expectedNon0Mem:    mem500M.Value(),
+			name:             "Pod with resize in progress",
+			requests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			statusResources:  v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			resizeStatus:     v1.PodResizeStatusInProgress,
+			expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
+			expectedNon0CPU:  cpu500m.MilliValue(),
+			expectedNon0Mem:  mem500M.Value(),
 		},
 		{
-			name:               "Pod with deferred resize",
-			requests:           v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			resizeStatus:       v1.PodResizeStatusDeferred,
-			expectedResource:   Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
-			expectedNon0CPU:    cpu700m.MilliValue(),
-			expectedNon0Mem:    mem800M.Value(),
+			name:             "Pod with deferred resize",
+			requests:         v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			statusResources:  v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			resizeStatus:     v1.PodResizeStatusDeferred,
+			expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
+			expectedNon0CPU:  cpu700m.MilliValue(),
+			expectedNon0Mem:  mem800M.Value(),
 		},
 		{
-			name:               "Pod with infeasible resize",
-			requests:           v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			resizeStatus:       v1.PodResizeStatusInfeasible,
-			expectedResource:   Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
-			expectedNon0CPU:    cpu500m.MilliValue(),
-			expectedNon0Mem:    mem500M.Value(),
+			name:             "Pod with infeasible resize",
+			requests:         v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			statusResources:  v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			resizeStatus:     v1.PodResizeStatusInfeasible,
+			expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
+			expectedNon0CPU:  cpu500m.MilliValue(),
+			expectedNon0Mem:  mem500M.Value(),
 		},
 		{
-			name:                   "Pod with init container and no pending resize",
+			name:                "Pod with init container and no pending resize",
+			requests:            v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			statusResources:     v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			initRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			resizeStatus:        "",
+			expectedResource:    Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
+			expectedNon0CPU:     cpu700m.MilliValue(),
+			expectedNon0Mem:     mem800M.Value(),
+		},
+		{
+			name:                   "Pod with sider container and no pending resize",
 			requests:               v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			allocatedResources:     v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			statusResources:        v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			initRequests:           &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			initStatusResources:    &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			sidecarRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			sidecarStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			resizeStatus:           "",
-			expectedResource:       Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
-			expectedNon0CPU:        cpu700m.MilliValue(),
-			expectedNon0Mem:        mem800M.Value(),
-		},
-		{
-			name:                      "Pod with sider container and no pending resize",
-			requests:                  v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			allocatedResources:        v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-			initRequests:              &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			initAllocatedResources:    &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			sidecarRequests:           &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-			resizeStatus:              "",
 			expectedResource: Resource{
 				MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
 				Memory:   mem500M.Value() + mem800M.Value(),
@@ -1671,9 +1677,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			pod := preparePod(*testpod.DeepCopy(),
-				&tt.requests, &tt.allocatedResources,
-				tt.initRequests, tt.initAllocatedResources,
-				tt.sidecarRequests, tt.sidecarAllocatedResources,
+				&tt.requests, &tt.statusResources,
+				tt.initRequests, tt.initStatusResources,
+				tt.sidecarRequests, tt.sidecarStatusResources,
 				tt.resizeStatus)
 
 			res, non0CPU, non0Mem := calculateResource(&pod)
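With the fixtures renamed, the accounting the test expects can be read straight off the table: spec requests are 700m CPU / 800M memory where they differ from the status resources of 500m / 500M, and calculateResource charges:

    resizeStatus    charged CPU   charged memory   rule
    "" (none)       500m          500M             spec == status
    InProgress      500m          500M             spec == status
    Deferred        700m          800M             max(spec, status) -> spec
    Infeasible      500m          500M             status wins outright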
@@ -34,14 +34,12 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apiserver/pkg/admission"
 	genericadmissioninitailizer "k8s.io/apiserver/pkg/admission/initializer"
-	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	corev1listers "k8s.io/client-go/listers/core/v1"
 	"k8s.io/utils/lru"
 
 	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 const (
@@ -523,11 +521,8 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
 
 		// enforce pod limits on init containers
 		if limitType == corev1.LimitTypePod {
-			opts := podResourcesOptions{
-				InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
-			}
-			podRequests := podRequests(pod, opts)
-			podLimits := podLimits(pod, opts)
+			podRequests := podRequests(pod)
+			podLimits := podLimits(pod)
 			for k, v := range limit.Min {
 				if err := minConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil {
 					errs = append(errs, err)
@@ -548,39 +543,17 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
 	return utilerrors.NewAggregate(errs)
 }
 
-type podResourcesOptions struct {
-	// InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled.
-	InPlacePodVerticalScalingEnabled bool
-}
-
 // podRequests is a simplified version of pkg/api/v1/resource/PodRequests that operates against the core version of
 // pod. Any changes to that calculation should be reflected here.
+// NOTE: We do not want to check status resources here, only the spec. This is equivalent to setting
+// UseStatusResources=false in the common helper.
 // TODO: Maybe we can consider doing a partial conversion of the pod to a v1
 // type and then using the pkg/api/v1/resource/PodRequests.
-func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
+func podRequests(pod *api.Pod) api.ResourceList {
 	reqs := api.ResourceList{}
 
-	var containerStatuses map[string]*api.ContainerStatus
-	if opts.InPlacePodVerticalScalingEnabled {
-		containerStatuses = map[string]*api.ContainerStatus{}
-		for i := range pod.Status.ContainerStatuses {
-			containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
-		}
-	}
-
 	for _, container := range pod.Spec.Containers {
 		containerReqs := container.Resources.Requests
-		if opts.InPlacePodVerticalScalingEnabled {
-			cs, found := containerStatuses[container.Name]
-			if found {
-				if pod.Status.Resize == api.PodResizeStatusInfeasible {
-					containerReqs = cs.AllocatedResources
-				} else {
-					containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
-				}
-			}
-		}
-
 		addResourceList(reqs, containerReqs)
 	}
 
@@ -614,9 +587,11 @@ func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
 
 // podLimits is a simplified version of pkg/api/v1/resource/PodLimits that operates against the core version of
 // pod. Any changes to that calculation should be reflected here.
+// NOTE: We do not want to check status resources here, only the spec. This is equivalent to setting
+// UseStatusResources=false in the common helper.
 // TODO: Maybe we can consider doing a partial conversion of the pod to a v1
 // type and then using the pkg/api/v1/resource/PodLimits.
-func podLimits(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
+func podLimits(pod *api.Pod) api.ResourceList {
 	limits := api.ResourceList{}
 
 	for _, container := range pod.Spec.Containers {
@@ -669,24 +644,3 @@ func maxResourceList(list, newList api.ResourceList) {
 		}
 	}
 }
-
-// max returns the result of max(a, b) for each named resource and is only used if we can't
-// accumulate into an existing resource list
-func max(a api.ResourceList, b api.ResourceList) api.ResourceList {
-	result := api.ResourceList{}
-	for key, value := range a {
-		if other, found := b[key]; found {
-			if value.Cmp(other) <= 0 {
-				result[key] = other.DeepCopy()
-				continue
-			}
-		}
-		result[key] = value.DeepCopy()
-	}
-	for key, value := range b {
-		if _, found := result[key]; !found {
-			result[key] = value.DeepCopy()
-		}
-	}
-	return result
-}
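With podResourcesOptions deleted, the admission-time copies of podRequests/podLimits are deliberately spec-only; per the NOTE comments added above, that is equivalent to calling the shared helper with UseStatusResources=false. The intent, as the NOTE states, is that LimitRange validates the resources being requested, so transient status values observed mid-resize do not affect admission. The surviving request aggregation reduces to this (simplified from the hunk above):

    reqs := api.ResourceList{}
    for _, container := range pod.Spec.Containers {
        addResourceList(reqs, container.Resources.Requests)
    }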
@@ -35,8 +35,10 @@ type PodResourcesOptions struct {
 	// Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits
 	// functions. All existing values in Reuse will be lost.
 	Reuse v1.ResourceList
-	// InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled.
-	InPlacePodVerticalScalingEnabled bool
+	// UseStatusResources indicates whether resources reported by the PodStatus should be considered
+	// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
+	// feature is not enabled.
+	UseStatusResources bool
 	// ExcludeOverhead controls if pod overhead is excluded from the calculation.
 	ExcludeOverhead bool
 	// ContainerFn is called with the effective resources required for each container within the pod.
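The rename here is more than cosmetic: the option now names the behavior (consult status-reported resources) rather than the feature gate that currently enables it. Every call site in this PR still derives the flag from the gate, which is exactly the invariant the new comment pins down: it must be false while InPlacePodVerticalScaling is off. A typical caller, mirroring the call sites elsewhere in this diff:

    opts := resourcehelper.PodResourcesOptions{
        UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
    }
    requests := resourcehelper.PodRequests(pod, opts)
    limits := resourcehelper.PodLimits(pod, opts)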
@@ -54,7 +56,7 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	reqs := reuseOrClearResourceList(opts.Reuse)
 
 	var containerStatuses map[string]*v1.ContainerStatus
-	if opts.InPlacePodVerticalScalingEnabled {
+	if opts.UseStatusResources {
 		containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
 		for i := range pod.Status.ContainerStatuses {
 			containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
@@ -63,13 +65,13 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 
 	for _, container := range pod.Spec.Containers {
 		containerReqs := container.Resources.Requests
-		if opts.InPlacePodVerticalScalingEnabled {
+		if opts.UseStatusResources {
 			cs, found := containerStatuses[container.Name]
-			if found {
+			if found && cs.Resources != nil {
 				if pod.Status.Resize == v1.PodResizeStatusInfeasible {
-					containerReqs = cs.AllocatedResources.DeepCopy()
+					containerReqs = cs.Resources.Requests.DeepCopy()
 				} else {
-					containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
+					containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
 				}
 			}
 		}
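The resolution rule above: if the pending resize was marked Infeasible, the status (actual) requests win outright; otherwise the helper reserves the per-resource max of spec and status while the resize is in flight. The test changes later in this diff pin both branches with the same fixture (spec 4 CPU, status 2 CPU): the max branch yields 4, the Infeasible branch yields 2. Condensed, with cs being the matching container status:

    containerReqs := container.Resources.Requests
    if opts.UseStatusResources {
        if cs, found := containerStatuses[container.Name]; found && cs.Resources != nil {
            if pod.Status.Resize == v1.PodResizeStatusInfeasible {
                containerReqs = cs.Resources.Requests.DeepCopy() // resize rejected: actuals win
            } else {
                containerReqs = max(container.Resources.Requests, cs.Resources.Requests) // reserve the larger
            }
        }
    }

Note also the new nil guard (found && cs.Resources != nil): with AllocatedResources gone from this path, a status entry that has not yet reported Resources simply falls back to the spec.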
@@ -155,11 +157,31 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	// attempt to reuse the maps if passed, or allocate otherwise
 	limits := reuseOrClearResourceList(opts.Reuse)
 
-	for _, container := range pod.Spec.Containers {
-		if opts.ContainerFn != nil {
-			opts.ContainerFn(container.Resources.Limits, Containers)
+	var containerStatuses map[string]*v1.ContainerStatus
+	if opts.UseStatusResources {
+		containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
+		for i := range pod.Status.ContainerStatuses {
+			containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
 		}
-		addResourceList(limits, container.Resources.Limits)
+	}
+
+	for _, container := range pod.Spec.Containers {
+		containerLimits := container.Resources.Limits
+		if opts.UseStatusResources {
+			cs, found := containerStatuses[container.Name]
+			if found && cs.Resources != nil {
+				if pod.Status.Resize == v1.PodResizeStatusInfeasible {
+					containerLimits = cs.Resources.Limits.DeepCopy()
+				} else {
+					containerLimits = max(container.Resources.Limits, cs.Resources.Limits)
+				}
+			}
+		}
+
+		if opts.ContainerFn != nil {
+			opts.ContainerFn(containerLimits, Containers)
+		}
+		addResourceList(limits, containerLimits)
 	}
 
 	restartableInitContainerLimits := v1.ResourceList{}
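PodLimits previously summed spec limits only; it now mirrors PodRequests, so a limit that is still in effect per the container status keeps being accounted while a resize is pending. The "pod scaled up" test case added at the end of this diff expects an effective memory limit of 2Gi with UseStatusResources: true; that case is truncated here, so the per-side values below are assumptions for illustration only:

    // Assumed values: spec limit raised to 2Gi, status still reporting a smaller
    // limit (only the 2Gi expectation appears in this diff). With no infeasible
    // resize, the effective limit is max(spec, status) per resource -> memory: 2Gi.
    spec := v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi")}
    status := v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")}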
@@ -432,7 +432,7 @@ func TestPodResourceRequests(t *testing.T) {
 				v1.ResourceCPU: resource.MustParse("2"),
 			},
 			podResizeStatus: v1.PodResizeStatusInfeasible,
-			options:         PodResourcesOptions{InPlacePodVerticalScalingEnabled: true},
+			options:         PodResourcesOptions{UseStatusResources: true},
 			containers: []v1.Container{
 				{
 					Name: "container-1",
@@ -446,8 +446,10 @@ func TestPodResourceRequests(t *testing.T) {
 			containerStatus: []v1.ContainerStatus{
 				{
 					Name: "container-1",
-					AllocatedResources: v1.ResourceList{
-						v1.ResourceCPU: resource.MustParse("2"),
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
 					},
 				},
 			},
@@ -457,7 +459,7 @@ func TestPodResourceRequests(t *testing.T) {
 			expectedRequests: v1.ResourceList{
 				v1.ResourceCPU: resource.MustParse("4"),
 			},
-			options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true},
+			options: PodResourcesOptions{UseStatusResources: true},
 			containers: []v1.Container{
 				{
 					Name: "container-1",
@@ -471,19 +473,21 @@ func TestPodResourceRequests(t *testing.T) {
 			containerStatus: []v1.ContainerStatus{
 				{
 					Name: "container-1",
-					AllocatedResources: v1.ResourceList{
-						v1.ResourceCPU: resource.MustParse("2"),
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
 					},
 				},
 			},
 		},
 		{
-			description: "resized, infeasible, feature gate disabled",
+			description: "resized, infeasible, but don't use status",
 			expectedRequests: v1.ResourceList{
 				v1.ResourceCPU: resource.MustParse("4"),
 			},
 			podResizeStatus: v1.PodResizeStatusInfeasible,
-			options:         PodResourcesOptions{InPlacePodVerticalScalingEnabled: false},
+			options:         PodResourcesOptions{UseStatusResources: false},
 			containers: []v1.Container{
 				{
 					Name: "container-1",
@@ -497,8 +501,10 @@ func TestPodResourceRequests(t *testing.T) {
 			containerStatus: []v1.ContainerStatus{
 				{
 					Name: "container-1",
-					AllocatedResources: v1.ResourceList{
-						v1.ResourceCPU: resource.MustParse("2"),
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
 					},
 				},
 			},
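The fixture changes above track the Beta API shape: tests no longer populate ContainerStatus.AllocatedResources (a bare v1.ResourceList) and instead set ContainerStatus.Resources, a *v1.ResourceRequirements reporting what the kubelet actually applied. A sketch of the new shape, with illustrative values:

	status := v1.ContainerStatus{
		Name: "container-1",
		Resources: &v1.ResourceRequirements{ // resources as actuated, not as requested
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
		},
	}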
@@ -742,12 +748,13 @@ func TestPodResourceRequestsReuse(t *testing.T) {
 func TestPodResourceLimits(t *testing.T) {
 	restartAlways := v1.ContainerRestartPolicyAlways
 	testCases := []struct {
-		description    string
-		options        PodResourcesOptions
-		overhead       v1.ResourceList
-		initContainers []v1.Container
-		containers     []v1.Container
-		expectedLimits v1.ResourceList
+		description       string
+		options           PodResourcesOptions
+		overhead          v1.ResourceList
+		initContainers    []v1.Container
+		containers        []v1.Container
+		containerStatuses []v1.ContainerStatus
+		expectedLimits    v1.ResourceList
 	}{
 		{
 			description: "nil options, larger init container",
@@ -1119,6 +1126,87 @@ func TestPodResourceLimits(t *testing.T) {
 				},
 			},
 		},
+		{
+			description: "pod scaled up",
+			expectedLimits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("2Gi"),
+			},
+			options: PodResourcesOptions{UseStatusResources: true},
+			containers: []v1.Container{
+				{
+					Name: "container-1",
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("2Gi"),
+						},
+					},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				{
+					Name: "container-1",
+					Resources: &v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "pod scaled down",
+			expectedLimits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("2Gi"),
+			},
+			options: PodResourcesOptions{UseStatusResources: true},
+			containers: []v1.Container{
+				{
+					Name: "container-1",
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				{
+					Name: "container-1",
+					Resources: &v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("2Gi"),
+						},
+					},
+				},
+			},
+		},
+		{
+			description: "pod scaled down, don't use status",
+			expectedLimits: v1.ResourceList{
+				v1.ResourceMemory: resource.MustParse("1Gi"),
+			},
+			options: PodResourcesOptions{UseStatusResources: false},
+			containers: []v1.Container{
+				{
+					Name: "container-1",
+					Resources: v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("1Gi"),
+						},
+					},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				{
+					Name: "container-1",
+					Resources: &v1.ResourceRequirements{
+						Limits: v1.ResourceList{
+							v1.ResourceMemory: resource.MustParse("2Gi"),
+						},
+					},
+				},
+			},
+		},
 	}
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
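Reading the three new cases: while a resize is in flight, PodLimits with UseStatusResources reports the per-resource maximum of spec and status, so both "pod scaled up" (spec 2Gi, status 1Gi) and "pod scaled down" (spec 1Gi, status 2Gi) expect 2Gi; with UseStatusResources false, only the spec limit (1Gi) counts.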
@@ -1128,6 +1216,9 @@ func TestPodResourceLimits(t *testing.T) {
 					InitContainers: tc.initContainers,
 					Overhead:       tc.overhead,
 				},
+				Status: v1.PodStatus{
+					ContainerStatuses: tc.containerStatuses,
+				},
 			}
 			limits := PodLimits(p, tc.options)
 			if diff := cmp.Diff(limits, tc.expectedLimits); diff != "" {

@@ -49,11 +49,11 @@ func podRequests(pod *corev1.Pod) corev1.ResourceList {
 	for _, container := range pod.Spec.Containers {
 		containerReqs := container.Resources.Requests
 		cs, found := containerStatuses[container.Name]
-		if found {
+		if found && cs.Resources != nil {
 			if pod.Status.Resize == corev1.PodResizeStatusInfeasible {
-				containerReqs = cs.AllocatedResources.DeepCopy()
+				containerReqs = cs.Resources.Requests.DeepCopy()
 			} else {
-				containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
+				containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
 			}
 		}
 		addResourceList(reqs, containerReqs)
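Worked example for the updated podRequests branch: with a spec request of cpu=4 and a status (actuated) request of cpu=2, an in-progress resize yields max(4, 2) = 4, while Resize == Infeasible yields the status value 2, since the pending spec will never be actuated. The added `found && cs.Resources != nil` guard keeps the old spec-only behavior for statuses that don't report resources yet.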
@@ -122,13 +122,16 @@ func doPodResizeTests(f *framework.Framework) {
 		patchString         string
 		expected            []e2epod.ResizableContainerInfo
 		addExtendedResource bool
+		// TODO(123940): test rollback for all test cases once resize is more responsive.
+		testRollback bool
 	}
 
 	noRestart := v1.NotRequired
 	doRestart := v1.RestartContainer
 	tests := []testCase{
 		{
-			name: "Guaranteed QoS pod, one container - increase CPU & memory",
+			name:         "Guaranteed QoS pod, one container - increase CPU & memory",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -208,7 +211,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)",
+			name:         "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -256,7 +260,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
+			name:         "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -274,7 +279,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only",
+			name:         "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -328,7 +334,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
+			name:         "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -346,7 +353,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
+			name:         "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -634,7 +642,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
+			name:         "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -657,7 +666,8 @@ func doPodResizeTests(f *framework.Framework) {
 			},
 		},
 		{
-			name: "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)",
+			name:         "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)",
+			testRollback: true,
 			containers: []e2epod.ResizableContainerInfo{
 				{
 					Name:      "c1",
@@ -850,8 +860,6 @@ func doPodResizeTests(f *framework.Framework) {
 		},
 	}
 
-	timeouts := framework.NewTimeoutContext()
-
 	for idx := range tests {
 		tc := tests[idx]
 		ginkgo.It(tc.name, func(ctx context.Context) {
@@ -862,7 +870,8 @@ func doPodResizeTests(f *framework.Framework) {
 			tStamp := strconv.Itoa(time.Now().Nanosecond())
 			e2epod.InitDefaultResizePolicy(tc.containers)
 			e2epod.InitDefaultResizePolicy(tc.expected)
-			testPod = e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod", tStamp, tc.containers)
+			testPod = e2epod.MakePodWithResizableContainers(f.Namespace.Name, "", tStamp, tc.containers)
+			testPod.GenerateName = "resize-test-"
 			testPod = e2epod.MustMixinRestrictedPodSecurity(testPod)
 
 			if tc.addExtendedResource {
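Switching from a fixed "testpod" name to an empty name plus GenerateName lets the API server append a unique random suffix per pod, so repeated or parallel runs can't collide on the same name. Sketch of the mechanism (the suffix is server-chosen):

	pod.Name = ""
	pod.GenerateName = "resize-test-" // server creates e.g. "resize-test-8x2vq"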
@@ -882,17 +891,17 @@ func doPodResizeTests(f *framework.Framework) {
 			ginkgo.By("creating pod")
 			newPod := podClient.CreateSync(ctx, testPod)
 
-			ginkgo.By("verifying initial pod resources, allocations are as expected")
+			ginkgo.By("verifying initial pod resources are as expected")
 			e2epod.VerifyPodResources(newPod, tc.containers)
 			ginkgo.By("verifying initial pod resize policy is as expected")
 			e2epod.VerifyPodResizePolicy(newPod, tc.containers)
 
 			ginkgo.By("verifying initial pod status resources are as expected")
-			e2epod.VerifyPodStatusResources(newPod, tc.containers)
+			framework.ExpectNoError(e2epod.VerifyPodStatusResources(newPod, tc.containers))
 			ginkgo.By("verifying initial cgroup config are as expected")
 			framework.ExpectNoError(e2epod.VerifyPodContainersCgroupValues(ctx, f, newPod, tc.containers))
 
-			patchAndVerify := func(patchString string, expectedContainers []e2epod.ResizableContainerInfo, initialContainers []e2epod.ResizableContainerInfo, opStr string, isRollback bool) {
+			patchAndVerify := func(patchString string, expectedContainers []e2epod.ResizableContainerInfo, opStr string) {
 				ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
 				patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
 					types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
@@ -900,35 +909,32 @@ func doPodResizeTests(f *framework.Framework) {
 
 				ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
 				e2epod.VerifyPodResources(patchedPod, expectedContainers)
-				gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-					WithArguments(patchedPod, initialContainers).
-					Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
 
 				ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
-				resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)
-				// Check cgroup values only for containerd versions before 1.6.9
-				ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
-				framework.ExpectNoError(e2epod.VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers))
-
-				ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
-				e2epod.VerifyPodResources(resizedPod, expectedContainers)
-
-				ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
-				gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-					WithArguments(resizedPod, expectedContainers).
-					Should(gomega.BeNil(), "failed to verify Pod allocations for resizedPod")
+				resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod)
+				e2epod.ExpectPodResized(ctx, f, resizedPod, expectedContainers)
 			}
 
-			patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
+			patchAndVerify(tc.patchString, tc.expected, "resize")
 
-			rbPatchStr, err := e2epod.ResizeContainerPatch(tc.containers)
-			framework.ExpectNoError(err)
-			// Resize has been actuated, test rollback
-			patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
+			if tc.testRollback {
+				// Resize has been actuated, test rollback
+				rollbackContainers := make([]e2epod.ResizableContainerInfo, len(tc.containers))
+				copy(rollbackContainers, tc.containers)
+				for i, c := range rollbackContainers {
+					gomega.Expect(c.Name).To(gomega.Equal(tc.expected[i].Name),
+						"test case containers & expectations should be in the same order")
+					// Resizes that trigger a restart should trigger a second restart when rolling back.
+					rollbackContainers[i].RestartCount = tc.expected[i].RestartCount * 2
+				}
+
+				rbPatchStr, err := e2epod.ResizeContainerPatch(tc.containers)
+				framework.ExpectNoError(err)
+				patchAndVerify(rbPatchStr, rollbackContainers, "rollback")
+			}
 
 			ginkgo.By("deleting pod")
-			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
+			framework.ExpectNoError(podClient.Delete(ctx, newPod.Name, metav1.DeleteOptions{}))
 		})
 	}
 }
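The rollback arithmetic above: a resize policy of RestartContainer restarts the container once for the resize and once more for the rollback, hence RestartCount * 2; a container expected at RestartCount 1 after the resize is expected at 2 after rollback, and one resized without a restart stays at 0 in both directions.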
@@ -963,7 +969,7 @@ func doPodResizeErrorTests(f *framework.Framework) {
 		},
 	}
 
-	timeouts := framework.NewTimeoutContext()
+	timeouts := f.Timeouts
 
 	for idx := range tests {
 		tc := tests[idx]
@@ -981,12 +987,12 @@ func doPodResizeErrorTests(f *framework.Framework) {
 			ginkgo.By("creating pod")
 			newPod := podClient.CreateSync(ctx, testPod)
 
-			ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
+			ginkgo.By("verifying initial pod resources, and policy are as expected")
 			e2epod.VerifyPodResources(newPod, tc.containers)
 			e2epod.VerifyPodResizePolicy(newPod, tc.containers)
 
 			ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
-			e2epod.VerifyPodStatusResources(newPod, tc.containers)
+			framework.ExpectNoError(e2epod.VerifyPodStatusResources(newPod, tc.containers))
 
 			ginkgo.By("patching pod for resize")
 			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(ctx, newPod.Name,
@@ -1001,10 +1007,8 @@ func doPodResizeErrorTests(f *framework.Framework) {
 			ginkgo.By("verifying pod resources after patch")
 			e2epod.VerifyPodResources(patchedPod, tc.expected)
 
-			ginkgo.By("verifying pod allocations after patch")
-			gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-				WithArguments(patchedPod, tc.expected).
-				Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
+			ginkgo.By("verifying pod status resources after patch")
+			framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPod, tc.expected))
 
 			ginkgo.By("deleting pod")
 			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)

@@ -19,6 +19,7 @@ package pod
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -26,12 +27,11 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
-	"github.com/google/go-cmp/cmp"
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 )
@@ -64,17 +64,42 @@ type ContainerResources struct {
 	ExtendedResourceLim string
 }
 
-type ContainerAllocations struct {
-	CPUAlloc              string
-	MemAlloc              string
-	ephStorAlloc          string
-	ExtendedResourceAlloc string
+func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
+	if cr == nil {
+		return nil
+	}
+
+	var lim, req v1.ResourceList
+	if cr.CPULim != "" || cr.MemLim != "" || cr.EphStorLim != "" {
+		lim = make(v1.ResourceList)
+	}
+	if cr.CPUReq != "" || cr.MemReq != "" || cr.EphStorReq != "" {
+		req = make(v1.ResourceList)
+	}
+	if cr.CPULim != "" {
+		lim[v1.ResourceCPU] = resource.MustParse(cr.CPULim)
+	}
+	if cr.MemLim != "" {
+		lim[v1.ResourceMemory] = resource.MustParse(cr.MemLim)
+	}
+	if cr.EphStorLim != "" {
+		lim[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorLim)
+	}
+	if cr.CPUReq != "" {
+		req[v1.ResourceCPU] = resource.MustParse(cr.CPUReq)
+	}
+	if cr.MemReq != "" {
+		req[v1.ResourceMemory] = resource.MustParse(cr.MemReq)
+	}
+	if cr.EphStorReq != "" {
+		req[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorReq)
+	}
+	return &v1.ResourceRequirements{Limits: lim, Requests: req}
 }
 
 type ResizableContainerInfo struct {
 	Name         string
 	Resources    *ContainerResources
-	Allocations  *ContainerAllocations
 	CPUPolicy    *v1.ResourceResizeRestartPolicy
 	MemPolicy    *v1.ResourceResizeRestartPolicy
 	RestartCount int32
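The new ResourceRequirements() helper centralizes the string-to-Quantity conversion that getTestResourceInfo used to do inline, and a nil receiver maps to a nil result so callers can pass an unset *ContainerResources straight through. Hypothetical usage:

	cr := &ContainerResources{CPUReq: "500m", MemLim: "1Gi"}
	rr := cr.ResourceRequirements()
	// rr.Requests: cpu=500m; rr.Limits: memory=1Gi
	var none *ContainerResources
	_ = none.ResourceRequirements() // nil, by the nil-receiver check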
@@ -102,51 +127,9 @@ type patchSpec struct {
 	} `json:"spec"`
 }
 
-func getTestResourceInfo(tcInfo ResizableContainerInfo) (v1.ResourceRequirements, v1.ResourceList, []v1.ContainerResizePolicy) {
-	var res v1.ResourceRequirements
-	var alloc v1.ResourceList
-	var resizePol []v1.ContainerResizePolicy
-
+func getTestResourceInfo(tcInfo ResizableContainerInfo) (res v1.ResourceRequirements, resizePol []v1.ContainerResizePolicy) {
 	if tcInfo.Resources != nil {
-		var lim, req v1.ResourceList
-		if tcInfo.Resources.CPULim != "" || tcInfo.Resources.MemLim != "" || tcInfo.Resources.EphStorLim != "" {
-			lim = make(v1.ResourceList)
-		}
-		if tcInfo.Resources.CPUReq != "" || tcInfo.Resources.MemReq != "" || tcInfo.Resources.EphStorReq != "" {
-			req = make(v1.ResourceList)
-		}
-		if tcInfo.Resources.CPULim != "" {
-			lim[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPULim)
-		}
-		if tcInfo.Resources.MemLim != "" {
-			lim[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemLim)
-		}
-		if tcInfo.Resources.EphStorLim != "" {
-			lim[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorLim)
-		}
-		if tcInfo.Resources.CPUReq != "" {
-			req[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPUReq)
-		}
-		if tcInfo.Resources.MemReq != "" {
-			req[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemReq)
-		}
-		if tcInfo.Resources.EphStorReq != "" {
-			req[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorReq)
-		}
-		res = v1.ResourceRequirements{Limits: lim, Requests: req}
-	}
-	if tcInfo.Allocations != nil {
-		alloc = make(v1.ResourceList)
-		if tcInfo.Allocations.CPUAlloc != "" {
-			alloc[v1.ResourceCPU] = resource.MustParse(tcInfo.Allocations.CPUAlloc)
-		}
-		if tcInfo.Allocations.MemAlloc != "" {
-			alloc[v1.ResourceMemory] = resource.MustParse(tcInfo.Allocations.MemAlloc)
-		}
-		if tcInfo.Allocations.ephStorAlloc != "" {
-			alloc[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Allocations.ephStorAlloc)
-		}
-
+		res = *tcInfo.Resources.ResourceRequirements()
 	}
 	if tcInfo.CPUPolicy != nil {
 		cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
@@ -156,7 +139,7 @@ func getTestResourceInfo(tcInfo ResizableContainerInfo) (v1.ResourceRequirements
 		memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy}
 		resizePol = append(resizePol, memPol)
 	}
-	return res, alloc, resizePol
+	return res, resizePol
 }
 
 func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
@@ -174,9 +157,9 @@ func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
 	}
 }
 
-func makeResizableContainer(tcInfo ResizableContainerInfo) (v1.Container, v1.ContainerStatus) {
+func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
 	cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
-	res, alloc, resizePol := getTestResourceInfo(tcInfo)
+	res, resizePol := getTestResourceInfo(tcInfo)
 
 	tc := v1.Container{
 		Name:         tcInfo.Name,
@@ -187,18 +170,14 @@ func makeResizableContainer(tcInfo ResizableContainerInfo) (v1.Container, v1.Con
 		ResizePolicy: resizePol,
 	}
 
-	tcStatus := v1.ContainerStatus{
-		Name:               tcInfo.Name,
-		AllocatedResources: alloc,
-	}
-	return tc, tcStatus
+	return tc
 }
 
 func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
 	var testContainers []v1.Container
 
 	for _, ci := range tcInfo {
-		tc, _ := makeResizableContainer(ci)
+		tc := makeResizableContainer(ci)
 		testContainers = append(testContainers, tc)
 	}
 	pod := &v1.Pod{
@@ -223,7 +202,7 @@ func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 	gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
 	for i, wantCtr := range wantCtrs {
 		gotCtr := &gotPod.Spec.Containers[i]
-		ctr, _ := makeResizableContainer(wantCtr)
+		ctr := makeResizableContainer(wantCtr)
 		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
 		gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
 	}
@@ -234,45 +213,34 @@ func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
 	gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
 	for i, wantCtr := range wantCtrs {
 		gotCtr := &gotPod.Spec.Containers[i]
-		ctr, _ := makeResizableContainer(wantCtr)
+		ctr := makeResizableContainer(wantCtr)
 		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
 		gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
 	}
 }
 
-func VerifyPodAllocations(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
+func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
 	ginkgo.GinkgoHelper()
-	gomega.Expect(gotPod.Status.ContainerStatuses).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
+
+	var errs []error
+
+	if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) {
+		return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
+			len(gotPod.Status.ContainerStatuses), len(wantCtrs))
+	}
 	for i, wantCtr := range wantCtrs {
 		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
-		if wantCtr.Allocations == nil {
-			if wantCtr.Resources != nil {
-				alloc := &ContainerAllocations{CPUAlloc: wantCtr.Resources.CPUReq, MemAlloc: wantCtr.Resources.MemReq}
-				wantCtr.Allocations = alloc
-				defer func() {
-					wantCtr.Allocations = nil
-				}()
-			}
+		ctr := makeResizableContainer(wantCtr)
+		if gotCtrStatus.Name != ctr.Name {
+			errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name))
+			continue
 		}
-
-		_, ctrStatus := makeResizableContainer(wantCtr)
-		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctrStatus.Name))
-		if !cmp.Equal(gotCtrStatus.AllocatedResources, ctrStatus.AllocatedResources) {
-			return fmt.Errorf("failed to verify Pod allocations, allocated resources not equal to expected")
+		if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(ctr.Resources)); err != nil {
+			errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", ctr.Name, err))
 		}
 	}
-	return nil
-}
 
-func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
-	ginkgo.GinkgoHelper()
-	gomega.Expect(gotPod.Status.ContainerStatuses).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
-	for i, wantCtr := range wantCtrs {
-		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
-		ctr, _ := makeResizableContainer(wantCtr)
-		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctr.Name))
-		gomega.Expect(ctr.Resources).To(gomega.Equal(*gotCtrStatus.Resources))
-	}
+	return utilerrors.NewAggregate(errs)
 }
 
 // isPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node.
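Unlike a bare gomega.Expect, framework.Gomega().Expect(...).To(...) returns the assertion failure as an error instead of aborting the spec, which is what lets VerifyPodStatusResources collect one mismatch per container into errs and report them together as a single aggregate.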
@@ -307,19 +275,21 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 			pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
 		cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
 		if err != nil {
-			return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
+			return fmt.Errorf("failed to read cgroup %q for container %s: %w", cgPath, cName, err)
 		}
 		cgValue = strings.Trim(cgValue, "\n")
 		if cgValue != expectedCgValue {
-			return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
+			return fmt.Errorf("container %s cgroup %q doesn't match expected: got %q want %q",
+				cName, cgPath, cgValue, expectedCgValue)
 		}
 		return nil
 	}
+	var errs []error
 	for _, ci := range tcInfo {
 		if ci.Resources == nil {
 			continue
 		}
-		tc, _ := makeResizableContainer(ci)
+		tc := makeResizableContainer(ci)
 		if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
 			var expectedCPUShares int64
 			var expectedCPULimitString, expectedMemLimitString string
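With the cgroup checks now appended into errs (nil entries on success, which NewAggregate drops, per the note above), one bad container no longer short-circuits verification of the rest: every cgroup mismatch in the pod surfaces in a single aggregated error.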
@@ -350,90 +320,85 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 | 
				
			|||||||
				expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
 | 
									expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			if expectedMemLimitString != "0" {
 | 
								if expectedMemLimitString != "0" {
 | 
				
			||||||
				err := verifyCgroupValue(ci.Name, cgroupMemLimit, expectedMemLimitString)
 | 
									errs = append(errs, verifyCgroupValue(ci.Name, cgroupMemLimit, expectedMemLimitString))
 | 
				
			||||||
				if err != nil {
 | 
					 | 
				
			||||||
					return err
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
			err := verifyCgroupValue(ci.Name, cgroupCPULimit, expectedCPULimitString)
 | 
					 | 
				
			||||||
			if err != nil {
 | 
					 | 
				
			||||||
				return err
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
			err = verifyCgroupValue(ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10))
 | 
					 | 
				
			||||||
			if err != nil {
 | 
					 | 
				
			||||||
				return err
 | 
					 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
 | 
								errs = append(errs, verifyCgroupValue(ci.Name, cgroupCPULimit, expectedCPULimitString))
 | 
				
			||||||
 | 
								errs = append(errs, verifyCgroupValue(ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return nil
 | 
						return utilerrors.NewAggregate(errs)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func waitForContainerRestart(ctx context.Context, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo, initialContainers []ResizableContainerInfo, isRollback bool) error {
 | 
					func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContainerInfo) error {
 | 
				
			||||||
	ginkgo.GinkgoHelper()
 | 
						ginkgo.GinkgoHelper()
 | 
				
			||||||
	var restartContainersExpected []string
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	restartContainers := expectedContainers
 | 
						expectContainerRestarts := map[string]int32{}
 | 
				
			||||||
	// if we're rolling back, extract restart counts from test case "expected" containers
 | 
						for _, ci := range expectedContainers {
 | 
				
			||||||
	if isRollback {
 | 
							expectContainerRestarts[ci.Name] = ci.RestartCount
 | 
				
			||||||
		restartContainers = initialContainers
 | 
					 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	for _, ci := range restartContainers {
 | 
						errs := []error{}
 | 
				
			||||||
		if ci.RestartCount > 0 {
 | 
						for _, cs := range pod.Status.ContainerStatuses {
 | 
				
			||||||
			restartContainersExpected = append(restartContainersExpected, ci.Name)
 | 
							expectedRestarts := expectContainerRestarts[cs.Name]
 | 
				
			||||||
 | 
							if cs.RestartCount != expectedRestarts {
 | 
				
			||||||
 | 
								errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts))
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if len(restartContainersExpected) == 0 {
 | 
						return utilerrors.NewAggregate(errs)
 | 
				
			||||||
		return nil
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
 | 
					 | 
				
			||||||
	if err != nil {
 | 
					 | 
				
			||||||
		return err
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	restartedContainersCount := 0
 | 
					 | 
				
			||||||
	for _, cName := range restartContainersExpected {
 | 
					 | 
				
			||||||
		cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
 | 
					 | 
				
			||||||
		if cs.RestartCount < 1 {
 | 
					 | 
				
			||||||
			break
 | 
					 | 
				
			||||||
		}
 | 
					 | 
				
			||||||
		restartedContainersCount++
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	if restartedContainersCount == len(restartContainersExpected) {
 | 
					 | 
				
			||||||
		return nil
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	if restartedContainersCount > len(restartContainersExpected) {
 | 
					 | 
				
			||||||
		return fmt.Errorf("more container restarts than expected")
 | 
					 | 
				
			||||||
	} else {
 | 
					 | 
				
			||||||
		return fmt.Errorf("less container restarts than expected")
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod, patchedPod *v1.Pod, expectedContainers []ResizableContainerInfo, initialContainers []ResizableContainerInfo, isRollback bool) *v1.Pod {
 | 
					func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod) *v1.Pod {
 | 
				
			||||||
	ginkgo.GinkgoHelper()
 | 
						ginkgo.GinkgoHelper()
 | 
				
			||||||
	var resizedPod *v1.Pod
 | 
						// Wait for resize to complete.
 | 
				
			||||||
	var pErr error
 | 
						framework.ExpectNoError(WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "resize status cleared", f.Timeouts.PodStart,
 | 
				
			||||||
	timeouts := framework.NewTimeoutContext()
 | 
							func(pod *v1.Pod) (bool, error) {
 | 
				
			||||||
	// Wait for container restart
 | 
								if pod.Status.Resize == v1.PodResizeStatusInfeasible {
 | 
				
			||||||
	gomega.Eventually(ctx, waitForContainerRestart, timeouts.PodStartShort, timeouts.Poll).
 | 
									// This is a terminal resize state
 | 
				
-		WithArguments(podClient, pod, expectedContainers, initialContainers, isRollback).
-		ShouldNot(gomega.HaveOccurred(), "failed waiting for expected container restart")
-
-	// Verify Pod Containers Cgroup Values
-	gomega.Eventually(ctx, VerifyPodContainersCgroupValues, timeouts.PodStartShort, timeouts.Poll).
-		WithArguments(f, patchedPod, expectedContainers).
-		ShouldNot(gomega.HaveOccurred(), "failed to verify container cgroup values to match expected")
-
-	// Wait for pod resource allocations to equal expected values after resize
-	gomega.Eventually(ctx, func() error {
-		resizedPod, pErr = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
-		if pErr != nil {
-			return pErr
-		}
-		return VerifyPodAllocations(resizedPod, expectedContainers)
-	}, timeouts.PodStartShort, timeouts.Poll).
-		ShouldNot(gomega.HaveOccurred(), "timed out waiting for pod resource allocation values to match expected")
+				return false, fmt.Errorf("resize is infeasible")
+			}
+			return pod.Status.Resize == "", nil
+		}), "pod should finish resizing")
+
+	resizedPod, err := framework.GetObject(podClient.Get, pod.Name, metav1.GetOptions{})(ctx)
+	framework.ExpectNoError(err, "failed to get resized pod")
 	return resizedPod
 }
 
+func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v1.Pod, expectedContainers []ResizableContainerInfo) {
+	ginkgo.GinkgoHelper()
+
+	// Put each error on a new line for readability.
+	formatErrors := func(err error) error {
+		var agg utilerrors.Aggregate
+		if !errors.As(err, &agg) {
+			return err
+		}
+
+		errStrings := make([]string, len(agg.Errors()))
+		for i, err := range agg.Errors() {
+			errStrings[i] = err.Error()
+		}
+		return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
+	}
+
+	// Verify Pod Containers Cgroup Values
+	var errs []error
+	if cgroupErrs := VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers); cgroupErrs != nil {
+		errs = append(errs, fmt.Errorf("container cgroup values don't match expected: %w", formatErrors(cgroupErrs)))
+	}
+	if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
+		errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
+	}
+	if restartErrs := verifyContainerRestarts(resizedPod, expectedContainers); restartErrs != nil {
+		errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
+	}
+
+	if len(errs) > 0 {
+		resizedPod.ManagedFields = nil // Suppress managed fields in error output.
+		framework.ExpectNoError(formatErrors(utilerrors.NewAggregate(errs)),
+			"Verifying pod resources resize state. Pod: %s", framework.PrettyPrintJSON(resizedPod))
+	}
+}
+
 // ResizeContainerPatch generates a patch string to resize the pod container.
 func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {
 	var patch patchSpec
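The formatErrors helper added above flattens an apimachinery error Aggregate so that each sub-error prints on its own line instead of being run together. A minimal standalone sketch of the same idiom, assuming only k8s.io/apimachinery as a dependency:

package main

import (
	"errors"
	"fmt"
	"strings"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// formatErrors unwraps a utilerrors.Aggregate (if err wraps one) and reprints
// its sub-errors one per line; any other error is returned unchanged.
func formatErrors(err error) error {
	var agg utilerrors.Aggregate
	if !errors.As(err, &agg) {
		return err
	}
	errStrings := make([]string, len(agg.Errors()))
	for i, err := range agg.Errors() {
		errStrings[i] = err.Error()
	}
	return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
}

func main() {
	agg := utilerrors.NewAggregate([]error{
		errors.New("container cgroup values don't match expected"),
		errors.New("container restart counts don't match expected"),
	})
	// Prints each sub-error on its own line inside [ ... ].
	fmt.Println(formatErrors(agg))
}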
@@ -38,8 +38,6 @@ import (
 )
 
 func doPodResizeResourceQuotaTests(f *framework.Framework) {
-	timeouts := framework.NewTimeoutContext()
-
 	ginkgo.It("pod-resize-resource-quota-test", func(ctx context.Context) {
 		podClient := e2epod.NewPodClient(f)
 		resourceQuota := v1.ResourceQuota{
@@ -92,7 +90,7 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
 		newPod1 := podClient.CreateSync(ctx, testPod1)
 		newPod2 := podClient.CreateSync(ctx, testPod2)
 
-		ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
+		ginkgo.By("verifying initial pod resources, and policy are as expected")
 		e2epod.VerifyPodResources(newPod1, containers)
 
 		ginkgo.By("patching pod for resize within resource quota")
@@ -102,23 +100,14 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
 
 		ginkgo.By("verifying pod patched for resize within resource quota")
 		e2epod.VerifyPodResources(patchedPod, expected)
-		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-			WithArguments(patchedPod, containers).
-			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
 
 		ginkgo.By("waiting for resize to be actuated")
-		resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod1, patchedPod, expected, containers, false)
-		ginkgo.By("verifying pod container's cgroup values after resize")
-		framework.ExpectNoError(e2epod.VerifyPodContainersCgroupValues(ctx, f, resizedPod, expected))
+		resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod1)
+		e2epod.ExpectPodResized(ctx, f, resizedPod, expected)
 
 		ginkgo.By("verifying pod resources after resize")
 		e2epod.VerifyPodResources(resizedPod, expected)
 
-		ginkgo.By("verifying pod allocations after resize")
-		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-			WithArguments(resizedPod, expected).
-			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
-
 		ginkgo.By("patching pod for resize with memory exceeding resource quota")
 		_, pErrExceedMemory := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(ctx,
 			resizedPod.Name, types.StrategicMergePatchType, []byte(patchStringExceedMemory), metav1.PatchOptions{})
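With ExpectPodResized doing a one-shot verification after actuation, the quota test no longer needs its gomega.Eventually polling loops, which is also why the framework.NewTimeoutContext() that fed them their timeouts was dropped above. For reference, a minimal standalone sketch of the polling idiom being removed; checkAllocations is a hypothetical stand-in for e2epod.VerifyPodAllocations, and the example assumes a gomega version that passes a leading context into the polled function:

package e2e_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// TestEventuallyPolling re-invokes a check function until it returns nil or
// the timeout expires -- the idiom the deleted VerifyPodAllocations calls used.
func TestEventuallyPolling(t *testing.T) {
	g := gomega.NewWithT(t)
	start := time.Now()

	// checkAllocations fails until 50ms have elapsed, simulating pod status
	// that converges some time after a resize is actuated.
	checkAllocations := func(ctx context.Context) error {
		if time.Since(start) < 50*time.Millisecond {
			return errors.New("allocated resources not yet updated")
		}
		return nil
	}

	// Poll every 10ms for up to 2s; Succeed() matches a nil error return.
	g.Eventually(context.Background(), checkAllocations, 2*time.Second, 10*time.Millisecond).
		Should(gomega.Succeed(), "timed out waiting for allocations to converge")
}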
@@ -129,9 +118,7 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
 		patchedPodExceedMemory, pErrEx2 := podClient.Get(ctx, resizedPod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(pErrEx2, "failed to get pod post exceed memory resize")
 		e2epod.VerifyPodResources(patchedPodExceedMemory, expected)
-		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-			WithArguments(patchedPodExceedMemory, expected).
-			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
+		framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedMemory, expected))
 
 		ginkgo.By(fmt.Sprintf("patching pod %s for resize with CPU exceeding resource quota", resizedPod.Name))
 		_, pErrExceedCPU := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(ctx,
@@ -143,9 +130,7 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
 		patchedPodExceedCPU, pErrEx1 := podClient.Get(ctx, resizedPod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(pErrEx1, "failed to get pod post exceed CPU resize")
 		e2epod.VerifyPodResources(patchedPodExceedCPU, expected)
-		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
-			WithArguments(patchedPodExceedCPU, expected).
-			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
+		framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedMemory, expected))
 
 		ginkgo.By("deleting pods")
 		delErr1 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod1)
@@ -500,6 +500,12 @@
     lockToDefault: false
     preRelease: Alpha
     version: "1.27"
+- name: InPlacePodVerticalScalingAllocatedStatus
+  versionedSpecs:
+  - default: false
+    lockToDefault: false
+    preRelease: Alpha
+    version: "1.32"
 - name: InTreePluginPortworxUnregister
   versionedSpecs:
   - default: false
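The new versionedSpecs entry registers InPlacePodVerticalScalingAllocatedStatus as Alpha, default off, starting in 1.32. A minimal sketch of how such a spec maps onto a component-base feature gate, using the simpler non-versioned featuregate API for brevity (the actual in-tree registration differs):

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"

func main() {
	gate := featuregate.NewFeatureGate()
	// Mirror the spec above: Alpha, default false, not locked to default.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		InPlacePodVerticalScalingAllocatedStatus: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled(InPlacePodVerticalScalingAllocatedStatus)) // false

	// Flipping the gate, as a --feature-gates flag would on a component:
	if err := gate.Set("InPlacePodVerticalScalingAllocatedStatus=true"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(InPlacePodVerticalScalingAllocatedStatus)) // true
}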