diff --git a/pkg/api/pod/util.go b/pkg/api/pod/util.go index 0dcaff3bd8f..a00c82cc083 100644 --- a/pkg/api/pod/util.go +++ b/pkg/api/pod/util.go @@ -19,7 +19,7 @@ package pod import ( "strings" - "github.com/google/go-cmp/cmp" //nolint:depguard + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metavalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/sets" @@ -386,6 +386,7 @@ func GetValidationOptionsFromPodSpecAndMeta(podSpec, oldPodSpec *api.PodSpec, po AllowPodLifecycleSleepActionZeroValue: utilfeature.DefaultFeatureGate.Enabled(features.PodLifecycleSleepActionAllowZero), PodLevelResourcesEnabled: utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources), AllowInvalidLabelValueInRequiredNodeAffinity: false, + AllowSidecarResizePolicy: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), } // If old spec uses relaxed validation or enabled the RelaxedEnvironmentVariableValidation feature gate, @@ -419,7 +420,7 @@ func GetValidationOptionsFromPodSpecAndMeta(podSpec, oldPodSpec *api.PodSpec, po opts.AllowPodLifecycleSleepActionZeroValue = opts.AllowPodLifecycleSleepActionZeroValue || podLifecycleSleepActionZeroValueInUse(podSpec) // If oldPod has resize policy set on the restartable init container, we must allow it - opts.AllowSidecarResizePolicy = hasRestartableInitContainerResizePolicy(oldPodSpec) + opts.AllowSidecarResizePolicy = opts.AllowSidecarResizePolicy || hasRestartableInitContainerResizePolicy(oldPodSpec) } if oldPodMeta != nil && !opts.AllowInvalidPodDeletionCost { // This is an update, so validate only if the existing object was valid. @@ -1092,7 +1093,8 @@ func inPlacePodVerticalScalingInUse(podSpec *api.PodSpec) bool { return false } var inUse bool - VisitContainers(podSpec, Containers, func(c *api.Container, containerType ContainerType) bool { + containersMask := Containers | InitContainers + VisitContainers(podSpec, containersMask, func(c *api.Container, containerType ContainerType) bool { if len(c.ResizePolicy) > 0 { inUse = true return false @@ -1289,19 +1291,35 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) { return } + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && len(newPod.Spec.InitContainers) != len(oldPod.Spec.InitContainers) { + return + } + for i, c := range newPod.Spec.Containers { if c.Name != oldPod.Spec.Containers[i].Name { return // Update is invalid (container mismatch): let validation handle it. } - if c.Resources.Requests == nil { - continue - } - if cmp.Equal(oldPod.Spec.Containers[i].Resources, c.Resources) { + if apiequality.Semantic.DeepEqual(oldPod.Spec.Containers[i].Resources, c.Resources) { continue } newPod.Status.Resize = api.PodResizeStatusProposed return } + + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for i, c := range newPod.Spec.InitContainers { + if IsRestartableInitContainer(&c) { + if c.Name != oldPod.Spec.InitContainers[i].Name { + return // Update is invalid (container mismatch): let validation handle it. 
+ } + if apiequality.Semantic.DeepEqual(oldPod.Spec.InitContainers[i].Resources, c.Resources) { + continue + } + newPod.Status.Resize = api.PodResizeStatusProposed + return + } + } + } } // KEP: https://kep.k8s.io/4639 diff --git a/pkg/api/pod/util_test.go b/pkg/api/pod/util_test.go index 31feb802369..81897f4c484 100644 --- a/pkg/api/pod/util_test.go +++ b/pkg/api/pod/util_test.go @@ -2955,6 +2955,8 @@ func TestDropSidecarContainers(t *testing.T) { } func TestMarkPodProposedForResize(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true) + containerRestartPolicyAlways := api.ContainerRestartPolicyAlways testCases := []struct { desc string newPodSpec api.PodSpec @@ -3213,6 +3215,378 @@ func TestMarkPodProposedForResize(t *testing.T) { }, expectProposedResize: false, }, + { + desc: "resources unchanged with sidecar containers", + newPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + oldPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + expectProposedResize: false, + }, + { + desc: "requests resized with sidecar containers", + newPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + oldPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + expectProposedResize: true, + }, + { + desc: "limits 
resized with sidecar containers", + newPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + oldPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("500m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + expectProposedResize: true, + }, + { + desc: "requests resized should fail with non-sidecar init container", + newPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + }, + }, + }, + oldPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + }, + }, + }, + }, + expectProposedResize: false, + }, + { + desc: "limits resized should fail with non-sidecar init containers", + newPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + }, + }, + }, + oldPodSpec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "c1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + }, + }, + InitContainers: []api.Container{ + 
{ + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("500m")}, + }, + }, + }, + }, + expectProposedResize: false, + }, + { + desc: "the number of sidecar containers in the pod has increased; no action should be taken.", + newPodSpec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + { + Name: "i2", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + oldPodSpec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + expectProposedResize: false, + }, + { + desc: "the number of sidecar containers in the pod has decreased; no action should be taken.", + newPodSpec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + oldPodSpec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + { + Name: "i2", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + expectProposedResize: false, + }, + { + desc: "sidecar containers reordered", + newPodSpec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + { + Name: "i2", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + oldPodSpec: api.PodSpec{ + InitContainers: []api.Container{ + { + Name: "i2", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + { + 
Name: "i1", + Image: "image", + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}, + Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + }, + expectProposedResize: false, + }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 179a7b0900b..72224863802 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -5413,16 +5413,16 @@ func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.Contai } // Skip any restartable init container that is allowed to restart - isRestartableInitContainer := false + isRestartableInitCtr := false for _, c := range podSpec.InitContainers { if oldStatus.Name == c.Name { - if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways { - isRestartableInitContainer = true + if isRestartableInitContainer(&c) { + isRestartableInitCtr = true } break } } - if isRestartableInitContainer { + if isRestartableInitCtr { continue } @@ -5605,7 +5605,8 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel } // Part 2: Validate that the changes between oldPod.Spec.Containers[].Resources and - // newPod.Spec.Containers[].Resources are allowed. + // newPod.Spec.Containers[].Resources are allowed. Also validate that the changes between oldPod.Spec.InitContainers[].Resources and + // newPod.Spec.InitContainers[].Resources are allowed. specPath := field.NewPath("spec") if qos.GetPodQOS(oldPod) != qos.ComputePodQOS(newPod) { allErrs = append(allErrs, field.Invalid(specPath, newPod.Status.QOSClass, "Pod QOS Class may not change as a result of resizing")) @@ -5618,7 +5619,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel // Do not allow removing resource requests/limits on resize. if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { for ix, ctr := range oldPod.Spec.InitContainers { - if ctr.RestartPolicy != nil && *ctr.RestartPolicy != core.ContainerRestartPolicyAlways { + if !isRestartableInitContainer(&ctr) { continue } if resourcesRemoved(newPod.Spec.InitContainers[ix].Resources.Requests, ctr.Resources.Requests) { @@ -5638,46 +5639,82 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel } } - // Ensure that only CPU and memory resources are mutable. + // Ensure that only CPU and memory resources are mutable for regular containers. 
originalCPUMemPodSpec := *newPod.Spec.DeepCopy() var newContainers []core.Container for ix, container := range originalCPUMemPodSpec.Containers { - dropCPUMemoryUpdates := func(resourceList, oldResourceList core.ResourceList) core.ResourceList { - if oldResourceList == nil { - return nil - } - var mungedResourceList core.ResourceList - if resourceList == nil { - mungedResourceList = make(core.ResourceList) - } else { - mungedResourceList = resourceList.DeepCopy() - } - delete(mungedResourceList, core.ResourceCPU) - delete(mungedResourceList, core.ResourceMemory) - if cpu, found := oldResourceList[core.ResourceCPU]; found { - mungedResourceList[core.ResourceCPU] = cpu - } - if mem, found := oldResourceList[core.ResourceMemory]; found { - mungedResourceList[core.ResourceMemory] = mem - } - return mungedResourceList + dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.Containers[ix]) + if !apiequality.Semantic.DeepEqual(container, oldPod.Spec.Containers[ix]) { + // This likely means that the user has made changes to resources other than CPU and memory for regular container. + errs := field.Forbidden(specPath, "only cpu and memory resources are mutable") + allErrs = append(allErrs, errs) } - lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits) - req := dropCPUMemoryUpdates(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests) - container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} - container.ResizePolicy = oldPod.Spec.Containers[ix].ResizePolicy // +k8s:verify-mutation:reason=clone newContainers = append(newContainers, container) } originalCPUMemPodSpec.Containers = newContainers + + // Ensure that only CPU and memory resources are mutable for restartable init containers. + // Also ensure that resources are immutable for non-restartable init containers. + var newInitContainers []core.Container + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for ix, container := range originalCPUMemPodSpec.InitContainers { + if isRestartableInitContainer(&container) { // restartable init container + dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix]) + if !apiequality.Semantic.DeepEqual(container, oldPod.Spec.InitContainers[ix]) { + // This likely means that the user has made changes to resources other than CPU and memory for sidecar container. + errs := field.Forbidden(specPath, "only cpu and memory resources for sidecar containers are mutable") + allErrs = append(allErrs, errs) + } + } else if !apiequality.Semantic.DeepEqual(container, oldPod.Spec.InitContainers[ix]) { // non-restartable init container + // This likely means that the user has modified resources of non-sidecar init container. + errs := field.Forbidden(specPath, "resources for non-sidecar init containers are immutable") + allErrs = append(allErrs, errs) + } + newInitContainers = append(newInitContainers, container) + } + originalCPUMemPodSpec.InitContainers = newInitContainers + } + + if len(allErrs) > 0 { + return allErrs + } + if !apiequality.Semantic.DeepEqual(originalCPUMemPodSpec, oldPod.Spec) { // This likely means that the user has made changes to resources other than CPU and Memory. 
- specDiff := cmp.Diff(oldPod.Spec, originalCPUMemPodSpec) - errs := field.Forbidden(specPath, fmt.Sprintf("only cpu and memory resources are mutable\n%v", specDiff)) + errs := field.Forbidden(specPath, "only cpu and memory resources are mutable") allErrs = append(allErrs, errs) } return allErrs } +// dropCPUMemoryResourcesFromContainer deletes the cpu and memory resources from the container, and copies them from the old pod container resources if present. +func dropCPUMemoryResourcesFromContainer(container *core.Container, oldPodSpecContainer *core.Container) { + dropCPUMemoryUpdates := func(resourceList, oldResourceList core.ResourceList) core.ResourceList { + if oldResourceList == nil { + return nil + } + var mungedResourceList core.ResourceList + if resourceList == nil { + mungedResourceList = make(core.ResourceList) + } else { + mungedResourceList = resourceList.DeepCopy() + } + delete(mungedResourceList, core.ResourceCPU) + delete(mungedResourceList, core.ResourceMemory) + if cpu, found := oldResourceList[core.ResourceCPU]; found { + mungedResourceList[core.ResourceCPU] = cpu + } + if mem, found := oldResourceList[core.ResourceMemory]; found { + mungedResourceList[core.ResourceMemory] = mem + } + return mungedResourceList + } + lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPodSpecContainer.Resources.Limits) + req := dropCPUMemoryUpdates(container.Resources.Requests, oldPodSpecContainer.Resources.Requests) + container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} + container.ResizePolicy = oldPodSpecContainer.ResizePolicy // +k8s:verify-mutation:reason=clone +} + // isPodResizeRequestSupported checks whether the pod is running on a node with InPlacePodVerticalScaling enabled. func isPodResizeRequestSupported(pod core.Pod) bool { // TODO: Remove this after GA+3 releases of InPlacePodVerticalScaling @@ -8604,3 +8641,11 @@ func validateImageVolumeSource(imageVolume *core.ImageVolumeSource, fldPath *fie allErrs = append(allErrs, validatePullPolicy(imageVolume.PullPolicy, fldPath.Child("pullPolicy"))...) return allErrs } + +// isRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways. 
+func isRestartableInitContainer(initContainer *core.Container) bool { + if initContainer == nil || initContainer.RestartPolicy == nil { + return false + } + return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways +} diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index c8498406103..1619bbdb973 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -10301,11 +10301,6 @@ func TestValidatePodSpec(t *testing.T) { FSGroupChangePolicy: &badfsGroupChangePolicy1, }), ), - "disallowed resources resize policy for init containers": *podtest.MakePod("", - podtest.SetInitContainers(podtest.MakeContainer("initctr", podtest.SetContainerResizePolicy( - core.ContainerResizePolicy{ResourceName: "cpu", RestartPolicy: "NotRequired"}, - ))), - ), } for k, v := range failureCases { opts := PodValidationOptions{ @@ -25936,9 +25931,8 @@ func TestValidatePodResize(t *testing.T) { test: "Pod QoS unchanged, burstable -> burstable, remove cpu and memory requests", old: mkPodWithInitContainers(getResources("100m", "100Mi", "", ""), getResources("100m", "", "", ""), ""), new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "", "", ""), ""), - err: "spec: Forbidden: only cpu and memory resources are mutable", - }, - { + err: "spec: Forbidden: resources for non-sidecar init containers are immutable", + }, { test: "Pod with nil Resource field in Status", old: mkPod(core.ResourceList{}, getResources("100m", "0", "1Gi", ""), podtest.SetStatus(core.PodStatus{ ContainerStatuses: []core.ContainerStatus{{ @@ -26005,6 +25999,36 @@ func TestValidatePodResize(t *testing.T) { })), new: mkPod(core.ResourceList{}, getResources("200m", "0", "1Gi", "")), err: "", + }, { + test: "cpu limit change for sidecar containers", + old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "0", "1Gi", ""), core.ContainerRestartPolicyAlways), + new: mkPodWithInitContainers(core.ResourceList{}, getResources("200m", "0", "1Gi", ""), core.ContainerRestartPolicyAlways), + err: "", + }, { + test: "memory limit change for sidecar containers", + old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "200Mi", "", ""), core.ContainerRestartPolicyAlways), + new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "", ""), core.ContainerRestartPolicyAlways), + err: "", + }, { + test: "storage limit change for sidecar containers", + old: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "2Gi", ""), core.ContainerRestartPolicyAlways), + new: mkPodWithInitContainers(core.ResourceList{}, getResources("100m", "100Mi", "1Gi", ""), core.ContainerRestartPolicyAlways), + err: "spec: Forbidden: only cpu and memory resources for sidecar containers are mutable", + }, { + test: "cpu request change for sidecar containers", + old: mkPodWithInitContainers(getResources("200m", "0", "", ""), core.ResourceList{}, core.ContainerRestartPolicyAlways), + new: mkPodWithInitContainers(getResources("100m", "0", "", ""), core.ResourceList{}, core.ContainerRestartPolicyAlways), + err: "", + }, { + test: "memory request change for sidecar containers", + old: mkPodWithInitContainers(getResources("0", "100Mi", "", ""), core.ResourceList{}, core.ContainerRestartPolicyAlways), + new: mkPodWithInitContainers(getResources("0", "200Mi", "", ""), core.ResourceList{}, core.ContainerRestartPolicyAlways), + err: "", + }, { + test: "storage request change for sidecar containers", 
+ old: mkPodWithInitContainers(getResources("100m", "0", "1Gi", ""), core.ResourceList{}, core.ContainerRestartPolicyAlways), + new: mkPodWithInitContainers(getResources("100m", "0", "2Gi", ""), core.ResourceList{}, core.ContainerRestartPolicyAlways), + err: "spec: Forbidden: only cpu and memory resources for sidecar containers are mutable", }, } diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go index 26d1fc6d91b..5d470e73db3 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/pkg/kubelet/cm/cpumanager/policy_static.go @@ -464,7 +464,13 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int // We should return this value because this is what kubelet agreed to allocate for the container // and the value configured with runtime. if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok { + containerStatuses := pod.Status.ContainerStatuses + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && podutil.IsRestartableInitContainer(container) { + if len(pod.Status.InitContainerStatuses) != 0 { + containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...) + } + } + if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { cpuQuantity = cs.AllocatedResources[v1.ResourceCPU] } } diff --git a/pkg/kubelet/cm/memorymanager/policy_static.go b/pkg/kubelet/cm/memorymanager/policy_static.go index 115e74afa27..b33a91eeff3 100644 --- a/pkg/kubelet/cm/memorymanager/policy_static.go +++ b/pkg/kubelet/cm/memorymanager/policy_static.go @@ -447,7 +447,13 @@ func getRequestedResources(pod *v1.Pod, container *v1.Container) (map[v1.Resourc // We should return this value because this is what kubelet agreed to allocate for the container // and the value configured with runtime. if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) { - if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok { + containerStatuses := pod.Status.ContainerStatuses + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && podutil.IsRestartableInitContainer(container) { + if len(pod.Status.InitContainerStatuses) != 0 { + containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...) 
+ } + } + if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok { resources = cs.AllocatedResources } } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index ada3e7517e8..479e11d3e2f 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -2812,30 +2812,6 @@ func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) { } } -func isPodResizeInProgress(pod *v1.Pod, podStatus *kubecontainer.PodStatus) bool { - for _, c := range pod.Spec.Containers { - if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil { - if cs.State != kubecontainer.ContainerStateRunning || cs.Resources == nil { - continue - } - if c.Resources.Requests != nil { - if cs.Resources.CPURequest != nil && !cs.Resources.CPURequest.Equal(*c.Resources.Requests.Cpu()) { - return true - } - } - if c.Resources.Limits != nil { - if cs.Resources.CPULimit != nil && !cs.Resources.CPULimit.Equal(*c.Resources.Limits.Cpu()) { - return true - } - if cs.Resources.MemoryLimit != nil && !cs.Resources.MemoryLimit.Equal(*c.Resources.Limits.Memory()) { - return true - } - } - } - } - return false -} - // canResizePod determines if the requested resize is currently feasible. // pod should hold the desired (pre-allocated) spec. // Returns true if the resize can proceed. @@ -2930,6 +2906,14 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine kl.backOff.Reset(key) } } + for i, container := range pod.Spec.InitContainers { + if podutil.IsRestartableInitContainer(&container) { + if !apiequality.Semantic.DeepEqual(container.Resources, allocatedPod.Spec.InitContainers[i].Resources) { + key := kuberuntime.GetStableKey(pod, &container) + kl.backOff.Reset(key) + } + } + } allocatedPod = pod // Special case when the updated allocation matches the actual resources. This can occur diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index e6a3d81d5ac..ea829014121 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1764,60 +1764,75 @@ func (kl *Kubelet) determinePodResizeStatus(allocatedPod *v1.Pod, podStatus *kub // resources reported in the status. func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool { for _, c := range allocatedPod.Spec.Containers { - if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil { - if cs.State != kubecontainer.ContainerStateRunning { - // If the container isn't running, it isn't resizing. - continue + if !allocatedContainerResourcesMatchStatus(allocatedPod, &c, podStatus) { + return false + } + } + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for _, c := range allocatedPod.Spec.InitContainers { + if podutil.IsRestartableInitContainer(&c) && !allocatedContainerResourcesMatchStatus(allocatedPod, &c, podStatus) { + return false } + } + } + return true +} - cpuReq, hasCPUReq := c.Resources.Requests[v1.ResourceCPU] - cpuLim, hasCPULim := c.Resources.Limits[v1.ResourceCPU] - memLim, hasMemLim := c.Resources.Limits[v1.ResourceMemory] +// allocatedContainerResourcesMatchStatus returns true if the container resources matches with the container statuses resources. +func allocatedContainerResourcesMatchStatus(allocatedPod *v1.Pod, c *v1.Container, podStatus *kubecontainer.PodStatus) bool { + if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil { + if cs.State != kubecontainer.ContainerStateRunning { + // If the container isn't running, it isn't resizing. 
+ return true + } - if cs.Resources == nil { - if hasCPUReq || hasCPULim || hasMemLim { - // Container status is missing Resources information, but the container does - // have resizable resources configured. - klog.ErrorS(nil, "Missing runtime resources information for resizing container", - "pod", format.Pod(allocatedPod), "container", c.Name) - return false // We don't want to clear resize status with insufficient information. - } else { - // No resizable resources configured; this might be ok. - continue - } + cpuReq, hasCPUReq := c.Resources.Requests[v1.ResourceCPU] + cpuLim, hasCPULim := c.Resources.Limits[v1.ResourceCPU] + memLim, hasMemLim := c.Resources.Limits[v1.ResourceMemory] + + if cs.Resources == nil { + if hasCPUReq || hasCPULim || hasMemLim { + // Container status is missing Resources information, but the container does + // have resizable resources configured. + klog.ErrorS(nil, "Missing runtime resources information for resizing container", + "pod", format.Pod(allocatedPod), "container", c.Name) + return false // We don't want to clear resize status with insufficient information. + } else { + // No resizable resources configured; this might be ok. + return true } + } - // Only compare resizeable resources, and only compare resources that are explicitly configured. - if hasCPUReq { - if cs.Resources.CPURequest == nil { - if !cpuReq.IsZero() { - return false - } - } else if !cpuReq.Equal(*cs.Resources.CPURequest) && - (cpuReq.MilliValue() > cm.MinShares || cs.Resources.CPURequest.MilliValue() > cm.MinShares) { - // If both allocated & status CPU requests are at or below MinShares then they are considered equal. + // Only compare resizeable resources, and only compare resources that are explicitly configured. + if hasCPUReq { + if cs.Resources.CPURequest == nil { + if !cpuReq.IsZero() { return false } + } else if !cpuReq.Equal(*cs.Resources.CPURequest) && + (cpuReq.MilliValue() > cm.MinShares || cs.Resources.CPURequest.MilliValue() > cm.MinShares) { + // If both allocated & status CPU requests are at or below MinShares then they are considered equal. + return false } - if hasCPULim { - if cs.Resources.CPULimit == nil { - if !cpuLim.IsZero() { - return false - } - } else if !cpuLim.Equal(*cs.Resources.CPULimit) && - (cpuLim.MilliValue() > cm.MinMilliCPULimit || cs.Resources.CPULimit.MilliValue() > cm.MinMilliCPULimit) { - // If both allocated & status CPU limits are at or below the minimum limit, then they are considered equal. + } + if hasCPULim { + if cs.Resources.CPULimit == nil { + if !cpuLim.IsZero() { return false } + } else if !cpuLim.Equal(*cs.Resources.CPULimit) && + (cpuLim.MilliValue() > cm.MinMilliCPULimit || cs.Resources.CPULimit.MilliValue() > cm.MinMilliCPULimit) { + // If both allocated & status CPU limits are at or below the minimum limit, then they are considered equal. 
+ return false } - if hasMemLim { - if cs.Resources.MemoryLimit == nil { - if !memLim.IsZero() { - return false - } - } else if !memLim.Equal(*cs.Resources.MemoryLimit) { + } + if hasMemLim { + if cs.Resources.MemoryLimit == nil { + if !memLim.IsZero() { return false } + } else if !memLim.Equal(*cs.Resources.MemoryLimit) { + return false } } } diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index e5fb7176d5e..4fdf947485b 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -6671,6 +6671,8 @@ func TestResolveRecursiveReadOnly(t *testing.T) { } func TestAllocatedResourcesMatchStatus(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true) + containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways tests := []struct { name string allocatedResources v1.ResourceRequirements @@ -6875,35 +6877,62 @@ func TestAllocatedResourcesMatchStatus(t *testing.T) { }} for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - allocatedPod := v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Name: "c", - Resources: test.allocatedResources, - }}, - }, - } - state := kubecontainer.ContainerStateRunning - if test.statusTerminated { - state = kubecontainer.ContainerStateExited - } - podStatus := &kubecontainer.PodStatus{ - Name: "test", - ContainerStatuses: []*kubecontainer.Status{ - { - Name: "c", - State: state, - Resources: test.statusResources, - }, - }, + for _, isSidecarContainer := range []bool{false, true} { + if isSidecarContainer { + test.name += " " + "for sidecar containers" } + t.Run(test.name, func(t *testing.T) { + var podStatus *kubecontainer.PodStatus + state := kubecontainer.ContainerStateRunning + if test.statusTerminated { + state = kubecontainer.ContainerStateExited + } - match := allocatedResourcesMatchStatus(&allocatedPod, podStatus) - assert.Equal(t, test.expectMatch, match) - }) + allocatedPod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + + if isSidecarContainer { + allocatedPod.Spec = v1.PodSpec{ + InitContainers: []v1.Container{{ + Name: "c1-init", + Resources: test.allocatedResources, + RestartPolicy: &containerRestartPolicyAlways, + }}, + } + podStatus = &kubecontainer.PodStatus{ + Name: "test", + ContainerStatuses: []*kubecontainer.Status{ + { + Name: "c1-init", + State: state, + Resources: test.statusResources, + }, + }, + } + } else { + allocatedPod.Spec = v1.PodSpec{ + Containers: []v1.Container{{ + Name: "c", + Resources: test.allocatedResources, + }}, + } + podStatus = &kubecontainer.PodStatus{ + Name: "test", + ContainerStatuses: []*kubecontainer.Status{ + { + Name: "c", + State: state, + Resources: test.statusResources, + }, + }, + } + } + match := allocatedResourcesMatchStatus(&allocatedPod, podStatus) + assert.Equal(t, test.expectMatch, match) + }) + } } } diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 6d67fdee7d6..16786d6c5b9 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -2584,9 +2584,11 @@ func TestPodResourceAllocationReset(t *testing.T) { func TestHandlePodResourcesResize(t *testing.T) { featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true) + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true) testKubelet := newTestKubelet(t, false) defer 
testKubelet.Cleanup() kubelet := testKubelet.kubelet + containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways cpu1m := resource.MustParse("1m") cpu2m := resource.MustParse("2m") @@ -2651,6 +2653,28 @@ func TestHandlePodResourcesResize(t *testing.T) { testPod2.UID = "2222" testPod2.Name = "pod2" testPod2.Namespace = "ns2" + testPod2.Spec = v1.PodSpec{ + InitContainers: []v1.Container{ + { + Name: "c1-init", + Image: "i1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + }, + } + testPod2.Status = v1.PodStatus{ + Phase: v1.PodRunning, + InitContainerStatuses: []v1.ContainerStatus{ + { + Name: "c1-init", + AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}, + Resources: &v1.ResourceRequirements{}, + }, + }, + } testPod3 := testPod1.DeepCopy() testPod3.UID = "3333" testPod3.Name = "pod3" @@ -2842,72 +2866,104 @@ func TestHandlePodResourcesResize(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - oldGOOS := goos - defer func() { goos = oldGOOS }() - if tt.goos != "" { - goos = tt.goos - } - kubelet.statusManager = status.NewFakeManager() - - originalPod := testPod1.DeepCopy() - originalPod.Spec.Containers[0].Resources.Requests = tt.originalRequests - originalPod.Spec.Containers[0].Resources.Limits = tt.originalLimits - kubelet.podManager.UpdatePod(originalPod) - - newPod := originalPod.DeepCopy() - newPod.Spec.Containers[0].Resources.Requests = tt.newRequests - newPod.Spec.Containers[0].Resources.Limits = tt.newLimits - - if !tt.newResourcesAllocated { - require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod)) - } else { - require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod)) - } - - podStatus := &kubecontainer.PodStatus{ - ID: originalPod.UID, - Name: originalPod.Name, - Namespace: originalPod.Namespace, - ContainerStatuses: make([]*kubecontainer.Status, len(originalPod.Spec.Containers)), - } - for i, c := range originalPod.Spec.Containers { - podStatus.ContainerStatuses[i] = &kubecontainer.Status{ - Name: c.Name, - State: kubecontainer.ContainerStateRunning, - Resources: &kubecontainer.ContainerResources{ - CPURequest: c.Resources.Requests.Cpu(), - CPULimit: c.Resources.Limits.Cpu(), - MemoryLimit: c.Resources.Limits.Memory(), - }, + for _, isSidecarContainer := range []bool{false, true} { + t.Run(tt.name, func(t *testing.T) { + oldGOOS := goos + defer func() { goos = oldGOOS }() + if tt.goos != "" { + goos = tt.goos } - } + kubelet.statusManager = status.NewFakeManager() - now := kubelet.clock.Now() - // Put the container in backoff so we can confirm backoff is reset. 
- backoffKey := kuberuntime.GetStableKey(originalPod, &originalPod.Spec.Containers[0]) - kubelet.backOff.Next(backoffKey, now) + var originalPod *v1.Pod + var originalCtr *v1.Container + if isSidecarContainer { + originalPod = testPod2.DeepCopy() + originalCtr = &originalPod.Spec.InitContainers[0] + } else { + originalPod = testPod1.DeepCopy() + originalCtr = &originalPod.Spec.Containers[0] + } + originalCtr.Resources.Requests = tt.originalRequests + originalCtr.Resources.Limits = tt.originalLimits - updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus) - require.NoError(t, err) - assert.Equal(t, tt.expectedAllocatedReqs, updatedPod.Spec.Containers[0].Resources.Requests, "updated pod spec requests") - assert.Equal(t, tt.expectedAllocatedLims, updatedPod.Spec.Containers[0].Resources.Limits, "updated pod spec limits") + kubelet.podManager.UpdatePod(originalPod) - alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), newPod.Spec.Containers[0].Name) - require.True(t, found, "container allocation") - assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation") - assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation") + newPod := originalPod.DeepCopy() - resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID) - assert.Equal(t, tt.expectedResize, resizeStatus) + if isSidecarContainer { + newPod.Spec.InitContainers[0].Resources.Requests = tt.newRequests + newPod.Spec.InitContainers[0].Resources.Limits = tt.newLimits + } else { + newPod.Spec.Containers[0].Resources.Requests = tt.newRequests + newPod.Spec.Containers[0].Resources.Limits = tt.newLimits + } - isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now) - if tt.expectBackoffReset { - assert.False(t, isInBackoff, "container backoff should be reset") - } else { - assert.True(t, isInBackoff, "container backoff should not be reset") - } - }) + if !tt.newResourcesAllocated { + require.NoError(t, kubelet.statusManager.SetPodAllocation(originalPod)) + } else { + require.NoError(t, kubelet.statusManager.SetPodAllocation(newPod)) + } + + podStatus := &kubecontainer.PodStatus{ + ID: originalPod.UID, + Name: originalPod.Name, + Namespace: originalPod.Namespace, + } + + setContainerStatus := func(podStatus *kubecontainer.PodStatus, c *v1.Container, idx int) { + podStatus.ContainerStatuses[idx] = &kubecontainer.Status{ + Name: c.Name, + State: kubecontainer.ContainerStateRunning, + Resources: &kubecontainer.ContainerResources{ + CPURequest: c.Resources.Requests.Cpu(), + CPULimit: c.Resources.Limits.Cpu(), + MemoryLimit: c.Resources.Limits.Memory(), + }, + } + } + + podStatus.ContainerStatuses = make([]*kubecontainer.Status, len(originalPod.Spec.Containers)+len(originalPod.Spec.InitContainers)) + for i, c := range originalPod.Spec.InitContainers { + setContainerStatus(podStatus, &c, i) + } + for i, c := range originalPod.Spec.Containers { + setContainerStatus(podStatus, &c, i+len(originalPod.Spec.InitContainers)) + } + + now := kubelet.clock.Now() + // Put the container in backoff so we can confirm backoff is reset. 
+ backoffKey := kuberuntime.GetStableKey(originalPod, originalCtr) + kubelet.backOff.Next(backoffKey, now) + + updatedPod, err := kubelet.handlePodResourcesResize(newPod, podStatus) + require.NoError(t, err) + + var updatedPodCtr v1.Container + if isSidecarContainer { + updatedPodCtr = updatedPod.Spec.InitContainers[0] + } else { + updatedPodCtr = updatedPod.Spec.Containers[0] + } + assert.Equal(t, tt.expectedAllocatedReqs, updatedPodCtr.Resources.Requests, "updated pod spec requests") + assert.Equal(t, tt.expectedAllocatedLims, updatedPodCtr.Resources.Limits, "updated pod spec limits") + + alloc, found := kubelet.statusManager.GetContainerResourceAllocation(string(newPod.UID), updatedPodCtr.Name) + require.True(t, found, "container allocation") + assert.Equal(t, tt.expectedAllocatedReqs, alloc.Requests, "stored container request allocation") + assert.Equal(t, tt.expectedAllocatedLims, alloc.Limits, "stored container limit allocation") + + resizeStatus := kubelet.statusManager.GetPodResizeStatus(newPod.UID) + assert.Equal(t, tt.expectedResize, resizeStatus) + + isInBackoff := kubelet.backOff.IsInBackOffSince(backoffKey, now) + if tt.expectBackoffReset { + assert.False(t, isInBackoff, "container backoff should be reset") + } else { + assert.True(t, isInBackoff, "container backoff should not be reset") + } + }) + } } } @@ -3462,135 +3518,6 @@ func TestSyncPodSpans(t *testing.T) { } } -func TestIsPodResizeInProgress(t *testing.T) { - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - UID: "12345", - Name: "test", - Namespace: "default", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Name: "c1", - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI), - }, - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI), - }, - }, - }, { - Name: "c2", - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI), - }, - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI), - }, - }, - }}, - }, - } - steadyStateC1Status := &kubecontainer.Status{ - Name: "c1", - State: kubecontainer.ContainerStateRunning, - Resources: &kubecontainer.ContainerResources{ - CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI), - CPULimit: resource.NewMilliQuantity(300, resource.DecimalSI), - MemoryLimit: resource.NewQuantity(400, resource.DecimalSI), - }, - } - resizeMemC1Status := &kubecontainer.Status{ - Name: "c1", - State: kubecontainer.ContainerStateRunning, - Resources: &kubecontainer.ContainerResources{ - CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI), - CPULimit: resource.NewMilliQuantity(300, resource.DecimalSI), - MemoryLimit: resource.NewQuantity(800, resource.DecimalSI), - }, - } - resizeCPUReqC1Status := &kubecontainer.Status{ - Name: "c1", - State: kubecontainer.ContainerStateRunning, - Resources: &kubecontainer.ContainerResources{ - CPURequest: resource.NewMilliQuantity(200, resource.DecimalSI), - CPULimit: resource.NewMilliQuantity(300, resource.DecimalSI), - MemoryLimit: resource.NewQuantity(400, resource.DecimalSI), - }, - } - resizeCPULimitC1Status := 
&kubecontainer.Status{ - Name: "c1", - State: kubecontainer.ContainerStateRunning, - Resources: &kubecontainer.ContainerResources{ - CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI), - CPULimit: resource.NewMilliQuantity(600, resource.DecimalSI), - MemoryLimit: resource.NewQuantity(400, resource.DecimalSI), - }, - } - steadyStateC2Status := &kubecontainer.Status{ - Name: "c2", - State: kubecontainer.ContainerStateRunning, - Resources: &kubecontainer.ContainerResources{ - CPURequest: resource.NewMilliQuantity(500, resource.DecimalSI), - CPULimit: resource.NewMilliQuantity(700, resource.DecimalSI), - MemoryLimit: resource.NewQuantity(800, resource.DecimalSI), - }, - } - mkPodStatus := func(containerStatuses ...*kubecontainer.Status) *kubecontainer.PodStatus { - return &kubecontainer.PodStatus{ - ID: pod.UID, - Name: pod.Name, - Namespace: pod.Namespace, - ContainerStatuses: containerStatuses, - } - } - tests := []struct { - name string - status *kubecontainer.PodStatus - expectResize bool - }{{ - name: "steady state", - status: mkPodStatus(steadyStateC1Status, steadyStateC2Status), - expectResize: false, - }, { - name: "terminated container", - status: mkPodStatus(&kubecontainer.Status{ - Name: "c1", - State: kubecontainer.ContainerStateExited, - Resources: resizeMemC1Status.Resources, - }, steadyStateC2Status), - expectResize: false, - }, { - name: "missing container", - status: mkPodStatus(steadyStateC2Status), - expectResize: false, - }, { - name: "resizing memory limit", - status: mkPodStatus(resizeMemC1Status, steadyStateC2Status), - expectResize: true, - }, { - name: "resizing cpu request", - status: mkPodStatus(resizeCPUReqC1Status, steadyStateC2Status), - expectResize: true, - }, { - name: "resizing cpu limit", - status: mkPodStatus(resizeCPULimitC1Status, steadyStateC2Status), - expectResize: true, - }} - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - assert.Equal(t, test.expectResize, isPodResizeInProgress(pod, test.status)) - }) - } -} - func TestRecordAdmissionRejection(t *testing.T) { metrics.Register() diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 2f43ddfe63e..88489692461 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -1159,8 +1159,15 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod reason: reasonLivenessProbe, } changes.InitContainersToStart = append(changes.InitContainersToStart, i) + // The container is restarting, so no other actions need to be taken. + break } } + + if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, i, true, status, changes) { + // computePodResizeAction updates 'changes' if resize policy requires restarting this container + break + } } else { // init container // nothing do to but wait for it to finish break diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index f42d97b6e8a..46b53805452 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -473,8 +473,8 @@ type containerResources struct { // containerToUpdateInfo contains necessary information to update a container's resources. type containerToUpdateInfo struct { - // Index of the container in pod.Spec.Containers that needs resource update - apiContainerIdx int + // The spec of the container. 
+ container *v1.Container // ID of the runtime container that needs resource update kubeContainerID kubecontainer.ContainerID // Desired resources for the running container @@ -562,8 +562,14 @@ func IsInPlacePodVerticalScalingAllowed(pod *v1.Pod) bool { // computePodResizeAction determines the actions required (if any) to resize the given container. // Returns whether to keep (true) or restart (false) the container. -func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containerIdx int, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) { - container := pod.Spec.Containers[containerIdx] +// TODO(vibansal): Make this function to be agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`). +func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) { + var container v1.Container + if isRestartableInitContainer { + container = pod.Spec.InitContainers[containerIdx] + } else { + container = pod.Spec.Containers[containerIdx] + } // Determine if the *running* container needs resource update by comparing v1.Spec.Resources (desired) // with v1.Status.Resources / runtime.Status.Resources (last known actual). @@ -633,7 +639,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe } markContainerForUpdate := func(rName v1.ResourceName, specValue, statusValue int64) { cUpdateInfo := containerToUpdateInfo{ - apiContainerIdx: containerIdx, + container: &container, kubeContainerID: kubeContainerStatus.ID, desiredContainerResources: desiredResources, currentContainerResources: ¤tResources, @@ -655,10 +661,14 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe // resize policy requires this container to restart changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{ name: kubeContainerStatus.Name, - container: &pod.Spec.Containers[containerIdx], + container: &container, message: fmt.Sprintf("Container %s resize requires restart", container.Name), } - changes.ContainersToStart = append(changes.ContainersToStart, containerIdx) + if isRestartableInitContainer { + changes.InitContainersToStart = append(changes.InitContainersToStart, containerIdx) + } else { + changes.ContainersToStart = append(changes.ContainersToStart, containerIdx) + } changes.UpdatePodResources = true return false } else { @@ -807,7 +817,7 @@ func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, res klog.V(5).InfoS("Updating container resources", "pod", klog.KObj(pod)) for _, cInfo := range containersToUpdate { - container := pod.Spec.Containers[cInfo.apiContainerIdx].DeepCopy() + container := cInfo.container.DeepCopy() // If updating memory limit, use most recently configured CPU request and limit values. // If updating CPU request and limit, use most recently configured memory request and limit values. switch resourceName { @@ -980,6 +990,10 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod * } } + if IsInPlacePodVerticalScalingAllowed(pod) { + changes.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo) + } + // Check initialization progress. // TODO: Remove this code path as logically it is the subset of the next // code path. 
@@ -1017,10 +1031,6 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod * } } - if IsInPlacePodVerticalScalingAllowed(pod) { - changes.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo) - } - // Number of running containers to keep. keepCount := 0 // check the status of containers. @@ -1075,7 +1085,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod * // If the container failed the startup probe, we should kill it. message = fmt.Sprintf("Container %s failed startup probe", container.Name) reason = reasonStartupProbe - } else if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, containerStatus, &changes) { + } else if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, false, containerStatus, &changes) { // computePodResizeAction updates 'changes' if resize policy requires restarting this container continue } else { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index b60d0b2071c..a60ee64c5a9 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -2225,7 +2225,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ v1.ResourceMemory: { { - apiContainerIdx: 1, + container: &pod.Spec.Containers[1], kubeContainerID: kcs.ID, desiredContainerResources: containerResources{ memoryLimit: mem100M.Value(), @@ -2239,7 +2239,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { }, v1.ResourceCPU: { { - apiContainerIdx: 1, + container: &pod.Spec.Containers[1], kubeContainerID: kcs.ID, desiredContainerResources: containerResources{ memoryLimit: mem100M.Value(), @@ -2278,7 +2278,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ v1.ResourceCPU: { { - apiContainerIdx: 1, + container: &pod.Spec.Containers[1], kubeContainerID: kcs.ID, desiredContainerResources: containerResources{ memoryLimit: mem100M.Value(), @@ -2317,7 +2317,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ v1.ResourceMemory: { { - apiContainerIdx: 2, + container: &pod.Spec.Containers[2], kubeContainerID: kcs.ID, desiredContainerResources: containerResources{ memoryLimit: mem200M.Value(), @@ -2485,7 +2485,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ v1.ResourceMemory: { { - apiContainerIdx: 1, + container: &pod.Spec.Containers[1], kubeContainerID: kcs.ID, desiredContainerResources: containerResources{ memoryLimit: mem200M.Value(), @@ -2525,7 +2525,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ v1.ResourceCPU: { { - apiContainerIdx: 2, + container: &pod.Spec.Containers[2], kubeContainerID: kcs.ID, desiredContainerResources: containerResources{ memoryLimit: mem100M.Value(), @@ -2570,6 +2570,7 @@ func TestComputePodActionsForPodResize(t *testing.T) { func TestUpdatePodContainerResources(t *testing.T) { featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true) + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true) fakeRuntime, _, m, err := createTestRuntimeManager() 
m.machineInfo.MemoryCapacity = 17179860387 // 16GB assert.NoError(t, err) @@ -2599,7 +2600,7 @@ func TestUpdatePodContainerResources(t *testing.T) { res300m350Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem350M} res350m350Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem350M} - pod, _ := makeBasePodAndStatus() + pod, _ := makeBasePodAndStatusWithRestartableInitContainers() makeAndSetFakePod(t, m, fakeRuntime, pod) for dsc, tc := range map[string]struct { @@ -2646,41 +2647,58 @@ func TestUpdatePodContainerResources(t *testing.T) { expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, }, } { - var containersToUpdate []containerToUpdateInfo - for idx := range pod.Spec.Containers { - // default resize policy when pod resize feature is enabled - pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx] - pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx] - cInfo := containerToUpdateInfo{ - apiContainerIdx: idx, - kubeContainerID: kubecontainer.ContainerID{}, - desiredContainerResources: containerResources{ - memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(), - memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(), - cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(), - cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(), - }, - currentContainerResources: &containerResources{ - memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(), - memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(), - cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(), - cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(), - }, + for _, allSideCarCtrs := range []bool{false, true} { + var containersToUpdate []containerToUpdateInfo + containerToUpdateInfo := func(container *v1.Container, idx int) containerToUpdateInfo { + return containerToUpdateInfo{ + container: container, + kubeContainerID: kubecontainer.ContainerID{}, + desiredContainerResources: containerResources{ + memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(), + memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(), + cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(), + cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(), + }, + currentContainerResources: &containerResources{ + memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(), + memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(), + cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(), + cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(), + }, + } } - containersToUpdate = append(containersToUpdate, cInfo) - } - fakeRuntime.Called = []string{} - err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate) - require.NoError(t, err, dsc) - if tc.invokeUpdateResources { - assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc) - } - for idx := range pod.Spec.Containers { - assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc) - assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc) - assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc) - assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), 
containersToUpdate[idx].currentContainerResources.cpuRequest, dsc) + if allSideCarCtrs { + for idx := range pod.Spec.InitContainers { + // default resize policy when pod resize feature is enabled + pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx] + pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx] + cinfo := containerToUpdateInfo(&pod.Spec.InitContainers[idx], idx) + containersToUpdate = append(containersToUpdate, cinfo) + } + } else { + for idx := range pod.Spec.Containers { + // default resize policy when pod resize feature is enabled + pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx] + pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx] + cinfo := containerToUpdateInfo(&pod.Spec.Containers[idx], idx) + containersToUpdate = append(containersToUpdate, cinfo) + } + } + + fakeRuntime.Called = []string{} + err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate) + require.NoError(t, err, dsc) + + if tc.invokeUpdateResources { + assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc) + } + for idx := range len(containersToUpdate) { + assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc) + assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc) + assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc) + assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc) + } } } } @@ -2979,7 +2997,7 @@ func TestDoPodResizeAction(t *testing.T) { } updateInfo := containerToUpdateInfo{ - apiContainerIdx: 0, + container: &pod.Spec.Containers[0], kubeContainerID: kps.ContainerStatuses[0].ID, desiredContainerResources: tc.desiredResources, currentContainerResources: &tc.currentResources, diff --git a/pkg/kubelet/status/fake_status_manager.go b/pkg/kubelet/status/fake_status_manager.go index 897bc34c5f4..c99acf31a2f 100644 --- a/pkg/kubelet/status/fake_status_manager.go +++ b/pkg/kubelet/status/fake_status_manager.go @@ -19,7 +19,10 @@ package status import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/status/state" ) @@ -81,7 +84,19 @@ func (m *fakeManager) SetPodAllocation(pod *v1.Pod) error { klog.InfoS("SetPodAllocation()") for _, container := range pod.Spec.Containers { alloc := *container.Resources.DeepCopy() - m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc) + if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil { + return err + } + } + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for _, container := range pod.Spec.InitContainers { + if podutil.IsRestartableInitContainer(&container) { + alloc := *container.Resources.DeepCopy() + if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil { + return err + } + } + } } return nil } diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index f314e26a9c5..c2d7b9485e6 100644 --- a/pkg/kubelet/status/status_manager.go +++ 
b/pkg/kubelet/status/status_manager.go @@ -269,18 +269,32 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (* } updated := false - for i, c := range pod.Spec.Containers { + containerAlloc := func(c v1.Container) (v1.ResourceRequirements, bool) { if cAlloc, ok := allocated[c.Name]; ok { if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) { - // Allocation differs from pod spec, update + // Allocation differs from pod spec, retrieve the allocation if !updated { - // If this is the first update, copy the pod + // If this is the first update to be performed, copy the pod pod = pod.DeepCopy() updated = true } - pod.Spec.Containers[i].Resources = cAlloc + return cAlloc, true } } + return v1.ResourceRequirements{}, false + } + + for i, c := range pod.Spec.Containers { + if cAlloc, found := containerAlloc(c); found { + // Allocation differs from pod spec, update + pod.Spec.Containers[i].Resources = cAlloc + } + } + for i, c := range pod.Spec.InitContainers { + if cAlloc, found := containerAlloc(c); found { + // Allocation differs from pod spec, update + pod.Spec.InitContainers[i].Resources = cAlloc + } } return pod, updated } @@ -302,6 +316,18 @@ func (m *manager) SetPodAllocation(pod *v1.Pod) error { return err } } + + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for _, container := range pod.Spec.InitContainers { + if podutil.IsRestartableInitContainer(&container) { + alloc := *container.Resources.DeepCopy() + if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil { + return err + } + } + } + } + return nil } diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 19cef9fb9ce..d94a82741cb 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -2037,6 +2037,7 @@ func TestMergePodStatus(t *testing.T) { } func TestUpdatePodFromAllocation(t *testing.T) { + containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: "12345", @@ -2044,36 +2045,69 @@ func TestUpdatePodFromAllocation(t *testing.T) { Namespace: "default", }, Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Name: "c1", - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI), - }, - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI), + Containers: []v1.Container{ + { + Name: "c1", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI), + }, }, }, - }, { - Name: "c2", - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI), - }, - Limits: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI), + { + Name: "c2", + Resources: v1.ResourceRequirements{ + 
Requests: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI), + }, }, }, - }}, + }, + InitContainers: []v1.Container{ + { + Name: "c1-restartable-init", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), + }, + }, + RestartPolicy: &containerRestartPolicyAlways, + }, + { + Name: "c1-init", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(700, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI), + }, + }, + }, + }, }, } resizedPod := pod.DeepCopy() resizedPod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(200, resource.DecimalSI) + resizedPod.Spec.InitContainers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(300, resource.DecimalSI) tests := []struct { name string @@ -2086,8 +2120,10 @@ func TestUpdatePodFromAllocation(t *testing.T) { pod: pod, allocs: state.PodResourceAllocation{ string(pod.UID): map[string]v1.ResourceRequirements{ - "c1": *pod.Spec.Containers[0].Resources.DeepCopy(), - "c2": *pod.Spec.Containers[1].Resources.DeepCopy(), + "c1": *pod.Spec.Containers[0].Resources.DeepCopy(), + "c2": *pod.Spec.Containers[1].Resources.DeepCopy(), + "c1-restartable-init": *pod.Spec.InitContainers[0].Resources.DeepCopy(), + "c1-init": *pod.Spec.InitContainers[1].Resources.DeepCopy(), }, }, expectUpdate: false, @@ -2110,8 +2146,10 @@ func TestUpdatePodFromAllocation(t *testing.T) { pod: pod, allocs: state.PodResourceAllocation{ string(pod.UID): map[string]v1.ResourceRequirements{ - "c1": *resizedPod.Spec.Containers[0].Resources.DeepCopy(), - "c2": *resizedPod.Spec.Containers[1].Resources.DeepCopy(), + "c1": *resizedPod.Spec.Containers[0].Resources.DeepCopy(), + "c2": *resizedPod.Spec.Containers[1].Resources.DeepCopy(), + "c1-restartable-init": *resizedPod.Spec.InitContainers[0].Resources.DeepCopy(), + "c1-init": *resizedPod.Spec.InitContainers[1].Resources.DeepCopy(), }, }, expectUpdate: true, diff --git a/pkg/registry/core/pod/strategy.go b/pkg/registry/core/pod/strategy.go index fd239f6970e..17764ca6ff4 100644 --- a/pkg/registry/core/pod/strategy.go +++ b/pkg/registry/core/pod/strategy.go @@ -289,18 +289,21 @@ var ResizeStrategy = podResizeStrategy{ ), } -// dropNonResizeUpdates discards all changes except for pod.Spec.Containers[*].Resources,ResizePolicy and certain metadata +// dropNonResizeUpdates discards all changes except for pod.Spec.Containers[*].Resources, pod.Spec.InitContainers[*].Resources, ResizePolicy and certain metadata func dropNonResizeUpdates(newPod, oldPod *api.Pod) *api.Pod { pod := dropPodUpdates(newPod, oldPod) // Containers are not allowed to be re-ordered, but in case they were, // we don't want to corrupt them here. It will get caught in validation. 
oldCtrToIndex := make(map[string]int) + oldInitCtrToIndex := make(map[string]int) for idx, ctr := range pod.Spec.Containers { oldCtrToIndex[ctr.Name] = idx } - // TODO: Once we add in-place pod resize support for sidecars, we need to allow - // modifying sidecar resources via resize subresource too. + for idx, ctr := range pod.Spec.InitContainers { + oldInitCtrToIndex[ctr.Name] = idx + } + for _, ctr := range newPod.Spec.Containers { idx, ok := oldCtrToIndex[ctr.Name] if !ok { @@ -309,6 +312,17 @@ func dropNonResizeUpdates(newPod, oldPod *api.Pod) *api.Pod { pod.Spec.Containers[idx].Resources = ctr.Resources pod.Spec.Containers[idx].ResizePolicy = ctr.ResizePolicy } + + if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { + for _, ctr := range newPod.Spec.InitContainers { + idx, ok := oldInitCtrToIndex[ctr.Name] + if !ok { + continue + } + pod.Spec.InitContainers[idx].Resources = ctr.Resources + pod.Spec.InitContainers[idx].ResizePolicy = ctr.ResizePolicy + } + } return pod } diff --git a/pkg/registry/core/pod/strategy_test.go b/pkg/registry/core/pod/strategy_test.go index 1b29c4cdc31..80e0893e251 100644 --- a/pkg/registry/core/pod/strategy_test.go +++ b/pkg/registry/core/pod/strategy_test.go @@ -2451,6 +2451,7 @@ func (w *warningRecorder) AddWarning(_, text string) { } func TestPodResizePrepareForUpdate(t *testing.T) { + containerRestartPolicyAlways := api.ContainerRestartPolicyAlways tests := []struct { name string oldPod *api.Pod @@ -2898,12 +2899,156 @@ func TestPodResizePrepareForUpdate(t *testing.T) { )), ), }, + { + name: "Update resources for sidecar container", + oldPod: podtest.MakePod("test-pod", + podtest.SetResourceVersion("1"), + podtest.SetInitContainers( + podtest.MakeContainer("init-container1", + podtest.SetContainerRestartPolicy(containerRestartPolicyAlways), + podtest.SetContainerResources(api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }, + }), + ), + ), + podtest.SetStatus(podtest.MakePodStatus( + podtest.SetContainerStatuses( + podtest.MakeContainerStatus("init-container1", + api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }), + ), + )), + ), + newPod: podtest.MakePod("test-pod", + podtest.SetResourceVersion("2"), + podtest.SetInitContainers( + podtest.MakeContainer("init-container1", + podtest.SetContainerRestartPolicy(containerRestartPolicyAlways), + podtest.SetContainerResources(api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("200m"), // Updated resource + api.ResourceMemory: resource.MustParse("4Gi"), // Updated resource + }, + }), + ), + ), + podtest.SetStatus(podtest.MakePodStatus( + podtest.SetContainerStatuses( + podtest.MakeContainerStatus("init-container1", + api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }), + ), + )), + ), + expected: podtest.MakePod("test-pod", + podtest.SetResourceVersion("2"), + podtest.SetInitContainers( + podtest.MakeContainer("init-container1", + podtest.SetContainerRestartPolicy(containerRestartPolicyAlways), + podtest.SetContainerResources(api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("200m"), // Updated resource + api.ResourceMemory: resource.MustParse("4Gi"), // Updated resource + }, + }), + ), + ), + podtest.SetStatus(podtest.MakePodStatus( + 
podtest.SetResizeStatus(api.PodResizeStatusProposed), // Resize status set + podtest.SetContainerStatuses( + podtest.MakeContainerStatus("init-container1", + api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }), + ), + )), + ), + }, + { + name: "Update resources should fail for non-restartable init container", + oldPod: podtest.MakePod("test-pod", + podtest.SetResourceVersion("1"), + podtest.SetInitContainers( + podtest.MakeContainer("init-container1", + podtest.SetContainerResources(api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }, + }), + ), + ), + podtest.SetStatus(podtest.MakePodStatus( + podtest.SetContainerStatuses( + podtest.MakeContainerStatus("init-container1", + api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }), + ), + )), + ), + newPod: podtest.MakePod("test-pod", + podtest.SetResourceVersion("2"), + podtest.SetInitContainers( + podtest.MakeContainer("init-container1", + podtest.SetContainerResources(api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("200m"), // Updated resource + api.ResourceMemory: resource.MustParse("4Gi"), // Updated resource + }, + }), + ), + ), + podtest.SetStatus(podtest.MakePodStatus( + podtest.SetContainerStatuses( + podtest.MakeContainerStatus("init-container1", + api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }), + ), + )), + ), + expected: podtest.MakePod("test-pod", + podtest.SetResourceVersion("2"), + podtest.SetInitContainers( + podtest.MakeContainer("init-container1", + podtest.SetContainerResources(api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("200m"), // Updated resource + api.ResourceMemory: resource.MustParse("4Gi"), // Updated resource + }, + }), + ), + ), + podtest.SetStatus(podtest.MakePodStatus( + podtest.SetResizeStatus(""), // Resize status not set + podtest.SetContainerStatuses( + podtest.MakeContainerStatus("init-container1", + api.ResourceList{ + api.ResourceCPU: resource.MustParse("100m"), + api.ResourceMemory: resource.MustParse("1Gi"), + }), + ), + )), + ), + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true) featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, true) + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true) ctx := context.Background() ResizeStrategy.PrepareForUpdate(ctx, tc.newPod, tc.oldPod) if !cmp.Equal(tc.expected, tc.newPod) { diff --git a/staging/src/k8s.io/component-helpers/resource/helpers.go b/staging/src/k8s.io/component-helpers/resource/helpers.go index 3bdcef6e816..683dc3ad8b8 100644 --- a/staging/src/k8s.io/component-helpers/resource/helpers.go +++ b/staging/src/k8s.io/component-helpers/resource/helpers.go @@ -144,10 +144,13 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour reqs := reuseOrClearResourceList(opts.Reuse) var containerStatuses map[string]*v1.ContainerStatus if opts.UseStatusResources { - containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) + 
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses)) for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } + for i := range pod.Status.InitContainerStatuses { + containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i] + } } for _, container := range pod.Spec.Containers { @@ -155,11 +158,7 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour if opts.UseStatusResources { cs, found := containerStatuses[container.Name] if found && cs.Resources != nil { - if pod.Status.Resize == v1.PodResizeStatusInfeasible { - containerReqs = cs.Resources.Requests.DeepCopy() - } else { - containerReqs = max(container.Resources.Requests, cs.Resources.Requests) - } + containerReqs = determineContainerReqs(pod, &container, cs) } } @@ -177,7 +176,6 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour restartableInitContainerReqs := v1.ResourceList{} initContainerReqs := v1.ResourceList{} // init containers define the minimum of any resource - // Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value // // Let's say `InitContainerUse(i)` is the resource requirements when the i-th // init container is initializing, then @@ -186,6 +184,15 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. for _, container := range pod.Spec.InitContainers { containerReqs := container.Resources.Requests + if opts.UseStatusResources { + if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { + cs, found := containerStatuses[container.Name] + if found && cs.Resources != nil { + containerReqs = determineContainerReqs(pod, &container, cs) + } + } + } + if len(opts.NonMissingContainerRequests) > 0 { containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests) } @@ -214,6 +221,22 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.Resour return reqs } +// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not. +func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList { + if pod.Status.Resize == v1.PodResizeStatusInfeasible { + return cs.Resources.Requests.DeepCopy() + } + return max(container.Resources.Requests, cs.Resources.Requests) +} + +// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not. 
+func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList { + if pod.Status.Resize == v1.PodResizeStatusInfeasible { + return cs.Resources.Limits.DeepCopy() + } + return max(container.Resources.Limits, cs.Resources.Limits) +} + // applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList { cp := v1.ResourceList{} @@ -267,10 +290,13 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.Resource limits := reuseOrClearResourceList(opts.Reuse) var containerStatuses map[string]*v1.ContainerStatus if opts.UseStatusResources { - containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) + containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses)) for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } + for i := range pod.Status.InitContainerStatuses { + containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i] + } } for _, container := range pod.Spec.Containers { @@ -278,11 +304,7 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.Resource if opts.UseStatusResources { cs, found := containerStatuses[container.Name] if found && cs.Resources != nil { - if pod.Status.Resize == v1.PodResizeStatusInfeasible { - containerLimits = cs.Resources.Limits.DeepCopy() - } else { - containerLimits = max(container.Resources.Limits, cs.Resources.Limits) - } + containerLimits = determineContainerLimits(pod, &container, cs) } } @@ -303,6 +325,15 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.Resource // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail. for _, container := range pod.Spec.InitContainers { containerLimits := container.Resources.Limits + if opts.UseStatusResources { + if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { + cs, found := containerStatuses[container.Name] + if found && cs.Resources != nil { + containerLimits = determineContainerLimits(pod, &container, cs) + } + } + } + // Is the init container marked as a restartable init container? 
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways { addResourceList(limits, containerLimits) diff --git a/staging/src/k8s.io/component-helpers/resource/helpers_test.go b/staging/src/k8s.io/component-helpers/resource/helpers_test.go index e146a311971..a5d4657d21f 100644 --- a/staging/src/k8s.io/component-helpers/resource/helpers_test.go +++ b/staging/src/k8s.io/component-helpers/resource/helpers_test.go @@ -286,14 +286,15 @@ func TestPodRequestsAndLimitsWithoutOverhead(t *testing.T) { func TestPodResourceRequests(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways testCases := []struct { - description string - options PodResourcesOptions - overhead v1.ResourceList - podResizeStatus v1.PodResizeStatus - initContainers []v1.Container - containers []v1.Container - containerStatus []v1.ContainerStatus - expectedRequests v1.ResourceList + description string + options PodResourcesOptions + overhead v1.ResourceList + podResizeStatus v1.PodResizeStatus + initContainers []v1.Container + initContainerStatuses []v1.ContainerStatus + containers []v1.Container + containerStatus []v1.ContainerStatus + expectedRequests v1.ResourceList }{ { description: "nil options, larger init container", @@ -509,6 +510,92 @@ func TestPodResourceRequests(t *testing.T) { }, }, }, + { + description: "resized, restartable init container, infeasible", + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + podResizeStatus: v1.PodResizeStatusInfeasible, + options: PodResourcesOptions{UseStatusResources: true}, + initContainers: []v1.Container{ + { + Name: "restartable-init-1", + RestartPolicy: &restartAlways, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + }, + }, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "restartable-init-1", + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + }, + { + description: "resized, restartable init container, no resize status", + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + }, + options: PodResourcesOptions{UseStatusResources: true}, + initContainers: []v1.Container{ + { + Name: "restartable-init-1", + RestartPolicy: &restartAlways, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + }, + }, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "restartable-init-1", + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + }, + { + description: "resized, restartable init container, infeasible, but don't use status", + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + }, + podResizeStatus: v1.PodResizeStatusInfeasible, + options: PodResourcesOptions{UseStatusResources: false}, + initContainers: []v1.Container{ + { + Name: "restartable-init-1", + RestartPolicy: &restartAlways, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + }, + }, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "restartable-init-1", + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + }, { description: "restartable init container", expectedRequests: v1.ResourceList{ @@ -700,8 +787,9 @@ func 
TestPodResourceRequests(t *testing.T) { Overhead: tc.overhead, }, Status: v1.PodStatus{ - ContainerStatuses: tc.containerStatus, - Resize: tc.podResizeStatus, + ContainerStatuses: tc.containerStatus, + InitContainerStatuses: tc.initContainerStatuses, + Resize: tc.podResizeStatus, }, } request := PodRequests(p, tc.options) @@ -748,13 +836,14 @@ func TestPodResourceRequestsReuse(t *testing.T) { func TestPodResourceLimits(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways testCases := []struct { - description string - options PodResourcesOptions - overhead v1.ResourceList - initContainers []v1.Container - containers []v1.Container - containerStatuses []v1.ContainerStatus - expectedLimits v1.ResourceList + description string + options PodResourcesOptions + overhead v1.ResourceList + initContainers []v1.Container + initContainerStatuses []v1.ContainerStatus + containers []v1.Container + containerStatuses []v1.ContainerStatus + expectedLimits v1.ResourceList }{ { description: "nil options, larger init container", @@ -1207,6 +1296,90 @@ func TestPodResourceLimits(t *testing.T) { }, }, }, + { + description: "pod scaled up with restartable init containers", + expectedLimits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + options: PodResourcesOptions{UseStatusResources: true}, + initContainers: []v1.Container{ + { + Name: "restartable-init-1", + RestartPolicy: &restartAlways, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "restartable-init-1", + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + { + description: "pod scaled down with restartable init containers", + expectedLimits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + options: PodResourcesOptions{UseStatusResources: true}, + initContainers: []v1.Container{ + { + Name: "restartable-init-1", + RestartPolicy: &restartAlways, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "restartable-init-1", + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + { + description: "pod scaled down with restartable init containers, don't use status", + expectedLimits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + options: PodResourcesOptions{UseStatusResources: false}, + initContainers: []v1.Container{ + { + Name: "restartable-init-1", + RestartPolicy: &restartAlways, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + initContainerStatuses: []v1.ContainerStatus{ + { + Name: "restartable-init-1", + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { @@ -1217,7 +1390,8 @@ func TestPodResourceLimits(t *testing.T) { Overhead: tc.overhead, }, Status: v1.PodStatus{ - ContainerStatuses: tc.containerStatuses, + ContainerStatuses: tc.containerStatuses, + InitContainerStatuses: tc.initContainerStatuses, }, } limits := PodLimits(p, tc.options) diff --git 
a/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go b/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go index 736a2600710..369277ba6dd 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go +++ b/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go @@ -45,16 +45,15 @@ func podRequests(pod *corev1.Pod) corev1.ResourceList { for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } + for i := range pod.Status.InitContainerStatuses { + containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i] + } for _, container := range pod.Spec.Containers { containerReqs := container.Resources.Requests cs, found := containerStatuses[container.Name] if found && cs.Resources != nil { - if pod.Status.Resize == corev1.PodResizeStatusInfeasible { - containerReqs = cs.Resources.Requests.DeepCopy() - } else { - containerReqs = max(container.Resources.Requests, cs.Resources.Requests) - } + containerReqs = determineContainerReqs(pod, &container, cs) } addResourceList(reqs, containerReqs) } @@ -66,6 +65,10 @@ func podRequests(pod *corev1.Pod) corev1.ResourceList { containerReqs := container.Resources.Requests if container.RestartPolicy != nil && *container.RestartPolicy == corev1.ContainerRestartPolicyAlways { + cs, found := containerStatuses[container.Name] + if found && cs.Resources != nil { + containerReqs = determineContainerReqs(pod, &container, cs) + } // and add them to the resulting cumulative container requests addResourceList(reqs, containerReqs) @@ -137,6 +140,14 @@ func podLimits(pod *corev1.Pod) corev1.ResourceList { return limits } +// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not. 
+func determineContainerReqs(pod *corev1.Pod, container *corev1.Container, cs *corev1.ContainerStatus) corev1.ResourceList { + if pod.Status.Resize == corev1.PodResizeStatusInfeasible { + return cs.Resources.Requests.DeepCopy() + } + return max(container.Resources.Requests, cs.Resources.Requests) +} + // max returns the result of max(a, b) for each named resource and is only used if we can't // accumulate into an existing resource list func max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { diff --git a/test/e2e/common/node/pod_resize.go b/test/e2e/common/node/pod_resize.go index 6a6c8361535..7899399c289 100644 --- a/test/e2e/common/node/pod_resize.go +++ b/test/e2e/common/node/pod_resize.go @@ -22,6 +22,7 @@ import ( "strconv" "time" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -33,7 +34,6 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" ) const ( @@ -968,6 +968,322 @@ func doPodResizeTests() { }, }, }, + { + name: "Guaranteed QoS pod, one restartable init container - increase CPU & memory", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}} + ]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: increasedCPU, CPULim: increasedCPU, MemReq: increasedMem, MemLim: increasedMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Guaranteed QoS pod, one restartable init container - decrease CPU & memory", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}} + ]}}`, reducedCPU, reducedMem, reducedCPU, reducedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + }, + { + Name: "c1-init", + 
Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: reducedCPU, MemReq: reducedMem, MemLim: reducedMem}, + CPUPolicy: &noRestart, + MemPolicy: &noRestart, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Guaranteed QoS pod, one restartable init container - increase CPU & decrease memory", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}} + ]}}`, increasedCPU, reducedMem, increasedCPU, reducedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: increasedCPU, CPULim: increasedCPU, MemReq: reducedMem, MemLim: reducedMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Guaranteed QoS pod, one restartable init container - decrease CPU & increase memory", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}} + ]}}`, reducedCPU, increasedMem, reducedCPU, increasedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: reducedCPU, MemReq: increasedMem, MemLim: increasedMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Burstable QoS pod, one container, one restartable init container - decrease init container CPU & memory", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}} + ]}}`, reducedCPU, reducedMem, reducedCPU, reducedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + 
Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: reducedCPU, MemReq: reducedMem, MemLim: reducedMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Burstable QoS pod, one container, one restartable init container - increase init container CPU & memory", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}} + ]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: increasedCPU, CPULim: increasedCPU, MemReq: increasedMem, MemLim: increasedMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Burstable QoS pod, one container, one restartable init container - decrease init container CPU only", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}} + ]}}`, reducedCPU, reducedCPU), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: reducedCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Burstable QoS pod, one container, one restartable init container - increase init container CPU only", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}} + ]}}`, increasedCPU, increasedCPU), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: increasedCPU, CPULim: increasedCPU, 
MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Burstable QoS pod, one container, one restartable init container - decrease init container memory only", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"memory":"%s"},"limits":{"memory":"%s"}}} + ]}}`, reducedMem, reducedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: reducedMem, MemLim: reducedMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, + { + name: "Burstable QoS pod, one container, one restartable init container - increase init container memory only", + containers: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + patchString: fmt.Sprintf(`{"spec":{"initContainers":[ + {"name":"c1-init", "resources":{"requests":{"memory":"%s"},"limits":{"memory":"%s"}}} + ]}}`, increasedMem, increasedMem), + expected: []e2epod.ResizableContainerInfo{ + { + Name: "c1", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: originalMem, MemLim: originalMem}, + }, + { + Name: "c1-init", + Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPU, MemReq: increasedMem, MemLim: increasedMem}, + InitCtr: true, + RestartPolicy: v1.ContainerRestartPolicyAlways, + }, + }, + }, } for idx := range tests { diff --git a/test/e2e/framework/pod/resize.go b/test/e2e/framework/pod/resize.go index 415259ad8c5..6e3b6a6f5d8 100644 --- a/test/e2e/framework/pod/resize.go +++ b/test/e2e/framework/pod/resize.go @@ -98,11 +98,13 @@ func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements { } type ResizableContainerInfo struct { - Name string - Resources *ContainerResources - CPUPolicy *v1.ResourceResizeRestartPolicy - MemPolicy *v1.ResourceResizeRestartPolicy - RestartCount int32 + Name string + Resources *ContainerResources + CPUPolicy *v1.ResourceResizeRestartPolicy + MemPolicy *v1.ResourceResizeRestartPolicy + RestartCount int32 + RestartPolicy v1.ContainerRestartPolicy + InitCtr bool } type containerPatch struct { @@ -154,17 +156,16 @@ func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container { Resources: res, ResizePolicy: resizePol, } + if tcInfo.RestartPolicy != "" { + tc.RestartPolicy = &tcInfo.RestartPolicy + } return tc } func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod { - var testContainers []v1.Container + 
testInitContainers, testContainers := separateContainers(tcInfo) - for _, ci := range tcInfo { - tc := makeResizableContainer(ci) - testContainers = append(testContainers, tc) - } pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -174,54 +175,119 @@ func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []Resizab }, }, Spec: v1.PodSpec{ - OS: &v1.PodOS{Name: v1.Linux}, - Containers: testContainers, - RestartPolicy: v1.RestartPolicyOnFailure, + OS: &v1.PodOS{Name: v1.Linux}, + InitContainers: testInitContainers, + Containers: testContainers, + RestartPolicy: v1.RestartPolicyOnFailure, }, } return pod } -func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) { +// separateContainers splits the input into initContainers and normal containers. +func separateContainers(tcInfo []ResizableContainerInfo) ([]v1.Container, []v1.Container) { + var initContainers, containers []v1.Container + + for _, ci := range tcInfo { + tc := makeResizableContainer(ci) + if ci.InitCtr { + initContainers = append(initContainers, tc) + } else { + containers = append(containers, tc) + } + } + + return initContainers, containers +} + +// separateContainerStatuses splits the input into initContainerStatuses and containerStatuses. +func separateContainerStatuses(tcInfo []ResizableContainerInfo) ([]v1.ContainerStatus, []v1.ContainerStatus) { + var containerStatuses, initContainerStatuses []v1.ContainerStatus + + for _, ci := range tcInfo { + ctrStatus := v1.ContainerStatus{ + Name: ci.Name, + RestartCount: ci.RestartCount, + } + if ci.InitCtr { + initContainerStatuses = append(initContainerStatuses, ctrStatus) + } else { + containerStatuses = append(containerStatuses, ctrStatus) + } + } + + return initContainerStatuses, containerStatuses +} + +func VerifyPodResizePolicy(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) { ginkgo.GinkgoHelper() - gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") - for i, wantCtr := range wantCtrs { - gotCtr := &gotPod.Spec.Containers[i] - ctr := makeResizableContainer(wantCtr) - gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name)) - gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy)) + + gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...) + var wantCtrs []v1.Container + for _, ci := range wantInfo { + wantCtrs = append(wantCtrs, makeResizableContainer(ci)) + } + gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") + for _, wantCtr := range wantCtrs { + for _, gotCtr := range gotCtrs { + if wantCtr.Name != gotCtr.Name { + continue + } + gomega.Expect(v1.Container{Name: gotCtr.Name, ResizePolicy: gotCtr.ResizePolicy}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, ResizePolicy: wantCtr.ResizePolicy})) + } } } -func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) { +func VerifyPodResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) { ginkgo.GinkgoHelper() - gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") - for i, wantCtr := range wantCtrs { - gotCtr := &gotPod.Spec.Containers[i] - ctr := makeResizableContainer(wantCtr) - gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name)) - gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources)) + + gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...) 
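The rewritten verification helpers share one pattern: flatten regular and init containers into a single slice and compare by container name instead of by position. A standalone sketch of that matcher (function name and error wording are illustrative, not from the framework):

package sketch

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	v1 "k8s.io/api/core/v1"
)

// matchByName pairs each expected container with the spec container of the
// same name, drawn from both Containers and InitContainers, so ordering and
// the init/regular split stop mattering to the comparison.
func matchByName(pod *v1.Pod, want []v1.Container) error {
	got := append(append([]v1.Container{}, pod.Spec.Containers...), pod.Spec.InitContainers...)
	byName := make(map[string]v1.Container, len(got))
	for _, c := range got {
		byName[c.Name] = c
	}
	for _, w := range want {
		g, ok := byName[w.Name]
		if !ok {
			return fmt.Errorf("container %q not found in pod spec", w.Name)
		}
		if !apiequality.Semantic.DeepEqual(g.Resources, w.Resources) {
			return fmt.Errorf("container %q resources mismatch", w.Name)
		}
	}
	return nil
}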
+ var wantCtrs []v1.Container + for _, ci := range wantInfo { + wantCtrs = append(wantCtrs, makeResizableContainer(ci)) + } + gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match") + for _, wantCtr := range wantCtrs { + for _, gotCtr := range gotCtrs { + if wantCtr.Name != gotCtr.Name { + continue + } + gomega.Expect(v1.Container{Name: gotCtr.Name, Resources: gotCtr.Resources}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, Resources: wantCtr.Resources})) + } } } -func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error { +func VerifyPodStatusResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) error { + ginkgo.GinkgoHelper() + + wantInitCtrs, wantCtrs := separateContainers(wantInfo) + var errs []error + if err := verifyPodContainersStatusResources(gotPod.Status.InitContainerStatuses, wantInitCtrs); err != nil { + errs = append(errs, err) + } + if err := verifyPodContainersStatusResources(gotPod.Status.ContainerStatuses, wantCtrs); err != nil { + errs = append(errs, err) + } + + return utilerrors.NewAggregate(errs) +} + +func verifyPodContainersStatusResources(gotCtrStatuses []v1.ContainerStatus, wantCtrs []v1.Container) error { ginkgo.GinkgoHelper() var errs []error - - if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) { + if len(gotCtrStatuses) != len(wantCtrs) { return fmt.Errorf("expectation length mismatch: got %d statuses, want %d", - len(gotPod.Status.ContainerStatuses), len(wantCtrs)) + len(gotCtrStatuses), len(wantCtrs)) } for i, wantCtr := range wantCtrs { - gotCtrStatus := &gotPod.Status.ContainerStatuses[i] - ctr := makeResizableContainer(wantCtr) - if gotCtrStatus.Name != ctr.Name { - errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name)) + gotCtrStatus := gotCtrStatuses[i] + if gotCtrStatus.Name != wantCtr.Name { + errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, wantCtr.Name)) continue } - if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(ctr.Resources)); err != nil { - errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", ctr.Name, err)) + if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(wantCtr.Resources)); err != nil { + errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", wantCtr.Name, err)) } } @@ -288,19 +354,33 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework return utilerrors.NewAggregate(errs) } -func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContainerInfo) error { +func verifyPodRestarts(pod *v1.Pod, wantInfo []ResizableContainerInfo) error { ginkgo.GinkgoHelper() - expectContainerRestarts := map[string]int32{} - for _, ci := range expectedContainers { - expectContainerRestarts[ci.Name] = ci.RestartCount + initCtrStatuses, ctrStatuses := separateContainerStatuses(wantInfo) + errs := []error{} + if err := verifyContainerRestarts(pod.Status.InitContainerStatuses, initCtrStatuses); err != nil { + errs = append(errs, err) + } + if err := verifyContainerRestarts(pod.Status.ContainerStatuses, ctrStatuses); err != nil { + errs = append(errs, err) + } + + return utilerrors.NewAggregate(errs) +} + +func verifyContainerRestarts(gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error { + ginkgo.GinkgoHelper() + + if len(gotStatuses) != len(wantStatuses) { + return fmt.Errorf("expectation 
length mismatch: got %d statuses, want %d", + len(gotStatuses), len(wantStatuses)) } errs := []error{} - for _, cs := range pod.Status.ContainerStatuses { - expectedRestarts := expectContainerRestarts[cs.Name] - if cs.RestartCount != expectedRestarts { - errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts)) + for i, gotStatus := range gotStatuses { + if gotStatus.RestartCount != wantStatuses[i].RestartCount { + errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", gotStatus.Name, gotStatus.RestartCount, wantStatuses[i].RestartCount)) } } return utilerrors.NewAggregate(errs) @@ -347,7 +427,7 @@ func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil { errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs))) } - if restartErrs := verifyContainerRestarts(resizedPod, expectedContainers); restartErrs != nil { + if restartErrs := verifyPodRestarts(resizedPod, expectedContainers); restartErrs != nil { errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs))) }
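Tying the end-to-end expectations back to the component-helpers hunks earlier in this diff: the aggregation rule those e2e cases depend on is the one determineContainerReqs and determineContainerLimits encode, namely max(spec, status) while a resize is pending and the status value alone once the resize is reported Infeasible. A self-contained restatement of that rule for a single CPU quantity (function name is illustrative):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// effectiveCPURequest restates the rule locally so it compiles on its own:
// if the resize is Infeasible, only the allocated (status) value counts;
// otherwise the larger of spec and status is charged.
func effectiveCPURequest(resize v1.PodResizeStatus, spec, status resource.Quantity) resource.Quantity {
	if resize == v1.PodResizeStatusInfeasible {
		return status
	}
	if spec.Cmp(status) > 0 {
		return spec
	}
	return status
}

// With spec = resource.MustParse("4") and status = resource.MustParse("2"),
// matching the "resized, restartable init container" test cases above:
//   resize == Infeasible            -> "2" (allocated value wins)
//   resize == "" or "Proposed"      -> "4" (max of spec and status)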