/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"time"
semver "github.com/blang/semver/v4"
"github.com/google/go-cmp/cmp"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
CgroupCPUPeriod string = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
CgroupCPUShares string = "/sys/fs/cgroup/cpu/cpu.shares"
CgroupCPUQuota string = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
CgroupMemLimit string = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
Cgroupv2MemLimit string = "/sys/fs/cgroup/memory.max"
Cgroupv2MemRequest string = "/sys/fs/cgroup/memory.min"
Cgroupv2CPULimit string = "/sys/fs/cgroup/cpu.max"
Cgroupv2CPURequest string = "/sys/fs/cgroup/cpu.weight"
CPUPeriod string = "100000"
MinContainerRuntimeVersion string = "1.6.9"
fakeExtendedResource = "dummy.com/dummy"
)
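// podOnCgroupv2Node caches whether the node running the test pod uses cgroup v2;
// it is detected lazily the first time cgroup values are verified.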
var (
podOnCgroupv2Node *bool
)
type ContainerResources struct {
CPUReq string
CPULim string
MemReq string
MemLim string
EphStorReq string
EphStorLim string
ExtendedResourceReq string
ExtendedResourceLim string
}
type ContainerAllocations struct {
CPUAlloc string
MemAlloc string
ephStorAlloc string
ExtendedResourceAlloc string
}
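// TestContainerInfo describes a test container: its resources, expected allocations,
// CPU/memory resize restart policies, and expected restart count.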
type TestContainerInfo struct {
Name string
Resources *ContainerResources
Allocations *ContainerAllocations
CPUPolicy *v1.ResourceResizeRestartPolicy
MemPolicy *v1.ResourceResizeRestartPolicy
RestartCount int32
}
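// containerPatch mirrors the per-container resources portion of the strategic merge
// patch generated for rollbacks.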
type containerPatch struct {
Name string `json:"name"`
Resources struct {
Requests struct {
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
EphStor string `json:"ephemeral-storage,omitempty"`
} `json:"requests"`
Limits struct {
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
EphStor string `json:"ephemeral-storage,omitempty"`
} `json:"limits"`
} `json:"resources"`
}
type patchSpec struct {
Spec struct {
Containers []containerPatch `json:"containers"`
} `json:"spec"`
}
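// isInPlacePodVerticalScalingSupportedByRuntime returns true if the container runtime on a
// random ready schedulable node is containerd at or above MinContainerRuntimeVersion.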
func isInPlacePodVerticalScalingSupportedByRuntime(ctx context.Context, c clientset.Interface) bool {
node, err := e2enode.GetRandomReadySchedulableNode(ctx, c)
framework.ExpectNoError(err)
re := regexp.MustCompile("containerd://(.*)")
match := re.FindStringSubmatch(node.Status.NodeInfo.ContainerRuntimeVersion)
if len(match) != 2 {
return false
}
if ver, verr := semver.ParseTolerant(match[1]); verr == nil {
if ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) < 0 {
return false
}
return true
}
return false
}
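// getTestResourceInfo converts a TestContainerInfo into the ResourceRequirements, allocated
// ResourceList, and container resize policies used to build and verify test containers.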
func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.ResourceList, []v1.ContainerResizePolicy) {
var res v1.ResourceRequirements
var alloc v1.ResourceList
var resizePol []v1.ContainerResizePolicy
if tcInfo.Resources != nil {
var lim, req v1.ResourceList
if tcInfo.Resources.CPULim != "" || tcInfo.Resources.MemLim != "" || tcInfo.Resources.EphStorLim != "" {
lim = make(v1.ResourceList)
}
if tcInfo.Resources.CPUReq != "" || tcInfo.Resources.MemReq != "" || tcInfo.Resources.EphStorReq != "" {
req = make(v1.ResourceList)
}
if tcInfo.Resources.CPULim != "" {
lim[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPULim)
}
if tcInfo.Resources.MemLim != "" {
lim[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemLim)
}
if tcInfo.Resources.EphStorLim != "" {
lim[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorLim)
}
if tcInfo.Resources.CPUReq != "" {
req[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPUReq)
}
if tcInfo.Resources.MemReq != "" {
req[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemReq)
}
if tcInfo.Resources.EphStorReq != "" {
req[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorReq)
}
res = v1.ResourceRequirements{Limits: lim, Requests: req}
}
if tcInfo.Allocations != nil {
alloc = make(v1.ResourceList)
if tcInfo.Allocations.CPUAlloc != "" {
alloc[v1.ResourceCPU] = resource.MustParse(tcInfo.Allocations.CPUAlloc)
}
if tcInfo.Allocations.MemAlloc != "" {
alloc[v1.ResourceMemory] = resource.MustParse(tcInfo.Allocations.MemAlloc)
}
if tcInfo.Allocations.ephStorAlloc != "" {
alloc[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Allocations.ephStorAlloc)
}
}
if tcInfo.CPUPolicy != nil {
cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
resizePol = append(resizePol, cpuPol)
}
if tcInfo.MemPolicy != nil {
memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy}
resizePol = append(resizePol, memPol)
}
return res, alloc, resizePol
}
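// initDefaultResizePolicy defaults any unset CPU or memory resize policy to NotRequired.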
func initDefaultResizePolicy(containers []TestContainerInfo) {
noRestart := v1.NotRequired
setDefaultPolicy := func(ci *TestContainerInfo) {
if ci.CPUPolicy == nil {
ci.CPUPolicy = &noRestart
}
if ci.MemPolicy == nil {
ci.MemPolicy = &noRestart
}
}
for i := range containers {
setDefaultPolicy(&containers[i])
}
}
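// makeTestContainer builds a BusyBox container from the given TestContainerInfo, along with
// a ContainerStatus carrying the expected allocated resources.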
func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStatus) {
cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
res, alloc, resizePol := getTestResourceInfo(tcInfo)
tc := v1.Container{
Name: tcInfo.Name,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", cmd},
Resources: res,
ResizePolicy: resizePol,
}
tcStatus := v1.ContainerStatus{
Name: tcInfo.Name,
AllocatedResources: alloc,
}
return tc, tcStatus
}
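// makeTestPod builds a Linux pod with RestartPolicyOnFailure containing the given test containers.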
func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod {
var testContainers []v1.Container
for _, ci := range tcInfo {
tc, _ := makeTestContainer(ci)
testContainers = append(testContainers, tc)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
Labels: map[string]string{
"time": timeStamp,
},
},
Spec: v1.PodSpec{
OS: &v1.PodOS{Name: v1.Linux},
Containers: testContainers,
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
return pod
}
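// verifyPodResizePolicy asserts that each container's resize policy in the pod spec matches
// the expected test containers.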
func verifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
ginkgo.GinkgoHelper()
for i, wantCtr := range wantCtrs {
gotCtr := &gotPod.Spec.Containers[i]
ctr, _ := makeTestContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
}
}
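// verifyPodResources asserts that each container's resource requests and limits in the pod
// spec match the expected test containers.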
func verifyPodResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
ginkgo.GinkgoHelper()
for i, wantCtr := range wantCtrs {
gotCtr := &gotPod.Spec.Containers[i]
ctr, _ := makeTestContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
}
}
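// verifyPodAllocations checks that each container status reports the expected allocated
// resources, defaulting the expectation to the requested values when no explicit
// allocations are given.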
func verifyPodAllocations(gotPod *v1.Pod, wantCtrs []TestContainerInfo) error {
ginkgo.GinkgoHelper()
for i, wantCtr := range wantCtrs {
gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
if wantCtr.Allocations == nil {
if wantCtr.Resources != nil {
alloc := &ContainerAllocations{CPUAlloc: wantCtr.Resources.CPUReq, MemAlloc: wantCtr.Resources.MemReq}
wantCtr.Allocations = alloc
defer func() {
wantCtr.Allocations = nil
}()
}
}
_, ctrStatus := makeTestContainer(wantCtr)
gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctrStatus.Name))
if !cmp.Equal(gotCtrStatus.AllocatedResources, ctrStatus.AllocatedResources) {
return fmt.Errorf("failed to verify Pod allocations, allocated resources not equal to expected")
}
}
return nil
}
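// verifyPodStatusResources asserts that the resources reported in each container status
// match the expected test containers.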
func verifyPodStatusResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) {
ginkgo.GinkgoHelper()
for i, wantCtr := range wantCtrs {
gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
ctr, _ := makeTestContainer(wantCtr)
gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(ctr.Resources).To(gomega.Equal(*gotCtrStatus.Resources))
}
}
func isPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
// Determine if pod is running on cgroupv2 or cgroupv1 node
//TODO(vinaykul,InPlacePodVerticalScaling): Is there a better way to determine this?
cmd := "mount -t cgroup2"
out, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd)
if err != nil {
return false
}
return len(out) != 0
}
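// verifyPodContainersCgroupValues reads the CPU and memory cgroup files inside each container
// and compares them with the values expected from the container's resources, handling both
// cgroup v1 and cgroup v2 nodes.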
func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []TestContainerInfo) error {
ginkgo.GinkgoHelper()
if podOnCgroupv2Node == nil {
value := isPodOnCgroupv2Node(f, pod)
podOnCgroupv2Node = &value
}
cgroupMemLimit := Cgroupv2MemLimit
cgroupCPULimit := Cgroupv2CPULimit
cgroupCPURequest := Cgroupv2CPURequest
if !*podOnCgroupv2Node {
cgroupMemLimit = CgroupMemLimit
cgroupCPULimit = CgroupCPUQuota
cgroupCPURequest = CgroupCPUShares
}
verifyCgroupValue := func(cName, cgPath, expectedCgValue string) error {
cmd := fmt.Sprintf("head -n 1 %s", cgPath)
framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
if err != nil {
return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
}
cgValue = strings.Trim(cgValue, "\n")
if cgValue != expectedCgValue {
return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
}
return nil
}
for _, ci := range tcInfo {
if ci.Resources == nil {
continue
}
tc, _ := makeTestContainer(ci)
if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
var expectedCPUShares int64
var expectedCPULimitString, expectedMemLimitString string
expectedMemLimitInBytes := tc.Resources.Limits.Memory().Value()
cpuRequest := tc.Resources.Requests.Cpu()
cpuLimit := tc.Resources.Limits.Cpu()
if cpuRequest.IsZero() && !cpuLimit.IsZero() {
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuLimit.MilliValue()))
} else {
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue()))
}
cpuQuota := kubecm.MilliCPUToQuota(cpuLimit.MilliValue(), kubecm.QuotaPeriod)
if cpuLimit.IsZero() {
cpuQuota = -1
}
expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)
expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
if *podOnCgroupv2Node {
if expectedCPULimitString == "-1" {
expectedCPULimitString = "max"
}
expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
if expectedMemLimitString == "0" {
expectedMemLimitString = "max"
}
// convert cgroup v1 cpu.shares value to cgroup v2 cpu.weight value
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
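// e.g. a request of 1 CPU (1024 shares) maps to a cpu.weight of 1+((1024-2)*9999)/262142 = 39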
expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
}
if expectedMemLimitString != "0" {
err := verifyCgroupValue(ci.Name, cgroupMemLimit, expectedMemLimitString)
if err != nil {
return err
}
}
err := verifyCgroupValue(ci.Name, cgroupCPULimit, expectedCPULimitString)
if err != nil {
return err
}
err = verifyCgroupValue(ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10))
if err != nil {
return err
}
}
}
return nil
}
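// waitForContainerRestart returns nil once every container expected to restart reports a
// RestartCount of at least 1; on rollback the restart expectations are taken from the
// initialContainers argument instead.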
func waitForContainerRestart(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) error {
ginkgo.GinkgoHelper()
var restartContainersExpected []string
restartContainers := expectedContainers
// if we're rolling back, extract restart counts from test case "expected" containers
if isRollback {
restartContainers = initialContainers
}
for _, ci := range restartContainers {
if ci.RestartCount > 0 {
restartContainersExpected = append(restartContainersExpected, ci.Name)
}
}
if len(restartContainersExpected) == 0 {
return nil
}
pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return err
}
restartedContainersCount := 0
for _, cName := range restartContainersExpected {
cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
if cs.RestartCount < 1 {
break
}
restartedContainersCount++
}
if restartedContainersCount == len(restartContainersExpected) {
return nil
}
if restartedContainersCount > len(restartContainersExpected) {
return fmt.Errorf("more container restarts than expected")
}
return fmt.Errorf("fewer container restarts than expected")
}
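// waitForPodResizeActuation waits for the expected container restarts, verifies the container
// cgroup values, and polls until the pod's reported allocations match the expected containers,
// returning the resized pod.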
func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod {
ginkgo.GinkgoHelper()
var resizedPod *v1.Pod
var pErr error
timeouts := framework.NewTimeoutContext()
// Wait for container restart
gomega.Eventually(ctx, waitForContainerRestart, timeouts.PodStartShort, timeouts.Poll).
WithArguments(podClient, pod, expectedContainers, initialContainers, isRollback).
ShouldNot(gomega.HaveOccurred(), "failed waiting for expected container restart")
// Verify Pod Containers Cgroup Values
gomega.Eventually(ctx, verifyPodContainersCgroupValues, timeouts.PodStartShort, timeouts.Poll).
WithArguments(f, patchedPod, expectedContainers).
ShouldNot(gomega.HaveOccurred(), "failed to verify container cgroup values to match expected")
// Wait for pod resource allocations to equal expected values after resize
gomega.Eventually(ctx, func() error {
resizedPod, pErr = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
if pErr != nil {
return pErr
}
return verifyPodAllocations(resizedPod, expectedContainers)
}, timeouts.PodStartShort, timeouts.Poll).
ShouldNot(gomega.HaveOccurred(), "timed out waiting for pod resource allocation values to match expected")
return resizedPod
}
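// genPatchString builds a strategic merge patch that restores the given containers' CPU and
// memory requests and limits, producing JSON of the form:
// {"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"100m","memory":"200Mi"},"limits":{"cpu":"100m","memory":"200Mi"}}}]}}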
func genPatchString(containers []TestContainerInfo) (string, error) {
var patch patchSpec
for _, container := range containers {
var cPatch containerPatch
cPatch.Name = container.Name
cPatch.Resources.Requests.CPU = container.Resources.CPUReq
cPatch.Resources.Requests.Memory = container.Resources.MemReq
cPatch.Resources.Limits.CPU = container.Resources.CPULim
cPatch.Resources.Limits.Memory = container.Resources.MemLim
patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
}
patchBytes, err := json.Marshal(patch)
if err != nil {
return "", err
}
return string(patchBytes), nil
}
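// patchNode computes a two-way strategic merge patch between the old and new node objects and
// applies it to the node's status subresource.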
func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error {
oldData, err := json.Marshal(old)
if err != nil {
return err
}
newData, err := json.Marshal(new)
if err != nil {
return err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
if err != nil {
return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
}
_, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
return err
}
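// addExtendedResource patches the node status to advertise the given extended resource and
// waits until the new capacity is visible.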
func addExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string, extendedResourceQuantity resource.Quantity) {
extendedResource := v1.ResourceName(extendedResourceName)
ginkgo.By("Adding a custom resource")
originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
node := originalNode.DeepCopy()
node.Status.Capacity[extendedResource] = extendedResourceQuantity
node.Status.Allocatable[extendedResource] = extendedResourceQuantity
err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
framework.ExpectNoError(err)
gomega.Eventually(func() error {
node, err = clientSet.CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
fakeResourceCapacity, exists := node.Status.Capacity[extendedResource]
if !exists {
return fmt.Errorf("node %s has no %s resource capacity", node.Name, extendedResourceName)
}
if fakeResourceCapacity.Cmp(extendedResourceQuantity) != 0 {
return fmt.Errorf("node %s has resource capacity %s, expected: %s", node.Name, fakeResourceCapacity.String(), extendedResourceQuantity.String())
}
return nil
}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
}
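// removeExtendedResource removes the extended resource from the node status and waits until
// the capacity entry is gone.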
func removeExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string) {
extendedResource := v1.ResourceName(extendedResourceName)
ginkgo.By("Removing a custom resource")
originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
node := originalNode.DeepCopy()
delete(node.Status.Capacity, extendedResource)
delete(node.Status.Allocatable, extendedResource)
err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node)
framework.ExpectNoError(err)
gomega.Eventually(func() error {
node, err = clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
if _, exists := node.Status.Capacity[extendedResource]; exists {
return fmt.Errorf("node %s has resource capacity %s which is expected to be removed", node.Name, extendedResourceName)
}
return nil
}).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred())
}
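// doPodResizeTests exercises in-place pod resize: for each test case it creates a pod, patches
// its container resources, verifies the spec, status, allocations, and cgroup values after the
// resize, and then rolls the resize back.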
func doPodResizeTests() {
f := framework.NewDefaultFramework("pod-resize-test")
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2epod.NewPodClient(f)
})
type testCase struct {
name string
containers []TestContainerInfo
patchString string
expected []TestContainerInfo
addExtendedResource bool
}
noRestart := v1.NotRequired
doRestart := v1.RestartContainer
tests := []testCase{
{
name: "Guaranteed QoS pod, one container - increase CPU & memory",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
},
{
name: "Guaranteed QoS pod, one container - decrease CPU & memory",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"100m","memory":"250Mi"},"limits":{"cpu":"100m","memory":"250Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "250Mi", MemLim: "250Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
},
{
name: "Guaranteed QoS pod, one container - increase CPU & decrease memory",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"100Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "100Mi", MemLim: "100Mi"},
},
},
},
{
name: "Guaranteed QoS pod, one container - decrease CPU & increase memory",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"300Mi"},"limits":{"cpu":"50m","memory":"300Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "50m", CPULim: "50m", MemReq: "300Mi", MemLim: "300Mi"},
},
},
},
{
name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "200Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"140m","memory":"50Mi"},"limits":{"cpu":"140m","memory":"50Mi"}}},
{"name":"c2", "resources":{"requests":{"cpu":"150m","memory":"240Mi"},"limits":{"cpu":"150m","memory":"240Mi"}}},
{"name":"c3", "resources":{"requests":{"cpu":"340m","memory":"250Mi"},"limits":{"cpu":"340m","memory":"250Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "140m", CPULim: "140m", MemReq: "50Mi", MemLim: "50Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "150m", CPULim: "150m", MemReq: "240Mi", MemLim: "240Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "340m", CPULim: "340m", MemReq: "250Mi", MemLim: "250Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"200Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"memory":"400Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "400Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"300Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "300Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory limits only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"memory":"600Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "600Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"100m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"cpu":"300m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"150m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "150m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU limits only",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"cpu":"500m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "500m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"cpu":"200m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"cpu":"400m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase CPU limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"cpu":"500m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "500m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease CPU limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"cpu":"300m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "250Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"memory":"300Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "300Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"memory":"500Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "300Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase memory limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"memory":"500Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease memory limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"memory":"300Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "300Mi", MemLim: "300Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase memory limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"memory":"500Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease memory limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"memory":"400Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase CPU limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"cpu":"300m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "300m", MemReq: "100Mi", MemLim: "400Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease CPU limits",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"cpu":"300m"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "300Mi", MemLim: "400Mi"},
},
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests - decrease memory request",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", MemReq: "500Mi"},
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"400Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", MemReq: "400Mi"},
},
},
},
{
name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
RestartCount: 1,
},
},
},
{
name: "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
CPUPolicy: &doRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"100Mi"},"limits":{"cpu":"100m","memory":"200Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "50m", CPULim: "100m", MemReq: "100Mi", MemLim: "200Mi"},
CPUPolicy: &doRestart,
MemPolicy: &noRestart,
RestartCount: 1,
},
},
},
{
name: "Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod)",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"150m","memory":"150Mi"},"limits":{"cpu":"250m","memory":"250Mi"}}},
{"name":"c3", "resources":{"requests":{"cpu":"250m","memory":"250Mi"},"limits":{"cpu":"350m","memory":"350Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "150m", CPULim: "250m", MemReq: "150Mi", MemLim: "250Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "250m", CPULim: "350m", MemReq: "250Mi", MemLim: "350Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
},
{
name: "Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod)",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"50Mi"},"limits":{"cpu":"150m","memory":"150Mi"}}},
{"name":"c2", "resources":{"requests":{"cpu":"350m","memory":"350Mi"},"limits":{"cpu":"450m","memory":"450Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "50m", CPULim: "150m", MemReq: "50Mi", MemLim: "150Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "350m", CPULim: "450m", MemReq: "350Mi", MemLim: "450Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
RestartCount: 1,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
},
{
name: "Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod)",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
CPUPolicy: &doRestart,
MemPolicy: &doRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
CPUPolicy: &doRestart,
MemPolicy: &noRestart,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c2", "resources":{"requests":{"cpu":"250m","memory":"250Mi"},"limits":{"cpu":"350m","memory":"350Mi"}}},
{"name":"c3", "resources":{"requests":{"cpu":"100m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"200Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
CPUPolicy: &doRestart,
MemPolicy: &doRestart,
},
{
Name: "c2",
Resources: &ContainerResources{CPUReq: "250m", CPULim: "350m", MemReq: "250Mi", MemLim: "350Mi"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
RestartCount: 1,
},
{
Name: "c3",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
CPUPolicy: &doRestart,
MemPolicy: &doRestart,
RestartCount: 1,
},
},
},
{
name: "Guaranteed QoS pod, one container - increase CPU & memory with an extended resource",
containers: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi",
ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
]}}`,
expected: []TestContainerInfo{
{
Name: "c1",
Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi",
ExtendedResourceReq: "1", ExtendedResourceLim: "1"},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
addExtendedResource: true,
},
}
timeouts := framework.NewTimeoutContext()
for idx := range tests {
tc := tests[idx]
ginkgo.It(tc.name, func(ctx context.Context) {
ginkgo.By("check if in place pod vertical scaling is supported", func() {
if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
}
})
var testPod, patchedPod *v1.Pod
var pErr error
tStamp := strconv.Itoa(time.Now().Nanosecond())
initDefaultResizePolicy(tc.containers)
initDefaultResizePolicy(tc.expected)
testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
testPod = e2epod.MustMixinRestrictedPodSecurity(testPod)
if tc.addExtendedResource {
nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet)
framework.ExpectNoError(err)
for _, node := range nodes.Items {
addExtendedResource(f.ClientSet, node.Name, fakeExtendedResource, resource.MustParse("123"))
}
defer func() {
for _, node := range nodes.Items {
removeExtendedResource(f.ClientSet, node.Name, fakeExtendedResource)
}
}()
}
ginkgo.By("creating pod")
newPod := podClient.CreateSync(ctx, testPod)
ginkgo.By("verifying initial pod resources, allocations are as expected")
verifyPodResources(newPod, tc.containers)
ginkgo.By("verifying initial pod resize policy is as expected")
verifyPodResizePolicy(newPod, tc.containers)
ginkgo.By("verifying initial pod status resources are as expected")
verifyPodStatusResources(newPod, tc.containers)
ginkgo.By("verifying initial cgroup config are as expected")
framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, newPod, tc.containers))
patchAndVerify := func(patchString string, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, opStr string, isRollback bool) {
ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
framework.ExpectNoError(pErr, fmt.Sprintf("failed to patch pod for %s", opStr))
ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
verifyPodResources(patchedPod, expectedContainers)
gomega.Eventually(ctx, verifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
WithArguments(patchedPod, initialContainers).
Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
resizedPod := waitForPodResizeActuation(ctx, f, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)
// Verify the container cgroup values on the node reflect the resized resources
ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers))
ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
verifyPodResources(resizedPod, expectedContainers)
ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
gomega.Eventually(ctx, verifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
WithArguments(resizedPod, expectedContainers).
Should(gomega.BeNil(), "failed to verify Pod allocations for resizedPod")
}
patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
rbPatchStr, err := genPatchString(tc.containers)
framework.ExpectNoError(err)
// Resize has been actuated, test rollback
patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
ginkgo.By("deleting pod")
podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
})
}
}
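// doPodResizeErrorTests covers resize requests that the API server must reject, verifying the
// pod's resources and allocations remain unchanged after the failed patch.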
func doPodResizeErrorTests() {
f := framework.NewDefaultFramework("pod-resize-errors")
var podClient *e2epod.PodClient
ginkgo.BeforeEach(func() {
podClient = e2epod.NewPodClient(f)
})
type testCase struct {
name string
containers []TestContainerInfo
patchString string
patchError string
expected []TestContainerInfo
}
tests := []testCase{
{
name: "BestEffort pod - try requesting memory, expect error",
containers: []TestContainerInfo{
{
Name: "c1",
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"400Mi"}}}
]}}`,
patchError: "Pod QoS is immutable",
expected: []TestContainerInfo{
{
Name: "c1",
},
},
},
}
timeouts := framework.NewTimeoutContext()
for idx := range tests {
tc := tests[idx]
ginkgo.It(tc.name, func(ctx context.Context) {
ginkgo.By("check if in place pod vertical scaling is supported", func() {
if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
}
})
var testPod, patchedPod *v1.Pod
var pErr error
tStamp := strconv.Itoa(time.Now().Nanosecond())
initDefaultResizePolicy(tc.containers)
initDefaultResizePolicy(tc.expected)
testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
testPod = e2epod.MustMixinRestrictedPodSecurity(testPod)
ginkgo.By("creating pod")
newPod := podClient.CreateSync(ctx, testPod)
ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
verifyPodResources(newPod, tc.containers)
verifyPodResizePolicy(newPod, tc.containers)
ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
verifyPodStatusResources(newPod, tc.containers)
ginkgo.By("patching pod for resize")
patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(ctx, newPod.Name,
types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
if tc.patchError == "" {
framework.ExpectNoError(pErr, "failed to patch pod for resize")
} else {
gomega.Expect(pErr).To(gomega.HaveOccurred(), tc.patchError)
patchedPod = newPod
}
ginkgo.By("verifying pod resources after patch")
verifyPodResources(patchedPod, tc.expected)
ginkgo.By("verifying pod allocations after patch")
gomega.Eventually(ctx, verifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
WithArguments(patchedPod, tc.expected).
Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
ginkgo.By("deleting pod")
podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
})
}
}
// NOTE: Pod resize scheduler and resource quota tests are out of scope in e2e_node tests,
// because in e2e_node tests:
//   a) the scheduler and controller manager are not run by the Node e2e framework
//   b) the api-server is not started with --enable-admission-plugins=ResourceQuota,
//      and it is not possible to enable it via TEST_ARGS
// Those tests are performed by doPodResizeSchedulerTests() and doPodResizeResourceQuotaTests()
// in test/e2e/node/pod_resize.go
var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, func() {
doPodResizeTests()
doPodResizeErrorTests()
})