Merge pull request #132498 from ffromani/e2e-serial-node-cpumanager-fix-ordered

e2e: serial: node cpumanager parity with the old suite
Kubernetes Prow Robot
2025-07-01 07:15:31 -07:00
committed by GitHub


@@ -117,7 +117,7 @@ var (
* A better approach would be to check what we do have in the node. This is deferred to a later stage alongside
* other improvements.
*/
var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, framework.WithSerial(), feature.CPUManager, func() {
var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, ginkgo.ContinueOnFailure, framework.WithSerial(), feature.CPUManager, func() {
f := framework.NewDefaultFramework("cpumanager-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
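The only functional change in this hunk is the ginkgo.ContinueOnFailure decorator added next to ginkgo.Ordered. In Ginkgo v2, once a spec fails inside an Ordered container the remaining specs in that container are normally skipped; ContinueOnFailure lets the later specs keep running, which matters for a long serial suite like this one. A minimal standalone sketch of the decorator combination (suite and spec names here are illustrative, not taken from the Kubernetes code):

package ordered_example_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestOrderedExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "ordered example suite")
}

// Without ContinueOnFailure, the second spec would be skipped once the first
// one fails; with it, every spec in the Ordered container still gets to run.
var _ = ginkgo.Describe("an ordered container", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() {
	ginkgo.It("may fail", func() {
		gomega.Expect(1).To(gomega.Equal(2)) // deliberately failing assertion
	})
	ginkgo.It("still runs after the failure", func() {
		gomega.Expect(true).To(gomega.BeTrue())
	})
})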
@@ -1016,6 +1016,30 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, framework.WithSerial(), featu
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "max"))
})
ginkgo.It("should disable for guaranteed pod with exclusive CPUs assigned", func(ctx context.Context) {
cpuCount := 4
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-disabled"
pod := makeCPUManagerPod("gu-pod-cfsquota-off", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "3",
cpuLimit: "3",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("max"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "max"))
gomega.Expect(pod).To(HaveContainerCPUsCount(ctnName, 3))
gomega.Expect(pod).To(HaveContainerCPUsASubsetOf(ctnName, onlineCPUs))
gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith(ctnName, reservedCPUs))
})
ginkgo.It("should enforce for guaranteed pod", func(ctx context.Context) {
cpuCount := 1 // overshoot, minimum request is 1
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
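The assertions in this hunk lean on custom Gomega matchers (HaveSandboxQuota, HaveContainerQuota, HaveContainerCPUsCount, and so on) defined elsewhere in the suite and not shown in this diff. As a rough idea of how a matcher of that shape can be built, here is a hedged sketch that checks a quota value for a named container against a plain map; the types and the lookup are invented for illustration and are not the matchers used by this PR:

package matcher_example

import (
	"fmt"

	"github.com/onsi/gomega/types"
)

// containerQuotas is a stand-in for however the real suite discovers the
// per-container CFS quota (e.g. by reading cgroup files on the node).
type containerQuotas map[string]string

// haveContainerQuota returns a Gomega matcher that succeeds when the named
// container has the expected quota string ("max" or a value in microseconds).
func haveContainerQuota(name, want string) types.GomegaMatcher {
	return &containerQuotaMatcher{name: name, want: want}
}

type containerQuotaMatcher struct {
	name string
	want string
}

func (m *containerQuotaMatcher) Match(actual interface{}) (bool, error) {
	quotas, ok := actual.(containerQuotas)
	if !ok {
		return false, fmt.Errorf("expected containerQuotas, got %T", actual)
	}
	return quotas[m.name] == m.want, nil
}

func (m *containerQuotaMatcher) FailureMessage(actual interface{}) string {
	return fmt.Sprintf("expected container %q to have CFS quota %q, got %v", m.name, m.want, actual)
}

func (m *containerQuotaMatcher) NegatedFailureMessage(actual interface{}) string {
	return fmt.Sprintf("expected container %q not to have CFS quota %q", m.name, m.want)
}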
@@ -1106,6 +1130,216 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, framework.WithSerial(), featu
})
})
ginkgo.When("checking the CFS quota management can be disabled", ginkgo.Label("cfs-quota"), func() {
// NOTE: these tests only check cases where the quota is set to "max", so we intentionally
// don't duplicate all the tests
ginkgo.BeforeEach(func(ctx context.Context) {
// WARNING: this assumes 2-way SMT systems - we don't know how to access other SMT levels.
// this means on more-than-2-way SMT systems this test will prove nothing
reservedCPUs = cpuset.New(0)
updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
policyName: string(cpumanager.PolicyStatic),
reservedSystemCPUs: reservedCPUs,
disableCPUQuotaWithExclusiveCPUs: true,
}))
})
ginkgo.It("should not disable for guaranteed pod with exclusive CPUs assigned", func(ctx context.Context) {
cpuCount := 1
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-disabled"
pod := makeCPUManagerPod("gu-pod-cfsquota-off", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "1",
cpuLimit: "1",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("max"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "max"))
})
ginkgo.It("should not disable for guaranteed pod with exclusive CPUs assigned", func(ctx context.Context) {
cpuCount := 4
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-disabled"
pod := makeCPUManagerPod("gu-pod-cfsquota-off", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "3",
cpuLimit: "3",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("max"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "max"))
gomega.Expect(pod).To(HaveContainerCPUsCount(ctnName, 3))
gomega.Expect(pod).To(HaveContainerCPUsASubsetOf(ctnName, onlineCPUs))
gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith(ctnName, reservedCPUs))
})
ginkgo.It("should enforce for guaranteed pod", func(ctx context.Context) {
cpuCount := 1 // overshoot, minimum request is 1
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-enabled"
pod := makeCPUManagerPod("gu-pod-cfs-quota-on", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "500m",
cpuLimit: "500m",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("50000"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "50000"))
})
ginkgo.It("should not enforce with multiple containers only in the container with exclusive CPUs", func(ctx context.Context) {
cpuCount := 2
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
pod := makeCPUManagerPod("gu-pod-multicontainer-mixed", []ctnAttribute{
{
ctnName: "gu-container-non-int-values",
cpuRequest: "500m",
cpuLimit: "500m",
},
{
ctnName: "gu-container-int-values",
cpuRequest: "1",
cpuLimit: "1",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("max"))
gomega.Expect(pod).To(HaveContainerQuota("gu-container-non-int-values", "50000"))
gomega.Expect(pod).To(HaveContainerQuota("gu-container-int-values", "max"))
})
})
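For reference on what the "max" expectations in the block above correspond to: on cgroup v2 the CFS bandwidth limit is the first field of the container's cpu.max file, formatted as "<quota> <period>", and a quota of "max" means no limit, which is what the kubelet applies to exclusively pinned containers when the quota is disabled. A small illustrative helper for reading that field (the path handling is an assumption, not how the suite's matchers are implemented):

package cgroup_example

import (
	"fmt"
	"os"
	"strings"
)

// readCFSQuota returns the quota column of a cgroup v2 cpu.max file,
// e.g. "max" for an unlimited container or "300000" (microseconds per period)
// for a container limited to 3 CPUs with the default 100000us period.
func readCFSQuota(cpuMaxPath string) (string, error) {
	data, err := os.ReadFile(cpuMaxPath)
	if err != nil {
		return "", err
	}
	fields := strings.Fields(string(data)) // expected layout: "<quota> <period>"
	if len(fields) != 2 {
		return "", fmt.Errorf("unexpected cpu.max content %q in %s", string(data), cpuMaxPath)
	}
	return fields[0], nil
}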
ginkgo.When("checking the CFS quota management can be disabled", ginkgo.Label("cfs-quota"), func() {
// NOTE: these tests check only cases on which the quota is set to "max", so we intentionally
// don't duplicate the all the tests
ginkgo.BeforeEach(func(ctx context.Context) {
// WARNING: this assumes 2-way SMT systems - we don't know how to access other SMT levels.
// this means on more-than-2-way SMT systems this test will prove nothing
reservedCPUs = cpuset.New(0)
updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
policyName: string(cpumanager.PolicyStatic),
reservedSystemCPUs: reservedCPUs,
disableCPUQuotaWithExclusiveCPUs: false,
}))
})
ginkgo.It("should not disable for guaranteed pod with exclusive CPUs assigned", func(ctx context.Context) {
cpuCount := 1
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-disabled"
pod := makeCPUManagerPod("gu-pod-cfsquota-off", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "1",
cpuLimit: "1",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("100000"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "100000"))
})
ginkgo.It("should not disable for guaranteed pod with exclusive CPUs assigned", func(ctx context.Context) {
cpuCount := 4
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-disabled"
pod := makeCPUManagerPod("gu-pod-cfsquota-off", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "3",
cpuLimit: "3",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("300000"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "300000"))
gomega.Expect(pod).To(HaveContainerCPUsCount(ctnName, 3))
gomega.Expect(pod).To(HaveContainerCPUsASubsetOf(ctnName, onlineCPUs))
gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith(ctnName, reservedCPUs))
})
ginkgo.It("should enforce for guaranteed pod", func(ctx context.Context) {
cpuCount := 1 // overshoot, minimum request is 1
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
ctnName := "gu-container-cfsquota-enabled"
pod := makeCPUManagerPod("gu-pod-cfs-quota-on", []ctnAttribute{
{
ctnName: ctnName,
cpuRequest: "500m",
cpuLimit: "500m",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("50000"))
gomega.Expect(pod).To(HaveContainerQuota(ctnName, "50000"))
})
ginkgo.It("should not enforce with multiple containers only in the container with exclusive CPUs", func(ctx context.Context) {
cpuCount := 2
skipIfAllocatableCPUsLessThan(getLocalNode(ctx, f), cpuCount)
pod := makeCPUManagerPod("gu-pod-multicontainer-mixed", []ctnAttribute{
{
ctnName: "gu-container-non-int-values",
cpuRequest: "500m",
cpuLimit: "500m",
},
{
ctnName: "gu-container-int-values",
cpuRequest: "1",
cpuLimit: "1",
},
})
ginkgo.By("creating the test pod")
pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
podMap[string(pod.UID)] = pod
gomega.Expect(pod).To(HaveSandboxQuota("150000"))
gomega.Expect(pod).To(HaveContainerQuota("gu-container-non-int-values", "50000"))
gomega.Expect(pod).To(HaveContainerQuota("gu-container-int-values", "100000"))
})
})
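The numeric expectations in the block above follow the standard CFS arithmetic: with the default 100000µs period, the container quota is the CPU limit in cores multiplied by the period, and the pod sandbox quota is the sum over the containers (500m → 50000, 1 CPU → 100000, 3 CPUs → 300000, 500m + 1 CPU → 150000). A tiny self-contained sketch of that calculation (helper names are made up for illustration):

package main

import "fmt"

// cfsPeriodUsec is the default CFS period the expectations above assume.
const cfsPeriodUsec = 100000

// expectedQuota converts a CPU limit expressed in millicores into the
// CFS quota string asserted by the tests, e.g. 500 -> "50000".
func expectedQuota(milliCPU int64) string {
	return fmt.Sprintf("%d", milliCPU*cfsPeriodUsec/1000)
}

func main() {
	fmt.Println(expectedQuota(500))        // 50000  (the 500m container)
	fmt.Println(expectedQuota(1000))       // 100000 (the 1-CPU container)
	fmt.Println(expectedQuota(3000))       // 300000 (the 3-CPU container)
	fmt.Println(expectedQuota(500 + 1000)) // 150000 (pod sandbox: sum of the two containers)
}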
f.Context("When checking the sidecar containers", feature.SidecarContainers, func() {
ginkgo.BeforeEach(func(ctx context.Context) {
reservedCPUs = cpuset.New(0)