diff --git a/test/e2e_node/cpumanager_test.go b/test/e2e_node/cpumanager_test.go
index e2ac29620f9..4c3a442dcdf 100644
--- a/test/e2e_node/cpumanager_test.go
+++ b/test/e2e_node/cpumanager_test.go
@@ -357,6 +357,130 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, framework.WithSerial(), featu
 			toleration := 1
 			gomega.Expect(pod).To(HaveContainerCPUsQuasiThreadSiblings("gu-container", toleration))
 		})
+
+		ginkgo.It("should allocate exclusively CPUs to a multi-container pod (1+2)", func(ctx context.Context) {
+			cpuCount := 3 // total
+			reservedCPUs := cpuset.New(0)
+
+			skipIfNotEnoughAllocatableCPUs(ctx, f, cpuCount+reservedCPUs.Size(), onlineCPUs)
+
+			_ = updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+				policyName:         string(cpumanager.PolicyStatic),
+				reservedSystemCPUs: reservedCPUs, // not strictly needed for the test, but it helps make the check more precise
+			}))
+
+			pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
+				{
+					ctnName:    "gu-container-1",
+					cpuRequest: "1000m",
+					cpuLimit:   "1000m",
+				},
+				{
+					ctnName:    "gu-container-2",
+					cpuRequest: "2000m",
+					cpuLimit:   "2000m",
+				},
+			})
+			ginkgo.By("creating the test pod")
+			pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
+			podMap[string(pod.UID)] = pod
+
+			ginkgo.By("checking if the expected cpuset was assigned")
+
+			// we cannot, nor should we, predict which CPUs the container gets
+			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-1", 1))
+			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-1", onlineCPUs))
+			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-1", reservedCPUs))
+
+			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-2", 2))
+			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-2", onlineCPUs))
+			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-2", reservedCPUs))
+			// TODO: this is probably too strict, but it is the closest to what the old test did
+			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-2"))
+		})
+
+		ginkgo.It("should allocate exclusively CPUs to a multi-container pod (3+2)", func(ctx context.Context) {
+			cpuCount := 5 // total
+			reservedCPUs := cpuset.New(0)
+
+			skipIfNotEnoughAllocatableCPUs(ctx, f, cpuCount+reservedCPUs.Size(), onlineCPUs)
+
+			_ = updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+				policyName:         string(cpumanager.PolicyStatic),
+				reservedSystemCPUs: reservedCPUs, // not strictly needed for the test, but it helps make the check more precise
+			}))
+
+			pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
+				{
+					ctnName:    "gu-container-1",
+					cpuRequest: "3000m",
+					cpuLimit:   "3000m",
+				},
+				{
+					ctnName:    "gu-container-2",
+					cpuRequest: "2000m",
+					cpuLimit:   "2000m",
+				},
+			})
+			ginkgo.By("creating the test pod")
+			pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
+			podMap[string(pod.UID)] = pod
+
+			ginkgo.By("checking if the expected cpuset was assigned")
+
+			// we cannot, nor should we, predict which CPUs the container gets
+			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-1", 3))
+			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-1", onlineCPUs))
+			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-1", reservedCPUs))
+			toleration := 1
+			gomega.Expect(pod).To(HaveContainerCPUsQuasiThreadSiblings("gu-container-1", toleration))
+
+			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-2", 2))
+			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-2", onlineCPUs))
+			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-2", reservedCPUs))
+			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-2"))
+		})
+
+		ginkgo.It("should allocate exclusively CPUs to a multi-container pod (4+2)", func(ctx context.Context) {
+			cpuCount := 6 // total
+			reservedCPUs := cpuset.New(0)
+
+			skipIfNotEnoughAllocatableCPUs(ctx, f, cpuCount+reservedCPUs.Size(), onlineCPUs)
+
+			_ = updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
+				policyName:         string(cpumanager.PolicyStatic),
+				reservedSystemCPUs: reservedCPUs, // not strictly needed for the test, but it helps make the check more precise
+			}))
+
+			pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
+				{
+					ctnName:    "gu-container-1",
+					cpuRequest: "4000m",
+					cpuLimit:   "4000m",
+				},
+				{
+					ctnName:    "gu-container-2",
+					cpuRequest: "2000m",
+					cpuLimit:   "2000m",
+				},
+			})
+			ginkgo.By("creating the test pod")
+			pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
+			podMap[string(pod.UID)] = pod
+
+			ginkgo.By("checking if the expected cpuset was assigned")
+
+			// we cannot, nor should we, predict which CPUs the container gets
+			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-1", 4))
+			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-1", onlineCPUs))
+			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-1", reservedCPUs))
+			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-1"))
+
+			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-2", 2))
+			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-2", onlineCPUs))
+			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-2", reservedCPUs))
+			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-2"))
+		})
 	})
 
 	ginkgo.When("running with strict CPU reservation", ginkgo.Label("strict-cpu-reservation"), func() {