e2e: node: initial multi-container tests

Rewrite the tests which exercise multiple containers within the
same pod. Preserve the existing testcases, add more.

Note that basic coverage for mixed pods (some containers requiring
exclusive CPUs, some not) was already added with the initial batch.
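
For illustration, a mixed pod in this sense can be built with the
ctnAttribute helper used throughout this file; the sketch below is
illustrative only (names and values are not taken from the new tests):

    pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
        {
            // integral CPU request equal to the limit: the static
            // policy grants this container exclusive CPUs
            ctnName:    "gu-container",
            cpuRequest: "1000m",
            cpuLimit:   "1000m",
        },
        {
            // fractional request (still request == limit, so the pod
            // keeps the Guaranteed QoS class): this container runs on
            // the shared CPU pool
            ctnName:    "non-gu-container",
            cpuRequest: "100m",
            cpuLimit:   "100m",
        },
    })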

Signed-off-by: Francesco Romani <fromani@redhat.com>

@@ -357,6 +357,130 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, framework.WithSerial(), featu
			toleration := 1
			gomega.Expect(pod).To(HaveContainerCPUsQuasiThreadSiblings("gu-container", toleration))
		})
		ginkgo.It("should allocate exclusively CPUs to a multi-container pod (1+2)", func(ctx context.Context) {
			cpuCount := 3 // total
			reservedCPUs := cpuset.New(0)
			skipIfNotEnoughAllocatableCPUs(ctx, f, cpuCount+reservedCPUs.Size(), onlineCPUs)
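			// reconfigure the kubelet to run the static CPU manager policy, under which
			// containers of Guaranteed pods with integral CPU requests get exclusive CPUs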
			_ = updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
				policyName:         string(cpumanager.PolicyStatic),
				reservedSystemCPUs: reservedCPUs, // Not really needed for the tests but helps to make a more precise check
			}))
			pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
				{
					ctnName:    "gu-container-1",
					cpuRequest: "1000m",
					cpuLimit:   "1000m",
				},
				{
					ctnName:    "gu-container-2",
					cpuRequest: "2000m",
					cpuLimit:   "2000m",
				},
			})
			ginkgo.By("creating the test pod")
			pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
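			// record the pod so the suite can clean it up when the test ends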
			podMap[string(pod.UID)] = pod
			ginkgo.By("checking if the expected cpuset was assigned")
			// we cannot, nor should we, predict which CPUs the container gets
			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-1", 1))
			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-1", onlineCPUs))
			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-1", reservedCPUs))
			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-2", 2))
			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-2", onlineCPUs))
			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-2", reservedCPUs))
			// TODO: this is probably too strict, but it is the closest to what the old test
			// did: with an even number of requested CPUs, we expect the allocation to be
			// made of full physical cores, i.e. pairs of thread siblings.
			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-2"))
		})
		ginkgo.It("should allocate exclusively CPUs to a multi-container pod (3+2)", func(ctx context.Context) {
			cpuCount := 5 // total
			reservedCPUs := cpuset.New(0)
			skipIfNotEnoughAllocatableCPUs(ctx, f, cpuCount+reservedCPUs.Size(), onlineCPUs)
			_ = updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
				policyName:         string(cpumanager.PolicyStatic),
				reservedSystemCPUs: reservedCPUs, // Not really needed for the tests but helps to make a more precise check
			}))
			pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
				{
					ctnName:    "gu-container-1",
					cpuRequest: "3000m",
					cpuLimit:   "3000m",
				},
				{
					ctnName:    "gu-container-2",
					cpuRequest: "2000m",
					cpuLimit:   "2000m",
				},
			})
			ginkgo.By("creating the test pod")
			pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
			podMap[string(pod.UID)] = pod
			ginkgo.By("checking if the expected cpuset was assigned")
			// we cannot, nor should we, predict which CPUs the container gets
			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-1", 3))
			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-1", onlineCPUs))
			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-1", reservedCPUs))
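			// with 2-way SMT, a 3-CPU allocation cannot consist only of full physical
			// cores, so tolerate one CPU whose thread sibling is not in the allocation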
			toleration := 1
			gomega.Expect(pod).To(HaveContainerCPUsQuasiThreadSiblings("gu-container-1", toleration))
			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-2", 2))
			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-2", onlineCPUs))
			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-2", reservedCPUs))
			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-2"))
		})
		ginkgo.It("should allocate exclusively CPUs to a multi-container pod (4+2)", func(ctx context.Context) {
			cpuCount := 6 // total
			reservedCPUs := cpuset.New(0)
			skipIfNotEnoughAllocatableCPUs(ctx, f, cpuCount+reservedCPUs.Size(), onlineCPUs)
			_ = updateKubeletConfigIfNeeded(ctx, f, configureCPUManagerInKubelet(oldCfg, &cpuManagerKubeletArguments{
				policyName:         string(cpumanager.PolicyStatic),
				reservedSystemCPUs: reservedCPUs, // Not really needed for the tests but helps to make a more precise check
			}))
			pod := makeCPUManagerPod("gu-pod", []ctnAttribute{
				{
					ctnName:    "gu-container-1",
					cpuRequest: "4000m",
					cpuLimit:   "4000m",
				},
				{
					ctnName:    "gu-container-2",
					cpuRequest: "2000m",
					cpuLimit:   "2000m",
				},
			})
			ginkgo.By("creating the test pod")
			pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
			podMap[string(pod.UID)] = pod
			ginkgo.By("checking if the expected cpuset was assigned")
			// we cannot, nor should we, predict which CPUs the container gets
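			// both containers request an even number of CPUs, so each allocation is
			// expected to be made of full physical cores (pairs of thread siblings)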
			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-1", 4))
			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-1", onlineCPUs))
			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-1", reservedCPUs))
			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-1"))
			gomega.Expect(pod).To(HaveContainerCPUsCount("gu-container-2", 2))
			gomega.Expect(pod).To(HaveContainerCPUsASubsetOf("gu-container-2", onlineCPUs))
			gomega.Expect(pod).ToNot(HaveContainerCPUsOverlapWith("gu-container-2", reservedCPUs))
			gomega.Expect(pod).To(HaveContainerCPUsThreadSiblings("gu-container-2"))
		})
	})
	ginkgo.When("running with strict CPU reservation", ginkgo.Label("strict-cpu-reservation"), func() {