e2e: node: cpumanager cgroup v1 compatibility

While we still support cgroup v1, we want some test coverage for it.
This patch enables v1 coverage for most of the testcases.
We intentionally leave out the CFS quota tests, because that
feature is meant to be supported only on cgroup v2.

Signed-off-by: Francesco Romani <fromani@redhat.com>
commit ea326373ef
parent 9529e101ea
Author: Francesco Romani
Date:   2025-07-08 15:48:37 +02:00


@@ -176,10 +176,6 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, ginkgo.ContinueOnFailure, fra
 	})
 	ginkgo.JustBeforeEach(func(ctx context.Context) {
-		if !e2enodeCgroupV2Enabled {
-			e2eskipper.Skipf("Skipping since CgroupV2 not used")
-		}
 		// note intentionally NOT set reservedCPUs - this must be initialized on a test-by-test basis
 		// use a closure to minimize the arguments, to make the usage more straightforward
@@ -1188,6 +1184,7 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, ginkgo.ContinueOnFailure, fra
ginkgo.When("checking the CFS quota management", ginkgo.Label("cfs-quota"), func() {
ginkgo.BeforeEach(func(ctx context.Context) {
requireCGroupV2()
// WARNING: this assumes 2-way SMT systems - we don't know how to access other SMT levels.
// this means on more-than-2-way SMT systems this test will prove nothing
reservedCPUs = cpuset.New(0)
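
The WARNING above (repeated in the next hunk) flags that these tests assume 2-way SMT. For context, a minimal sketch of how the SMT level can be probed from sysfs; the helper name smtLevel is hypothetical, and this is not necessarily how the suite performs the check:

package main

import (
	"fmt"
	"os"
	"strings"

	"k8s.io/utils/cpuset"
)

// smtLevel returns the number of hardware threads per physical core,
// derived from the sibling list of CPU 0: "0-1" and "0,64" both mean 2-way SMT.
func smtLevel() (int, error) {
	data, err := os.ReadFile("/sys/devices/system/cpu/cpu0/topology/thread_siblings_list")
	if err != nil {
		return 0, err
	}
	siblings, err := cpuset.Parse(strings.TrimSpace(string(data)))
	if err != nil {
		return 0, err
	}
	return siblings.Size(), nil
}

func main() {
	level, err := smtLevel()
	if err != nil {
		panic(err)
	}
	fmt.Printf("SMT level: %d-way\n", level)
}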
@@ -1353,6 +1350,7 @@ var _ = SIGDescribe("CPU Manager", ginkgo.Ordered, ginkgo.ContinueOnFailure, fra
 		// don't duplicate all the tests
 		ginkgo.BeforeEach(func(ctx context.Context) {
+			requireCGroupV2()
 			// WARNING: this assumes 2-way SMT systems - we don't know how to access other SMT levels.
 			// this means on more-than-2-way SMT systems this test will prove nothing
 			reservedCPUs = cpuset.New(0)
@@ -1696,12 +1694,13 @@ func HaveContainerCPUsQuasiThreadSiblings(ctnName string, toleration int) types.
 // Other helpers
 func getContainerAllowedCPUs(pod *v1.Pod, ctnName string, isInit bool) (cpuset.CPUSet, error) {
-	cgPath, err := makeCgroupPathForContainer(pod, ctnName, isInit)
+	cgPath, err := makeCgroupPathForContainer(pod, ctnName, isInit, e2enodeCgroupV2Enabled)
 	if err != nil {
 		return cpuset.CPUSet{}, err
 	}
+	cgPath = filepath.Join(cgPath, cpusetFileNameFromVersion(e2enodeCgroupV2Enabled))
 	framework.Logf("pod %s/%s cnt %s qos=%s path %q", pod.Namespace, pod.Name, ctnName, pod.Status.QOSClass, cgPath)
-	data, err := os.ReadFile(filepath.Join(cgPath, "cpuset.cpus.effective"))
+	data, err := os.ReadFile(cgPath)
 	if err != nil {
 		return cpuset.CPUSet{}, err
 	}
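
With the file name factored out into cpusetFileNameFromVersion (added later in this diff), the v1 and v2 branches parse identically, since cpuset.cpus and cpuset.cpus.effective share the same list syntax. A self-contained sketch of the read-and-parse step, with the illustrative helper name readAllowedCPUs:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"k8s.io/utils/cpuset"
)

// readAllowedCPUs reads a cpuset list such as "0-3,7" from the given cgroup
// directory; fileName is "cpuset.cpus.effective" on v2, "cpuset.cpus" on v1.
func readAllowedCPUs(cgDir, fileName string) (cpuset.CPUSet, error) {
	data, err := os.ReadFile(filepath.Join(cgDir, fileName))
	if err != nil {
		return cpuset.CPUSet{}, err
	}
	return cpuset.Parse(strings.TrimSpace(string(data)))
}

func main() {
	// On a v2 host the root cgroup exposes the effective cpuset directly.
	cpus, err := readAllowedCPUs("/sys/fs/cgroup", "cpuset.cpus.effective")
	if err != nil {
		panic(err)
	}
	fmt.Printf("allowed CPUs: %s (count=%d)\n", cpus.String(), cpus.Size())
}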
@@ -1711,7 +1710,10 @@ func getContainerAllowedCPUs(pod *v1.Pod, ctnName string, isInit bool) (cpuset.C
 }

 func getSandboxCFSQuota(pod *v1.Pod) (string, error) {
-	cgPath := filepath.Join(makeCgroupPathForPod(pod), "cpu.max")
+	if !e2enodeCgroupV2Enabled {
+		return "", fmt.Errorf("only Cgroup V2 is supported")
+	}
+	cgPath := filepath.Join(makeCgroupPathForPod(pod, true), "cpu.max")
 	data, err := os.ReadFile(cgPath)
 	if err != nil {
 		return "", err
@@ -1722,7 +1724,10 @@ func getSandboxCFSQuota(pod *v1.Pod) (string, error) {
 }

 func getContainerCFSQuota(pod *v1.Pod, ctnName string, isInit bool) (string, error) {
-	cgPath, err := makeCgroupPathForContainer(pod, ctnName, isInit)
+	if !e2enodeCgroupV2Enabled {
+		return "", fmt.Errorf("only Cgroup V2 is supported")
+	}
+	cgPath, err := makeCgroupPathForContainer(pod, ctnName, isInit, true)
 	if err != nil {
 		return "", err
 	}
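
Both CFS quota helpers hard-fail on v1 because they read the v2-only cpu.max file, a single line of the form "$QUOTA $PERIOD" where the literal max means unlimited; on v1 the same information is split across cpu.cfs_quota_us and cpu.cfs_period_us, which these tests deliberately do not support. A sketch of parsing the v2 format; parseCPUMax is an illustrative name, not a helper from this file:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// parseCPUMax parses a cgroup v2 cpu.max line such as "50000 100000" or
// "max 100000". A negative quota means no limit.
func parseCPUMax(data string) (quota, period int64, err error) {
	fields := strings.Fields(strings.TrimSpace(data))
	if len(fields) != 2 {
		return 0, 0, fmt.Errorf("unexpected cpu.max content: %q", data)
	}
	if fields[0] == "max" {
		quota = -1
	} else if quota, err = strconv.ParseInt(fields[0], 10, 64); err != nil {
		return 0, 0, err
	}
	if period, err = strconv.ParseInt(fields[1], 10, 64); err != nil {
		return 0, 0, err
	}
	return quota, period, nil
}

func main() {
	data, err := os.ReadFile("/sys/fs/cgroup/kubepods.slice/cpu.max") // path is illustrative
	if err != nil {
		panic(err)
	}
	quota, period, err := parseCPUMax(string(data))
	if err != nil {
		panic(err)
	}
	fmt.Printf("quota=%d period=%d\n", quota, period)
}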
@@ -1739,10 +1744,12 @@ const (
 	kubeCgroupRoot = "/sys/fs/cgroup"
 )

-// example path (systemd):
+// example path (systemd, crio, v2):
 // /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b7632a2_a56e_4278_987a_22de18008dbe.slice/crio-conmon-0bc5eac79e3ae7a0c2651f14722aa10fa333eb2325c2ca97da33aa284cda81b0.scope
+// example path (cgroup, containerd, v1):
+// /sys/fs/cgroup/cpuset/kubepods/burstable/pod8e414e92-17c2-41de-81c7-0045bba9103b/b5791f89a6971bb4a751ffbebf533399c91630aa2906d7c6b5e239f405f3b97a
-func makeCgroupPathForPod(pod *v1.Pod) string {
+func makeCgroupPathForPod(pod *v1.Pod, isV2 bool) string {
 	components := []string{defaultNodeAllocatableCgroup}
 	if pod.Status.QOSClass != v1.PodQOSGuaranteed {
 		components = append(components, strings.ToLower(string(pod.Status.QOSClass)))
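
The two example paths differ in two ways: the systemd driver nests every cgroup component as a .slice unit, escaping the dashes inside the pod UID to underscores, while the cgroupfs driver simply joins the components; and on v1 the hierarchy lives under a per-controller directory (cpuset), which the next hunk prepends. A sketch of the systemd-style expansion that reproduces the v2 example path above; expandSlice is a hypothetical stand-in for the ToSystemd-style helper the test relies on:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// expandSlice nests cgroup name components as systemd slices: each level is
// the dash-joined prefix of all components so far, with a ".slice" suffix.
// Dashes inside a component (the pod UID) are escaped to underscores first.
func expandSlice(components []string) string {
	path := ""
	accu := ""
	for _, comp := range components {
		comp = strings.ReplaceAll(comp, "-", "_")
		if accu == "" {
			accu = comp
		} else {
			accu = accu + "-" + comp
		}
		path = filepath.Join(path, accu+".slice")
	}
	return path
}

func main() {
	p := expandSlice([]string{"kubepods", "burstable", "pod0b7632a2-a56e-4278-987a-22de18008dbe"})
	fmt.Println(filepath.Join("/sys/fs/cgroup", p))
	// prints the v2 example path from the comment above:
	// /sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b7632a2_a56e_4278_987a_22de18008dbe.slice
}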
@@ -1757,11 +1764,13 @@ func makeCgroupPathForPod(pod *v1.Pod) string {
 	} else {
 		cgroupFsName = cgroupName.ToCgroupfs()
 	}
+	if !isV2 {
+		cgroupFsName = filepath.Join("cpuset", cgroupFsName)
+	}
 	return filepath.Join(kubeCgroupRoot, cgroupFsName)
 }

-func makeCgroupPathForContainer(pod *v1.Pod, ctnName string, isInit bool) (string, error) {
+func makeCgroupPathForContainer(pod *v1.Pod, ctnName string, isInit, isV2 bool) (string, error) {
 	fullCntID, ok := findContainerIDByName(pod, ctnName, isInit)
 	if !ok {
 		return "", fmt.Errorf("cannot find status for container %q", ctnName)
@@ -1777,7 +1786,14 @@ func makeCgroupPathForContainer(pod *v1.Pod, ctnName string, isInit bool) (strin
 		cntPath = cntID
 	}
-	return filepath.Join(makeCgroupPathForPod(pod), cntPath), nil
+	return filepath.Join(makeCgroupPathForPod(pod, isV2), cntPath), nil
 }

+func cpusetFileNameFromVersion(isV2 bool) string {
+	if isV2 {
+		return "cpuset.cpus.effective"
+	}
+	return "cpuset.cpus"
+}
+
 func containerCgroupPathPrefixFromDriver(runtimeName string) string {
@@ -1998,3 +2014,11 @@ func makeCPUManagerBEPod(podName string, ctnAttributes []ctnAttribute) *v1.Pod {
 		},
 	}
 }
+
+func requireCGroupV2() {
+	if e2enodeCgroupV2Enabled {
+		return
+	}
+	e2eskipper.Skipf("Skipping since CgroupV2 not used")
+}