Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-04 12:18:16 +00:00)
	Merge pull request #130163 from ffromani/e2e-node-fix-cpu-quota-test
e2e: node: cpumgr: cleanup after each test case
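In short, runCfsQuotaGuPods previously relied on a single ginkgo.DeferCleanup block that deleted every pod at the very end of the test; it now deletes each pod as soon as its checks pass (via a deleteTestPod closure over a UID-keyed map) and keeps DeferCleanup only as a safety net for whatever a failed case leaves behind. A minimal standalone sketch of that pattern, with simplified stand-ins (pod, deletePod, and run are illustrative, not the framework API):

package main

import (
	"context"
	"fmt"
	"time"
)

type pod struct{ UID, Namespace, Name string }

// deletePod stands in for the real deletePodSyncAndWait helper: delete the
// pod, then block until its containers are actually gone.
func deletePod(ctx context.Context, p *pod) {
	_ = ctx // the real helper honors ctx while polling for container removal
	fmt.Printf("deleted pod: %s/%s\n", p.Namespace, p.Name)
}

func run() {
	podsToClean := make(map[string]*pod) // pod UID -> pod, as in the PR

	// happy path: delete a pod right after its assertions, and forget it
	deleteTestPod := func(p *pod) {
		// detached context: the parent test context may be near its
		// deadline, and cleanup must not be cut short by it
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
		defer cancel()
		deletePod(ctx, p)
		delete(podsToClean, p.UID)
	}

	// safety net: sweep whatever a failed test case left behind
	defer func() {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
		defer cancel()
		for _, p := range podsToClean {
			deletePod(ctx, p)
		}
	}()

	p1 := &pod{UID: "uid-1", Namespace: "ns", Name: "gu-pod1"}
	podsToClean[p1.UID] = p1
	// ... run assertions against p1 ...
	deleteTestPod(p1)
}

func main() { run() }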
@@ -592,24 +592,36 @@ func runMultipleCPUContainersGuPod(ctx context.Context, f *framework.Framework)
 	waitForContainerRemoval(ctx, pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
 }
 
-func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
+func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool, cpuAlloc int64) {
 	var err error
 	var ctnAttrs []ctnAttribute
 	var pod1, pod2, pod3 *v1.Pod
-	var cleanupPods []*v1.Pod
-	ginkgo.DeferCleanup(func() {
-		// waitForContainerRemoval takes "long" to complete; if we use the parent ctx we get a
-		// 'deadline expired' message and the cleanup aborts, which we don't want.
-		ctx2 := context.TODO()
-		ginkgo.By("by deleting the pods and waiting for container removal")
-		for _, cleanupPod := range cleanupPods {
-			framework.Logf("deleting pod: %s/%s", cleanupPod.Namespace, cleanupPod.Name)
-			deletePodSyncByName(ctx2, f, cleanupPod.Name)
-			waitForContainerRemoval(ctx2, cleanupPod.Spec.Containers[0].Name, cleanupPod.Name, cleanupPod.Namespace)
-			framework.Logf("deleted pod: %s/%s", cleanupPod.Namespace, cleanupPod.Name)
-		}
-	})
+	podsToClean := make(map[string]*v1.Pod) // pod.UID -> pod
+
+	framework.Logf("runCfsQuotaGuPods: disableQuota=%v, CPU Allocatable=%v", disabledCPUQuotaWithExclusiveCPUs, cpuAlloc)
+
+	deleteTestPod := func(pod *v1.Pod) {
+		// waitForContainerRemoval takes "long" to complete; if we use the parent ctx we get a
+		// 'deadline expired' message and the cleanup aborts, which we don't want.
+		// So let's use a separate and more generous timeout (determined by trial and error)
+		ctx2, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+		defer cancel()
+		deletePodSyncAndWait(ctx2, f, pod.Namespace, pod.Name)
+		delete(podsToClean, string(pod.UID))
+	}
+
+	// cleanup leftovers on test failure. The happy path is covered by `deleteTestPod` calls
+	ginkgo.DeferCleanup(func() {
+		ginkgo.By("by deleting the pods and waiting for container removal")
+		// waitForContainerRemoval takes "long" to complete; if we use the parent ctx we get a
+		// 'deadline expired' message and the cleanup aborts, which we don't want.
+		// So let's use a separate and more generous timeout (determined by trial and error)
+		ctx2, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+		defer cancel()
+		deletePodsAsync(ctx2, f, podsToClean)
+	})
 
+	podCFSCheckCommand := []string{"sh", "-c", `cat $(find /sysfscgroup | grep "$(cat /podinfo/uid | sed 's/-/_/g').slice/cpu.max$") && sleep 1d`}
 	cfsCheckCommand := []string{"sh", "-c", "cat /sys/fs/cgroup/cpu.max && sleep 1d"}
 	defaultPeriod := "100000"
 
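Note the podCFSCheckCommand hoisted to the top of the function here (it was previously declared further down, next to gu-pod5): it reads the pod-level cgroup's cpu.max by taking the pod UID from the downward-API file /podinfo/uid, swapping dashes for underscores as systemd slice names require, and grepping the cgroupfs mount for the matching .slice/cpu.max path. The same UID-to-slice transformation as a small Go sketch (podSliceSuffix is illustrative; the path layout assumes the systemd cgroup driver):

package main

import (
	"fmt"
	"strings"
)

// podSliceSuffix builds the tail of the pod-level cgroup path: systemd unit
// names escape '-', so the pod UID shows up with underscores.
func podSliceSuffix(podUID string) string {
	return strings.ReplaceAll(podUID, "-", "_") + ".slice/cpu.max"
}

func main() {
	uid := "0f06f5d9-0c0f-4c5b-8d19-2f1a4c6d2e11" // example UID, not from the test
	fmt.Println(podSliceSuffix(uid))
	// 0f06f5d9_0c0f_4c5b_8d19_2f1a4c6d2e11.slice/cpu.max
}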
@@ -623,7 +635,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	pod1 = makeCPUManagerPod("gu-pod1", ctnAttrs)
 	pod1.Spec.Containers[0].Command = cfsCheckCommand
 	pod1 = e2epod.NewPodClient(f).CreateSync(ctx, pod1)
-	cleanupPods = append(cleanupPods, pod1)
+	podsToClean[string(pod1.UID)] = pod1
 
 	ginkgo.By("checking if the expected cfs quota was assigned (GU pod, exclusive CPUs, unlimited)")
 
@@ -635,6 +647,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod1.Name, pod1.Spec.Containers[0].Name, expCFSQuotaRegex)
 	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 		pod1.Spec.Containers[0].Name, pod1.Name)
+	deleteTestPod(pod1)
 
 	ctnAttrs = []ctnAttribute{
 		{
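For context, the expCFSQuotaRegex values checked above are built elsewhere in this function as fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod), matching cgroup v2's cpu.max format: a single line of "<quota> <period>", where the quota is the literal string max when no CFS limit is set. A tiny self-contained checker in the same spirit (quotaRegex is illustrative, not the test helper):

package main

import (
	"fmt"
	"regexp"
)

// quotaRegex mirrors how the test derives expCFSQuotaRegex from an expected
// quota and the default 100000us period.
func quotaRegex(expectedQuota, period string) *regexp.Regexp {
	return regexp.MustCompile(fmt.Sprintf("^%s %s\n$", expectedQuota, period))
}

func main() {
	unlimited := quotaRegex("max", "100000")
	fmt.Println(unlimited.MatchString("max 100000\n"))    // true: no CFS limit
	fmt.Println(unlimited.MatchString("150000 100000\n")) // false: capped at 1.5 CPU
}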
@@ -646,7 +659,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	pod2 = makeCPUManagerPod("gu-pod2", ctnAttrs)
 	pod2.Spec.Containers[0].Command = cfsCheckCommand
 	pod2 = e2epod.NewPodClient(f).CreateSync(ctx, pod2)
-	cleanupPods = append(cleanupPods, pod2)
+	podsToClean[string(pod2.UID)] = pod2
 
 	ginkgo.By("checking if the expected cfs quota was assigned (GU pod, limited)")
 
@@ -655,6 +668,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod2.Name, pod2.Spec.Containers[0].Name, expCFSQuotaRegex)
 	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 		pod2.Spec.Containers[0].Name, pod2.Name)
+	deleteTestPod(pod2)
 
 	ctnAttrs = []ctnAttribute{
 		{
@@ -666,7 +680,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	pod3 = makeCPUManagerPod("non-gu-pod3", ctnAttrs)
 	pod3.Spec.Containers[0].Command = cfsCheckCommand
 	pod3 = e2epod.NewPodClient(f).CreateSync(ctx, pod3)
-	cleanupPods = append(cleanupPods, pod3)
+	podsToClean[string(pod3.UID)] = pod3
 
 	ginkgo.By("checking if the expected cfs quota was assigned (BU pod, limited)")
 
@@ -675,7 +689,9 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod3.Name, pod3.Spec.Containers[0].Name, expCFSQuotaRegex)
 	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 		pod3.Spec.Containers[0].Name, pod3.Name)
+	deleteTestPod(pod3)
 
+	if cpuAlloc >= 2 {
 		ctnAttrs = []ctnAttribute{
 			{
 				ctnName:    "gu-container-non-int-values",
@@ -692,7 +708,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 		pod4.Spec.Containers[0].Command = cfsCheckCommand
 		pod4.Spec.Containers[1].Command = cfsCheckCommand
 		pod4 = e2epod.NewPodClient(f).CreateSync(ctx, pod4)
-	cleanupPods = append(cleanupPods, pod4)
+		podsToClean[string(pod4.UID)] = pod4
 
 		ginkgo.By("checking if the expected cfs quota was assigned (GU pod, container 0 exclusive CPUs unlimited, container 1 limited)")
 
@@ -709,6 +725,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 		err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod4.Name, pod4.Spec.Containers[1].Name, expCFSQuotaRegex)
 		framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]",
 			pod4.Spec.Containers[1].Name, pod4.Name)
+		deleteTestPod(pod4)
 
 		ctnAttrs = []ctnAttribute{
 			{
@@ -723,12 +740,11 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 			},
 		}
 
-	podCFSCheckCommand := []string{"sh", "-c", `cat $(find /sysfscgroup | grep "$(cat /podinfo/uid | sed 's/-/_/g').slice/cpu.max$") && sleep 1d`}
 		pod5 := makeCPUManagerPod("gu-pod5", ctnAttrs)
 		pod5.Spec.Containers[0].Command = podCFSCheckCommand
 		pod5 = e2epod.NewPodClient(f).CreateSync(ctx, pod5)
-	cleanupPods = append(cleanupPods, pod5)
+		podsToClean[string(pod5.UID)] = pod5
 
 		ginkgo.By("checking if the expected cfs quota was assigned to pod (GU pod, unlimited)")
 
 		expectedQuota = "150000"
@@ -741,6 +757,10 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 
 		err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod5.Name, pod5.Spec.Containers[0].Name, expCFSQuotaRegex)
 		framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod5.Spec.Containers[0].Name, pod5.Name)
+		deleteTestPod(pod5)
+	} else {
+		ginkgo.By(fmt.Sprintf("some cases SKIPPED - requests at least %d allocatable cores, got %d", 2, cpuAlloc))
+	}
 
 	ctnAttrs = []ctnAttribute{
 		{
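This hunk is where the new cpuAlloc parameter pays off: the gu-pod4/gu-pod5 cases need at least two allocatable CPUs, so they now run conditionally and report a skip instead of failing on small nodes. The shape of that gate, as a runnable sketch (runIfEnoughCPUs is illustrative, not a test helper):

package main

import "fmt"

// runIfEnoughCPUs runs the multi-CPU sub-cases only when the node can grant
// at least `need` exclusive CPUs; otherwise it reports a skip.
func runIfEnoughCPUs(cpuAlloc, need int64, run func()) string {
	if cpuAlloc >= need {
		run()
		return "multi-CPU cases executed"
	}
	return fmt.Sprintf("some cases SKIPPED - requests at least %d allocatable cores, got %d", need, cpuAlloc)
}

func main() {
	fmt.Println(runIfEnoughCPUs(1, 2, func() {})) // skipped on a 1-CPU node
	fmt.Println(runIfEnoughCPUs(4, 2, func() {})) // runs on a 4-CPU node
}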
@@ -753,7 +773,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	pod6 := makeCPUManagerPod("gu-pod6", ctnAttrs)
 	pod6.Spec.Containers[0].Command = podCFSCheckCommand
 	pod6 = e2epod.NewPodClient(f).CreateSync(ctx, pod6)
-	cleanupPods = append(cleanupPods, pod6)
+	podsToClean[string(pod6.UID)] = pod6
 
 	ginkgo.By("checking if the expected cfs quota was assigned to pod (GU pod, limited)")
 
@@ -761,7 +781,7 @@ func runCfsQuotaGuPods(ctx context.Context, f *framework.Framework, disabledCPUQuotaWithExclusiveCPUs bool) {
 	expCFSQuotaRegex = fmt.Sprintf("^%s %s\n$", expectedQuota, defaultPeriod)
 	err = e2epod.NewPodClient(f).MatchContainerOutput(ctx, pod6.Name, pod6.Spec.Containers[0].Name, expCFSQuotaRegex)
 	framework.ExpectNoError(err, "expected log not found in container [%s] of pod [%s]", pod6.Spec.Containers[0].Name, pod6.Name)
+	deleteTestPod(pod6)
 }
 
 func runMultipleGuPods(ctx context.Context, f *framework.Framework) {
@@ -921,6 +941,10 @@ func runCPUManagerTests(f *framework.Framework) {
 		if !IsCgroup2UnifiedMode() {
 			e2eskipper.Skipf("Skipping since CgroupV2 not used")
 		}
+		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+		if cpuAlloc < 1 { // save expensive kubelet restart
+			e2eskipper.Skipf("Skipping since not enough allocatable CPU got %d required 1", cpuAlloc)
+		}
 		newCfg := configureCPUManagerInKubelet(oldCfg,
 			&cpuManagerKubeletArguments{
 				policyName:                       string(cpumanager.PolicyStatic),
@@ -929,13 +953,19 @@ func runCPUManagerTests(f *framework.Framework) {
 			},
 		)
 		updateKubeletConfig(ctx, f, newCfg, true)
-		runCfsQuotaGuPods(ctx, f, true)
+
+		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f) // check again after we reserved 1 full CPU. Some tests require > 1 exclusive CPU
+		runCfsQuotaGuPods(ctx, f, true, cpuAlloc)
 	})
 
 	ginkgo.It("should keep enforcing the CFS quota for containers with static CPUs assigned and feature gate disabled", func(ctx context.Context) {
 		if !IsCgroup2UnifiedMode() {
 			e2eskipper.Skipf("Skipping since CgroupV2 not used")
 		}
+		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
+		if cpuAlloc < 1 { // save expensive kubelet restart
+			e2eskipper.Skipf("Skipping since not enough allocatable CPU got %d required 1", cpuAlloc)
+		}
 		newCfg := configureCPUManagerInKubelet(oldCfg,
 			&cpuManagerKubeletArguments{
 				policyName:                       string(cpumanager.PolicyStatic),
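Note the pattern in these two test bodies: allocatable CPU is checked once before configuring the kubelet (so a too-small node is skipped cheaply, before paying for a kubelet restart) and again after updateKubeletConfig, because reserving a full CPU for the static policy shrinks the allocatable count. A toy illustration of why the second read is needed (all names and numbers here are hypothetical, not the framework API):

package main

import "fmt"

var reserved int64 // becomes 1 once the static policy reserves a full CPU

func allocatableCPUs() int64 { return 4 - reserved } // toy node with 4 CPUs

func reconfigureKubelet() { reserved = 1 } // stand-in for the kubelet restart

func main() {
	if allocatableCPUs() < 1 { // cheap skip before the expensive restart
		fmt.Println("skip: not enough allocatable CPU")
		return
	}
	reconfigureKubelet()
	// re-read: the reservation changed the answer from 4 to 3
	fmt.Printf("allocatable after restart: %d\n", allocatableCPUs())
}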
@@ -945,7 +975,9 @@ func runCPUManagerTests(f *framework.Framework) {
 		)
 
 		updateKubeletConfig(ctx, f, newCfg, true)
-		runCfsQuotaGuPods(ctx, f, false)
+
+		_, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f) // check again after we reserved 1 full CPU. Some tests require > 1 exclusive CPU
+		runCfsQuotaGuPods(ctx, f, false, cpuAlloc)
 	})
 
 	f.It("should not reuse CPUs of restartable init containers", feature.SidecarContainers, func(ctx context.Context) {
@@ -463,14 +463,19 @@ func deletePodsAsync(ctx context.Context, f *framework.Framework, podMap map[string]*v1.Pod) {
 		go func(podNS, podName string) {
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
-
-			deletePodSyncByName(ctx, f, podName)
-			waitForAllContainerRemoval(ctx, podName, podNS)
+			deletePodSyncAndWait(ctx, f, podNS, podName)
 		}(pod.Namespace, pod.Name)
 	}
 	wg.Wait()
 }
 
+func deletePodSyncAndWait(ctx context.Context, f *framework.Framework, podNS, podName string) {
+	framework.Logf("deleting pod: %s/%s", podNS, podName)
+	deletePodSyncByName(ctx, f, podName)
+	waitForAllContainerRemoval(ctx, podName, podNS)
+	framework.Logf("deleted pod: %s/%s", podNS, podName)
+}
+
 func runTopologyManagerNegativeTest(ctx context.Context, f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
 	podName := "gu-pod"
 	framework.Logf("creating pod %s attrs %v", podName, ctnAttrs)
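The helper extraction above (deletePodSyncAndWait) lets the sequential happy-path deletions and the parallel fallback share one code path. A standalone sketch of the fan-out-and-join shape of deletePodsAsync (the pod type and helper body are stand-ins; the real goroutines also defer ginkgo.GinkgoRecover() so a failed assertion is routed to Ginkgo instead of crashing the process):

package main

import (
	"fmt"
	"sync"
)

type pod struct{ Namespace, Name string }

// deletePodSyncAndWait stands in for the new helper: delete the pod, then
// block until all of its containers are gone, logging on both sides.
func deletePodSyncAndWait(ns, name string) {
	fmt.Printf("deleting pod: %s/%s\n", ns, name)
	// ... delete and poll for container removal ...
	fmt.Printf("deleted pod: %s/%s\n", ns, name)
}

// deletePodsAsync fans out one goroutine per pod and joins them with a
// WaitGroup, as in the hunk above.
func deletePodsAsync(podMap map[string]*pod) {
	var wg sync.WaitGroup
	for _, p := range podMap {
		wg.Add(1)
		go func(ns, name string) {
			defer wg.Done()
			deletePodSyncAndWait(ns, name)
		}(p.Namespace, p.Name)
	}
	wg.Wait() // every pod fully removed before cleanup returns
}

func main() {
	deletePodsAsync(map[string]*pod{
		"uid-1": {Namespace: "ns", Name: "gu-pod1"},
		"uid-2": {Namespace: "ns", Name: "gu-pod2"},
	})
}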