Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-10-31 18:28:13 +00:00)
	Merge pull request #123386 from kannon92/mark-jobs-as-flaky
mark flaky jobs as flaky and move them to a different job
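The change is the same in every hunk below: each flaky spec moves from the raw ginkgo.It / ginkgo.Context entry points to the e2e framework wrappers framework.It / framework.Context and gains a framework.WithFlaky() argument, which tags the spec as flaky so a dedicated flaky job can pick it up instead of the regular serial jobs. As a rough illustration of the pattern (not the framework's actual implementation — the helper names itFlaky and contextFlaky are hypothetical), a minimal sketch against ginkgo v2 could look like:

package e2esketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// itFlaky is a hypothetical, simplified stand-in for
// framework.It(text, framework.WithFlaky(), body): it appends a
// "[Flaky]" tag to the spec name and attaches a ginkgo v2 "Flaky"
// label, so CI jobs can include or exclude the spec by label.
func itFlaky(text string, body func(ctx context.Context)) bool {
	return ginkgo.It(text+" [Flaky]", ginkgo.Label("Flaky"), body)
}

// contextFlaky mirrors framework.Context(text, framework.WithFlaky(), body)
// for a whole group of specs.
func contextFlaky(text string, body func()) bool {
	return ginkgo.Context(text+" [Flaky]", ginkgo.Label("Flaky"), body)
}
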
@@ -401,7 +401,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceMana
 				Should(HaveAllocatableDevices())
 		})
 
-		ginkgo.It("should deploy pod consuming devices first but fail with admission error after kubelet restart in case device plugin hasn't re-registered", func(ctx context.Context) {
+		framework.It("should deploy pod consuming devices first but fail with admission error after kubelet restart in case device plugin hasn't re-registered", framework.WithFlaky(), func(ctx context.Context) {
 			var err error
 			podCMD := "while true; do sleep 1000; done;"

@@ -844,7 +844,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 		// simulate node reboot scenario by removing pods using CRI before kubelet is started. In addition to that,
 		// intentionally a scenario is created where after node reboot, application pods requesting devices appear before the device plugin pod
 		// exposing those devices as resource has restarted. The expected behavior is that the application pod fails at admission time.
-		ginkgo.It("Keeps device plugin assignments across node reboots (no pod restart, no device plugin re-registration)", func(ctx context.Context) {
+		framework.It("Keeps device plugin assignments across node reboots (no pod restart, no device plugin re-registration)", framework.WithFlaky(), func(ctx context.Context) {
 			podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
 			pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
 			deviceIDRE := "stub devices: (Dev-[0-9]+)"

@@ -376,7 +376,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
 		})
 	})
 
-	ginkgo.Context("when gracefully shutting down with Pod priority", func() {
+	framework.Context("when gracefully shutting down with Pod priority", framework.WithFlaky(), func() {
 
 		const (
 			pollInterval                 = 1 * time.Second

@@ -953,7 +953,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 		})
 	})
 
-	ginkgo.Context("without SRIOV devices in the system", func() {
+	framework.Context("without SRIOV devices in the system", framework.WithFlaky(), func() {
 		ginkgo.BeforeEach(func() {
 			requireLackOfSRIOVDevices()
 		})

@@ -1224,7 +1224,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 		})
 	})
 
-	ginkgo.Context("with the builtin rate limit values", func() {
+	framework.Context("with the builtin rate limit values", framework.WithFlaky(), func() {
 		ginkgo.It("should hit throttling when calling podresources List in a tight loop", func(ctx context.Context) {
 			// ensure APIs have been called at least once
 			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)

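Once the specs carry the flaky tag, the split into a different job happens at the CI layer rather than in this diff: with ginkgo v2 label filtering, for example, a flaky-only job could run with --label-filter=Flaky while blocking jobs run with --label-filter='!Flaky'. The actual job definitions live in the CI configuration, which the diff shown here does not touch.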