mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-10-30 17:58:14 +00:00 
			
		
		
		
	Use ExpectNoError(err)
We missed another pattern for checking that an error does not occur in e2e tests. This commit covers that pattern as well, for a consistent test implementation.
This commit is contained in:
		| @@ -35,6 +35,10 @@ do | ||||
|     then | ||||
|         errors_expect_no_error+=( "${file}" ) | ||||
|     fi | ||||
|     if grep -E "Expect\(err\)\.To\(gomega\.BeNil\(\)\)" "${file}" > /dev/null | ||||
|     then | ||||
|         errors_expect_no_error+=( "${file}" ) | ||||
|     fi | ||||
| done | ||||
|  | ||||
| errors_expect_error=() | ||||
|   | ||||
| @@ -415,7 +415,7 @@ func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensi | ||||
| 		}, | ||||
| 	} | ||||
| 	_, err := customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.By("v2 custom resource should be converted") | ||||
| 	v2crd, err := customResourceClients["v2"].Get(context.TODO(), name, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err, "Getting v2 of custom resource %s", name) | ||||
| @@ -440,13 +440,13 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { | ||||
| 		}, | ||||
| 	} | ||||
| 	_, err := customResourceClients["v1"].Create(context.TODO(), crInstance, metav1.CreateOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Now cr-instance-1 is stored as v1. lets change storage version | ||||
| 	crd, err = integration.UpdateV1CustomResourceDefinitionWithRetry(testCrd.APIExtensionClient, crd.Name, func(c *apiextensionsv1.CustomResourceDefinition) { | ||||
| 		c.Spec.Versions = alternativeAPIVersions | ||||
| 	}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.By("Create a v2 custom resource") | ||||
| 	crInstance = &unstructured.Unstructured{ | ||||
| 		Object: map[string]interface{}{ | ||||
| @@ -471,13 +471,13 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Now that we have a v1 and v2 object, both list operation in v1 and v2 should work as expected. | ||||
|  | ||||
| 	ginkgo.By("List CRs in v1") | ||||
| 	list, err := customResourceClients["v1"].List(context.TODO(), metav1.ListOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2)) | ||||
| 	framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) || | ||||
| 		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1), true) | ||||
| @@ -486,7 +486,7 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) { | ||||
|  | ||||
| 	ginkgo.By("List CRs in v2") | ||||
| 	list, err = customResourceClients["v2"].List(context.TODO(), metav1.ListOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2)) | ||||
| 	framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) || | ||||
| 		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1), true) | ||||
|   | ||||
| @@ -48,7 +48,6 @@ import ( | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo" | ||||
| 	"github.com/onsi/gomega" | ||||
|  | ||||
| 	// ensure libs have a chance to initialize | ||||
| 	_ "github.com/stretchr/testify/assert" | ||||
| @@ -1064,7 +1063,7 @@ func testMutatingPodWebhook(f *framework.Framework) { | ||||
| 	client := f.ClientSet | ||||
| 	pod := toBeMutatedPod(f) | ||||
| 	mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	if len(mutatedPod.Spec.InitContainers) != 1 { | ||||
| 		framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers) | ||||
| 	} | ||||
| @@ -2211,9 +2210,9 @@ func testSlowWebhookTimeoutNoError(f *framework.Framework) { | ||||
| 	client := f.ClientSet | ||||
| 	name := "e2e-test-slow-webhook-configmap" | ||||
| 	_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}}, metav1.CreateOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| } | ||||
|  | ||||
| // createAdmissionWebhookMultiVersionTestCRDWithV1Storage creates a new CRD specifically | ||||
|   | ||||
| @@ -222,7 +222,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { | ||||
| 		event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, | ||||
| 			recordEvents(events, conditions.PodCompleted), | ||||
| 		) | ||||
| 		gomega.Expect(err).To(gomega.BeNil()) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		checkInvariants(events, containerInitInvariant) | ||||
| 		endPod := event.Object.(*v1.Pod) | ||||
| @@ -299,7 +299,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { | ||||
| 		ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout) | ||||
| 		defer cancel() | ||||
| 		event, err := watchtools.Until(ctx, startedPod.ResourceVersion, w, recordEvents(events, conditions.PodRunning)) | ||||
| 		gomega.Expect(err).To(gomega.BeNil()) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		checkInvariants(events, containerInitInvariant) | ||||
| 		endPod := event.Object.(*v1.Pod) | ||||
| @@ -429,7 +429,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { | ||||
| 				} | ||||
| 			}, | ||||
| 		) | ||||
| 		gomega.Expect(err).To(gomega.BeNil()) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		checkInvariants(events, containerInitInvariant) | ||||
| 		endPod := event.Object.(*v1.Pod) | ||||
| @@ -544,7 +544,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { | ||||
| 				}), | ||||
| 			recordEvents(events, conditions.PodCompleted), | ||||
| 		) | ||||
| 		gomega.Expect(err).To(gomega.BeNil()) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		checkInvariants(events, containerInitInvariant) | ||||
| 		endPod := event.Object.(*v1.Pod) | ||||
|   | ||||
| @@ -177,7 +177,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { | ||||
| 			// running as cluster e2e test, because node e2e test does not create and | ||||
| 			// run controller manager, i.e., no node lifecycle controller. | ||||
| 			node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) | ||||
| 			gomega.Expect(err).To(gomega.BeNil()) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) | ||||
| 			framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) | ||||
| 		}) | ||||
| @@ -186,7 +186,7 @@ var _ = framework.KubeDescribe("NodeLease", func() { | ||||
|  | ||||
| func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) { | ||||
| 	node, err := clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady) | ||||
| 	framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue) | ||||
| 	heartbeatTime := readyCondition.LastHeartbeatTime.Time | ||||
|   | ||||
| @@ -594,7 +594,7 @@ var _ = SIGDescribe("Kubectl client", func() { | ||||
| 			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed")) | ||||
| 			g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } | ||||
| 			runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) | ||||
| 			gomega.Expect(err).To(gomega.BeNil()) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { | ||||
| 				framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") | ||||
| 			} | ||||
| @@ -609,7 +609,7 @@ var _ = SIGDescribe("Kubectl client", func() { | ||||
| 				gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed")) | ||||
| 				return strings.Contains(logOutput, "abcd1234"), nil | ||||
| 			}) | ||||
| 			gomega.Expect(err).To(gomega.BeNil()) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil()) | ||||
| 		}) | ||||
|   | ||||
| @@ -113,7 +113,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam | ||||
| 	// Detach and delete volume | ||||
| 	detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) | ||||
| 	err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| } | ||||
|  | ||||
| func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) { | ||||
| @@ -137,7 +137,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa | ||||
| 	// Detach and delete volume | ||||
| 	detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) | ||||
| 	err = e2epv.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) | ||||
| 	errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found` | ||||
| @@ -184,7 +184,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st | ||||
| // detachVolume delete the volume passed in the argument and wait until volume is detached from the node, | ||||
| func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { | ||||
| 	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) | ||||
| 	gomega.Expect(err).To(gomega.BeNil()) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	nodeName := pod.Spec.NodeName | ||||
| 	ginkgo.By("Deleting pod") | ||||
| 	e2epod.DeletePodWithWait(client, pod) | ||||
|   | ||||
| @@ -327,7 +327,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H | ||||
| 					ginkgo.By("checking if the expected hugetlb settings were applied") | ||||
| 					f.PodClient().Create(verifyPod) | ||||
| 					err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) | ||||
| 					gomega.Expect(err).To(gomega.BeNil()) | ||||
| 					framework.ExpectNoError(err) | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Kenichi Omichi
					Kenichi Omichi