From d54ff7441e150ff057c6d2a1be7b13d89afcd56a Mon Sep 17 00:00:00 2001
From: Jack Francis
Date: Fri, 22 Nov 2024 13:26:59 -0800
Subject: [PATCH] test: don't panic during an Eventually retry loop

Signed-off-by: Jack Francis
---
 test/e2e/instrumentation/metrics.go          | 5 +----
 test/e2e/node/gpu.go                         | 5 ++++-
 test/e2e/storage/testsuites/volume_modify.go | 6 +++---
 test/e2e/windows/eviction.go                 | 5 ++++-
 test/e2e/windows/memory_limits.go            | 5 ++++-
 test/e2e_node/util.go                        | 5 ++++-
 6 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/test/e2e/instrumentation/metrics.go b/test/e2e/instrumentation/metrics.go
index 5ddb0b6cce5..1ebd46e7fb9 100644
--- a/test/e2e/instrumentation/metrics.go
+++ b/test/e2e/instrumentation/metrics.go
@@ -44,10 +44,7 @@ var _ = common.SIGDescribe("Metrics", func() {
 		ec = f.KubemarkExternalClusterClientSet
 		gomega.Eventually(ctx, func() error {
 			grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true)
-			if err != nil {
-				framework.ExpectNoError(err, "failed to create metrics grabber")
-			}
-			return nil
+			return err
 		}, 5*time.Minute, 10*time.Second).Should(gomega.BeNil())
 	})
 
diff --git a/test/e2e/node/gpu.go b/test/e2e/node/gpu.go
index b1f057103be..65dbbb060ff 100644
--- a/test/e2e/node/gpu.go
+++ b/test/e2e/node/gpu.go
@@ -295,7 +295,10 @@ func SetupEnvironmentAndSkipIfNeeded(ctx context.Context, f *framework.Framework
 func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) bool {
 	framework.Logf("Getting list of Nodes from API server")
 	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	framework.ExpectNoError(err, "getting node list")
+	if err != nil {
+		framework.Logf("Unexpected error getting node list: %v", err)
+		return false
+	}
 	for _, node := range nodeList.Items {
 		if node.Spec.Unschedulable {
 			continue
diff --git a/test/e2e/storage/testsuites/volume_modify.go b/test/e2e/storage/testsuites/volume_modify.go
index c6b796a3dd7..9e412f09245 100644
--- a/test/e2e/storage/testsuites/volume_modify.go
+++ b/test/e2e/storage/testsuites/volume_modify.go
@@ -274,14 +274,14 @@ func SetPVCVACName(ctx context.Context, origPVC *v1.PersistentVolumeClaim, name
 	pvcName := origPVC.Name
 	var patchedPVC *v1.PersistentVolumeClaim
 
-	gomega.Eventually(ctx, func(g gomega.Gomega) {
+	gomega.Eventually(ctx, func() error {
 		var err error
 		patch := []map[string]interface{}{{"op": "replace", "path": "/spec/volumeAttributesClassName", "value": name}}
 		patchBytes, _ := json.Marshal(patch)
 
 		patchedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Patch(ctx, pvcName, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
-		framework.ExpectNoError(err, "While patching PVC to add VAC name")
-	}, timeout, modifyPollInterval).Should(gomega.Succeed())
+		return err
+	}, timeout, modifyPollInterval).Should(gomega.BeNil())
 
 	return patchedPVC
 }
diff --git a/test/e2e/windows/eviction.go b/test/e2e/windows/eviction.go
index 53bb4deff4a..9531eae49e3 100644
--- a/test/e2e/windows/eviction.go
+++ b/test/e2e/windows/eviction.go
@@ -178,7 +178,10 @@ var _ = sigDescribe(feature.Windows, "Eviction", framework.WithSerial(), framewo
 		framework.Logf("Waiting for pod2 to get evicted")
 		gomega.Eventually(ctx, func() bool {
 			eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-			framework.ExpectNoError(err)
+			if err != nil {
+				framework.Logf("Error getting events: %v", err)
+				return false
+			}
 			for _, e := range eventList.Items {
 				// Look for an event that shows FailedScheduling
 				if e.Type == "Warning" && e.Reason == "Evicted" && strings.Contains(e.Message, "pod2") {
diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go
index d0e12b4ca25..e88efcf0d72 100644
--- a/test/e2e/windows/memory_limits.go
+++ b/test/e2e/windows/memory_limits.go
@@ -164,7 +164,10 @@ func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework,
 	framework.ExpectNoError(err)
 	gomega.Eventually(ctx, func() bool {
 		eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-		framework.ExpectNoError(err)
+		if err != nil {
+			framework.Logf("Error getting events: %v", err)
+			return false
+		}
 		for _, e := range eventList.Items {
 			// Look for an event that shows FailedScheduling
 			if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePod.ObjectMeta.Name {
diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go
index d6d37fc650d..471c4b69a7b 100644
--- a/test/e2e_node/util.go
+++ b/test/e2e_node/util.go
@@ -239,7 +239,10 @@ func waitForKubeletToStart(ctx context.Context, f *framework.Framework) {
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(ctx, func(ctx context.Context) bool {
 		nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
-		framework.ExpectNoError(err)
+		if err != nil {
+			framework.Logf("Error getting ready nodes: %v", err)
+			return false
+		}
 		return nodes == 1
 	}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
 }
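For reference, the pattern this patch applies: framework.ExpectNoError fails the spec via ginkgo.Fail, which panics and aborts the surrounding gomega.Eventually poll on the first transient error instead of letting it retry. Returning the error from an error-valued poller, or logging it and returning false from a bool-valued poller, lets Eventually keep retrying until its timeout. The following is a minimal sketch of both polling styles; the helper names (waitForReadyNode, waitForEvent) and their parameters are hypothetical, while the gomega, client-go, and framework calls are the same ones used in the patched tests.

// Sketch only: retry-friendly polling inside gomega.Eventually.
// Helper names and parameters are illustrative, not part of the patch.
package example

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForReadyNode uses an error-returning poller: any error is handed back to
// gomega, which records it and retries until the timeout instead of panicking.
func waitForReadyNode(ctx context.Context, c clientset.Interface, name string) {
	gomega.Eventually(ctx, func() error {
		_, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
		// Do not call framework.ExpectNoError here; it would abort the retry loop.
		return err
	}, 5*time.Minute, 10*time.Second).Should(gomega.Succeed())
}

// waitForEvent uses a bool-returning poller: transient list errors are logged
// and reported as "not yet", so Eventually keeps polling.
func waitForEvent(ctx context.Context, c clientset.Interface, ns, reason string) {
	gomega.Eventually(ctx, func() bool {
		events, err := c.CoreV1().Events(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			framework.Logf("Error listing events: %v", err)
			return false
		}
		for _, e := range events.Items {
			if e.Reason == reason {
				return true
			}
		}
		return false
	}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected an event with reason %q", reason))
}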