test: don't panic during an Eventually retry loop

Signed-off-by: Jack Francis <jackfrancis@gmail.com>
This commit is contained in:
Jack Francis
2024-11-22 13:26:59 -08:00
parent 020c4b7c65
commit d54ff7441e
6 changed files with 20 additions and 11 deletions

View File

@@ -44,10 +44,7 @@ var _ = common.SIGDescribe("Metrics", func() {
 		ec = f.KubemarkExternalClusterClientSet
 		gomega.Eventually(ctx, func() error {
 			grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true)
-			if err != nil {
-				framework.ExpectNoError(err, "failed to create metrics grabber")
-			}
-			return nil
+			return err
 		}, 5*time.Minute, 10*time.Second).Should(gomega.BeNil())
 	})

View File

@@ -295,7 +295,10 @@ func SetupEnvironmentAndSkipIfNeeded(ctx context.Context, f *framework.Framework
 func areGPUsAvailableOnAllSchedulableNodes(ctx context.Context, clientSet clientset.Interface) bool {
 	framework.Logf("Getting list of Nodes from API server")
 	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	framework.ExpectNoError(err, "getting node list")
+	if err != nil {
+		framework.Logf("Unexpected error getting node list: %v", err)
+		return false
+	}
 	for _, node := range nodeList.Items {
 		if node.Spec.Unschedulable {
 			continue

View File

@@ -274,14 +274,14 @@ func SetPVCVACName(ctx context.Context, origPVC *v1.PersistentVolumeClaim, name
 	pvcName := origPVC.Name
 	var patchedPVC *v1.PersistentVolumeClaim
-	gomega.Eventually(ctx, func(g gomega.Gomega) {
+	gomega.Eventually(ctx, func() error {
 		var err error
 		patch := []map[string]interface{}{{"op": "replace", "path": "/spec/volumeAttributesClassName", "value": name}}
 		patchBytes, _ := json.Marshal(patch)
 		patchedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Patch(ctx, pvcName, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
-		framework.ExpectNoError(err, "While patching PVC to add VAC name")
-	}, timeout, modifyPollInterval).Should(gomega.Succeed())
+		return err
+	}, timeout, modifyPollInterval).Should(gomega.BeNil())
 	return patchedPVC
 }

View File

@@ -178,7 +178,10 @@ var _ = sigDescribe(feature.Windows, "Eviction", framework.WithSerial(), framewo
 			framework.Logf("Waiting for pod2 to get evicted")
 			gomega.Eventually(ctx, func() bool {
 				eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-				framework.ExpectNoError(err)
+				if err != nil {
+					framework.Logf("Error getting events: %v", err)
+					return false
+				}
 				for _, e := range eventList.Items {
 					// Look for an event that shows FailedScheduling
 					if e.Type == "Warning" && e.Reason == "Evicted" && strings.Contains(e.Message, "pod2") {

View File

@@ -164,7 +164,10 @@ func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework,
 	framework.ExpectNoError(err)
 	gomega.Eventually(ctx, func() bool {
 		eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
-		framework.ExpectNoError(err)
+		if err != nil {
+			framework.Logf("Error getting events: %v", err)
+			return false
+		}
 		for _, e := range eventList.Items {
 			// Look for an event that shows FailedScheduling
 			if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePod.ObjectMeta.Name {

View File

@@ -239,7 +239,10 @@ func waitForKubeletToStart(ctx context.Context, f *framework.Framework) {
 	// Wait for the Kubelet to be ready.
 	gomega.Eventually(ctx, func(ctx context.Context) bool {
 		nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
-		framework.ExpectNoError(err)
+		if err != nil {
+			framework.Logf("Error getting ready nodes: %v", err)
+			return false
+		}
 		return nodes == 1
 	}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
 }