Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-12-24 16:57:27 +00:00
Merge pull request #120406 from wlq1212/cheanup/framework/timeout
e2e_framework: stop using deprecated wait.ErrWaitTimeout
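The change is the same throughout the diff below: direct comparisons against the deprecated sentinel error wait.ErrWaitTimeout are replaced with the wait.Interrupted(err) helper from k8s.io/apimachinery/pkg/util/wait, which also matches context.Canceled and context.DeadlineExceeded. A minimal, self-contained sketch of that migration pattern; waitUntilReady and its arguments are illustrative and not taken from this PR:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitUntilReady is an illustrative helper, not code from this PR. It polls
// ready() until it returns true or the timeout expires.
func waitUntilReady(ctx context.Context, timeout time.Duration, ready func() bool) error {
	err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true,
		func(ctx context.Context) (bool, error) {
			return ready(), nil
		})

	// Before this PR the e2e framework wrote:
	//     if err == wait.ErrWaitTimeout { ... }
	// wait.Interrupted(err) is the non-deprecated check; it matches
	// wait.ErrWaitTimeout as well as context.Canceled and
	// context.DeadlineExceeded, so it keeps working as the wait helpers
	// switch to context-based deadlines.
	if wait.Interrupted(err) {
		return fmt.Errorf("condition was not met within %v", timeout)
	}
	return err
}

func main() {
	start := time.Now()
	err := waitUntilReady(context.Background(), 3*time.Second, func() bool {
		return time.Since(start) > time.Second
	})
	fmt.Println("wait result:", err)
}

Because wait.Interrupted uses errors.Is internally, the check keeps working whether the poll helper returns the legacy sentinel or a wrapped context error.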
@@ -524,7 +524,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, mi
         }
     })
     // The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-    if err == wait.ErrWaitTimeout {
+    if wait.Interrupted(err) {
         framework.Logf("Number of replicas was stable over %v", duration)
         return
     }
@@ -141,7 +141,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
         return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
     })

-    if err != nil && err != wait.ErrWaitTimeout {
+    if err != nil && !wait.Interrupted(err) {
         return err
     }

@@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
         return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
     })

-    if err != nil && err != wait.ErrWaitTimeout {
+    if err != nil && !wait.Interrupted(err) {
         return err
     }

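The two node-health helpers above invert the check: hitting the timeout is an acceptable outcome, and only errors that are not interruptions are propagated. A small sketch of that variant, with illustrative names not taken from this PR:

package main

import (
	"context"
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

// errorUnlessInterrupted is an illustrative wrapper, not code from this PR.
// Helpers such as allNodesReady and WaitForTotalHealthy treat a timeout as a
// tolerable outcome and only return errors that are not interruptions.
func errorUnlessInterrupted(err error) error {
	// Old style: if err != nil && err != wait.ErrWaitTimeout { return err }
	if err != nil && !wait.Interrupted(err) {
		return err
	}
	return nil
}

func main() {
	fmt.Println(errorUnlessInterrupted(nil))                           // <nil>
	fmt.Println(errorUnlessInterrupted(context.DeadlineExceeded))      // <nil>: timeouts are tolerated
	fmt.Println(errorUnlessInterrupted(errors.New("nodes not found"))) // real failure is returned
}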
@@ -37,7 +37,7 @@ func WaitForReadyReplicaSet(ctx context.Context, c clientset.Interface, ns, name
         }
         return *(rs.Spec.Replicas) == rs.Status.Replicas && *(rs.Spec.Replicas) == rs.Status.ReadyReplicas, nil
     })
-    if err == wait.ErrWaitTimeout {
+    if wait.Interrupted(err) {
         err = fmt.Errorf("replicaset %q never became ready", name)
     }
     return err
@@ -59,7 +59,7 @@ func WaitForReplicaSetTargetAvailableReplicasWithTimeout(ctx context.Context, c
         }
         return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
     })
-    if err == wait.ErrWaitTimeout {
+    if wait.Interrupted(err) {
         err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
     }
     return err
@@ -182,7 +182,7 @@ func waitForPodsGone(ctx context.Context, ps *testutils.PodStore, interval, time
         return false, nil
     })

-    if err == wait.ErrWaitTimeout {
+    if wait.Interrupted(err) {
         for _, pod := range pods {
             framework.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
         }
@@ -206,7 +206,7 @@ func waitForPodsInactive(ctx context.Context, ps *testutils.PodStore, interval,
         return true, nil
     })

-    if err == wait.ErrWaitTimeout {
+    if wait.Interrupted(err) {
         for _, pod := range activePods {
             framework.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
         }
@@ -45,7 +45,7 @@ func TestReachableHTTPWithRetriableErrorCodes(ctx context.Context, host string,
     }

     if err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, pollfn); err != nil {
-        if err == wait.ErrWaitTimeout {
+        if wait.Interrupted(err) {
             framework.Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout)
         } else {
             framework.Failf("Failed to reach HTTP service through %v:%v: %v", host, port, err)