using wait.PollUntilContextTimeout instead of deprecated wait.Poll for pkg/scheduler

using wait.PollUntilContextTimeout instead of deprecated wait.Poll for test/integration/scheduler

using wait.PollUntilContextTimeout instead of deprecated wait.Poll for test/e2e/scheduling

using wait.ConditionWithContextFunc for PodScheduled/PodIsGettingEvicted/PodScheduledIn/PodUnschedulable/PodSchedulingError
Author: SataQiu
Date:   2023-08-03 22:33:47 +08:00
Parent: 4f874a224a
Commit: ef7d404702

15 changed files with 113 additions and 83 deletions
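
The pattern applied across all of these call sites is the same: the deprecated wait.Poll call is replaced by wait.PollUntilContextTimeout, which threads the caller's context.Context into both the poll loop and the condition function. A minimal sketch of the before/after call shapes (illustrative helper names, not code from this commit):

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollDeprecated shows the old shape: no context, and the condition takes no arguments.
func pollDeprecated(check func() (bool, error)) error {
	return wait.Poll(2*time.Second, 20*time.Second, check)
}

// pollContextAware shows the replacement: the caller's context flows into the
// poll loop and into the condition. The fourth argument (immediate) is false,
// so the first check still happens only after one interval, matching wait.Poll.
func pollContextAware(ctx context.Context, check wait.ConditionWithContextFunc) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 20*time.Second, false, check)
}

Passing false for the immediate argument preserves wait.Poll's behavior of waiting one interval before the first check, which is why every new call site in the diffs below passes false.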


@@ -177,7 +177,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Verifying LimitRange updating is effective")
-		err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, time.Second*2, time.Second*20, false, func(ctx context.Context) (bool, error) {
 			limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 			return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
@@ -199,7 +199,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Verifying the LimitRange was deleted")
-		err = wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) {
+		err = wait.PollUntilContextTimeout(ctx, time.Second*5, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
 			limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{})
 			if err != nil {


@@ -728,7 +728,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 			// - if it's less than expected replicas, it denotes its pods are under-preempted
 			// "*2" means pods of ReplicaSet{1,2} are expected to be only preempted once.
 			expectedRSPods := []int32{1 * 2, 1 * 2, 1}
-			err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
+			err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, false, func(ctx context.Context) (bool, error) {
 				for i := 0; i < len(podNamesSeen); i++ {
 					got := atomic.LoadInt32(&podNamesSeen[i])
 					if got < expectedRSPods[i] {
@@ -905,7 +905,7 @@ func createPod(ctx context.Context, f *framework.Framework, conf pausePodConfig)
 // waitForPreemptingWithTimeout verifies if 'pod' is preempting within 'timeout', specifically it checks
 // if the 'spec.NodeName' field of preemptor 'pod' has been set.
 func waitForPreemptingWithTimeout(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
-	err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
 		pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
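
The commit message also mentions switching helpers such as PodScheduled, PodUnschedulable, and PodSchedulingError to wait.ConditionWithContextFunc; those hunks are not shown here. As a rough sketch of what a context-aware condition helper of that shape can look like (hypothetical podScheduled/waitForPodScheduled names and timeouts, not the upstream implementation):

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// podScheduled returns a wait.ConditionWithContextFunc that reports true once
// the pod carries a PodScheduled condition with status True.
func podScheduled(c clientset.Interface, namespace, name string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			// Keep polling while the pod does not exist yet; surface other errors.
			if apierrors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	}
}

// waitForPodScheduled plugs the helper into the context-aware poller used
// throughout this commit.
func waitForPodScheduled(ctx context.Context, c clientset.Interface, namespace, name string) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, false, podScheduled(c, namespace, name))
}

Returning a named condition constructor keeps the call sites short: the poller only sees a wait.ConditionWithContextFunc, so the same helper works with PollUntilContextTimeout, PollUntilContextCancel, or any other context-aware wait function.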