Assert on all conditions in the Pod Failure policy tests
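The diff below drops the per-field checks (expectedCondition, expectedConditionStatus, expectedConditionReason) in favor of comparing the full expected condition list. As a rough illustration of the assertion style the commit adopts — not part of the diff itself — a helper along these lines compares every Job condition with cmp.Diff while ignoring only the timestamp fields; the helper name and the use of the public k8s.io/api/batch/v1 types (the tests themselves use the internal batch package) are illustrative assumptions.

// Package example sketches the condition-assertion pattern used in the diff.
package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	batchv1 "k8s.io/api/batch/v1"
)

// assertJobConditions compares the whole condition list against the expected
// one. LastProbeTime and LastTransitionTime are set by the controller at run
// time, so they are excluded from the comparison.
func assertJobConditions(t *testing.T, want []batchv1.JobCondition, job *batchv1.Job) {
	t.Helper()
	if diff := cmp.Diff(want, job.Status.Conditions,
		cmpopts.IgnoreFields(batchv1.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
		t.Errorf("unexpected conditions (-want,+got):\n%s", diff)
	}
}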
@@ -240,9 +240,6 @@ type jobInitialStatus struct {

func TestControllerSyncJob(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	jobConditionComplete := batch.JobComplete
	jobConditionFailed := batch.JobFailed
	jobConditionSuspended := batch.JobSuspended
	referenceTime := time.Now()

	testCases := map[string]struct {
@@ -285,9 +282,7 @@ func TestControllerSyncJob(t *testing.T) {
		expectedCompletedIdxs  string
		expectedFailed         int32
		expectedTerminating    *int32
		expectedCondition       *batch.JobConditionType
		expectedConditionStatus v1.ConditionStatus
		expectedConditionReason string
		expectedConditions     []batch.JobCondition
		expectedCreatedIndexes sets.Set[int]
		expectedPodPatches     int

@@ -593,8 +588,12 @@ func TestControllerSyncJob(t *testing.T) {
			backoffLimit:      6,
			succeededPods:     5,
			expectedSucceeded: 5,
			expectedCondition:       &jobConditionComplete,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditions: []batch.JobCondition{
				{
					Type:   batch.JobComplete,
					Status: v1.ConditionTrue,
				},
			},
			expectedPodPatches: 5,
			expectedReady:      ptr.To[int32](0),
		},
@@ -615,8 +614,12 @@ func TestControllerSyncJob(t *testing.T) {
			backoffLimit:      6,
			succeededPods:     2,
			expectedSucceeded: 2,
			expectedCondition:       &jobConditionComplete,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditions: []batch.JobCondition{
				{
					Type:   batch.JobComplete,
					Status: v1.ConditionTrue,
				},
			},
			expectedPodPatches: 2,
			expectedReady:      ptr.To[int32](0),
		},
@@ -628,8 +631,12 @@ func TestControllerSyncJob(t *testing.T) {
			failedPods:        1,
			expectedSucceeded: 1,
			expectedFailed:    1,
			expectedCondition:       &jobConditionComplete,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditions: []batch.JobCondition{
				{
					Type:   batch.JobComplete,
					Status: v1.ConditionTrue,
				},
			},
			expectedPodPatches: 2,
			expectedReady:      ptr.To[int32](0),
		},
@@ -694,9 +701,14 @@ func TestControllerSyncJob(t *testing.T) {
			deleting:       true,
			failedPods:     1,
			expectedFailed: 1,
			expectedCondition:       &jobConditionFailed,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "BackoffLimitExceeded",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
			expectedPodPatches: 1,
			expectedReady:      ptr.To[int32](0),
		},
@@ -763,8 +775,12 @@ func TestControllerSyncJob(t *testing.T) {
			expectedSucceeded:     3,
			expectedFailed:        1,
			expectedCompletedIdxs: "0-2",
			expectedCondition:       &jobConditionComplete,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditions: []batch.JobCondition{
				{
					Type:   batch.JobComplete,
					Status: v1.ConditionTrue,
				},
			},
			expectedPodPatches: 4,
			expectedReady:      ptr.To[int32](0),
		},
@@ -781,9 +797,14 @@ func TestControllerSyncJob(t *testing.T) {
			expectedSucceeded:     1,
			expectedFailed:        2,
			expectedCompletedIdxs: "0",
			expectedCondition:       &jobConditionFailed,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "BackoffLimitExceeded",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
			expectedPodPatches: 3,
			expectedReady:      ptr.To[int32](0),
			expectedDeletions:  1,
@@ -802,9 +823,14 @@ func TestControllerSyncJob(t *testing.T) {
			expectedSucceeded:       1,
			expectedFailed:          2,
			expectedCompletedIdxs:   "0",
			expectedCondition:       &jobConditionFailed,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "BackoffLimitExceeded",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
			expectedPodPatches:  3,
			expectedReady:       ptr.To[int32](0),
			expectedDeletions:   1,
@@ -818,9 +844,14 @@ func TestControllerSyncJob(t *testing.T) {
			readyPods:      2,
			failedPods:     1,
			expectedFailed: 3,
			expectedCondition:       &jobConditionFailed,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "BackoffLimitExceeded",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
			expectedPodPatches: 3,
			expectedReady:      ptr.To[int32](0),
			expectedDeletions:  2,
@@ -962,9 +993,14 @@ func TestControllerSyncJob(t *testing.T) {
			expectedCreations: 0,
			expectedDeletions: 2,
			expectedActive:    0,
			expectedCondition:       &jobConditionSuspended,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "JobSuspended",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobSuspended,
					Status:  v1.ConditionTrue,
					Reason:  "JobSuspended",
					Message: "Job suspended",
				},
			},
			expectedPodPatches: 2,
			expectedReady:      ptr.To[int32](0),
		},
@@ -980,9 +1016,14 @@ func TestControllerSyncJob(t *testing.T) {
			expectedCreations:       0,
			expectedDeletions:       2,
			expectedActive:          0,
			expectedCondition:       &jobConditionSuspended,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "JobSuspended",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobSuspended,
					Status:  v1.ConditionTrue,
					Reason:  "JobSuspended",
					Message: "Job suspended",
				},
			},
			expectedPodPatches:  2,
			expectedReady:       ptr.To[int32](0),
			expectedTerminating: ptr.To[int32](2),
@@ -997,9 +1038,14 @@ func TestControllerSyncJob(t *testing.T) {
			expectedCreations: 0,
			expectedDeletions: 2,
			expectedActive:    0,
			expectedCondition:       &jobConditionSuspended,
			expectedConditionStatus: v1.ConditionTrue,
			expectedConditionReason: "JobSuspended",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobSuspended,
					Status:  v1.ConditionTrue,
					Reason:  "JobSuspended",
					Message: "Job suspended",
				},
			},
			expectedPodPatches: 2,
			expectedReady:      ptr.To[int32](0),
		},
@@ -1055,9 +1101,14 @@ func TestControllerSyncJob(t *testing.T) {
			expectedCreations: 2,
			expectedDeletions: 0,
			expectedActive:    2,
			expectedCondition:       &jobConditionSuspended,
			expectedConditionStatus: v1.ConditionFalse,
			expectedConditionReason: "JobResumed",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobSuspended,
					Status:  v1.ConditionFalse,
					Reason:  "JobResumed",
					Message: "Job resumed",
				},
			},
			expectedReady: ptr.To[int32](0),
		},
		"suspending a deleted job": {
@@ -1181,8 +1232,7 @@ func TestControllerSyncJob(t *testing.T) {
			}

			// run
			err = manager.syncJob(context.TODO(), testutil.GetKey(job, t))

			err = manager.syncJob(ctx, testutil.GetKey(job, t))
			// We need requeue syncJob task if podController error
			if tc.podControllerError != nil {
				if err == nil {
@@ -1263,17 +1313,8 @@ func TestControllerSyncJob(t *testing.T) {
				t.Error("Missing .status.startTime")
			}
			// validate conditions
			if tc.expectedCondition != nil {
				if !getCondition(actual, *tc.expectedCondition, tc.expectedConditionStatus, tc.expectedConditionReason) {
					t.Errorf("Expected completion condition.  Got %#v", actual.Status.Conditions)
				}
			} else {
				if cond := hasTrueCondition(actual); cond != nil {
					t.Errorf("Got condition %s, want none", *cond)
				}
			}
			if tc.expectedCondition == nil && tc.suspend && len(actual.Status.Conditions) != 0 {
				t.Errorf("Unexpected conditions %v", actual.Status.Conditions)
			if diff := cmp.Diff(tc.expectedConditions, actual.Status.Conditions, cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
				t.Errorf("unexpected conditions (-want,+got):\n%s", diff)
			}
			// validate slow start
			expectedLimit := 0
@@ -2177,8 +2218,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
		expectedActive     int32
		expectedSucceeded  int32
		expectedFailed     int32
		expectedCondition       batch.JobConditionType
		expectedConditionReason string
		expectedConditions []batch.JobCondition
	}{
		"activeDeadlineSeconds less than single pod execution": {
			parallelism:           1,
@@ -2189,8 +2229,14 @@ func TestSyncJobPastDeadline(t *testing.T) {
			activePods:            1,
			expectedDeletions:     1,
			expectedFailed:        1,
			expectedCondition:       batch.JobFailed,
			expectedConditionReason: batch.JobReasonDeadlineExceeded,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonDeadlineExceeded,
					Message: "Job was active longer than specified deadline",
				},
			},
		},
		"activeDeadlineSeconds bigger than single pod execution": {
			parallelism:           1,
@@ -2203,8 +2249,14 @@ func TestSyncJobPastDeadline(t *testing.T) {
			expectedDeletions:     1,
			expectedSucceeded:     1,
			expectedFailed:        1,
			expectedCondition:       batch.JobFailed,
			expectedConditionReason: batch.JobReasonDeadlineExceeded,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonDeadlineExceeded,
					Message: "Job was active longer than specified deadline",
				},
			},
		},
		"activeDeadlineSeconds times-out before any pod starts": {
			parallelism:           1,
@@ -2212,8 +2264,14 @@ func TestSyncJobPastDeadline(t *testing.T) {
			activeDeadlineSeconds: 10,
			startTime:             10,
			backoffLimit:          6,
			expectedCondition:       batch.JobFailed,
			expectedConditionReason: batch.JobReasonDeadlineExceeded,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonDeadlineExceeded,
					Message: "Job was active longer than specified deadline",
				},
			},
		},
		"activeDeadlineSeconds with backofflimit reach": {
			parallelism:           1,
@@ -2222,8 +2280,14 @@ func TestSyncJobPastDeadline(t *testing.T) {
			startTime:             10,
			failedPods:            1,
			expectedFailed:        1,
			expectedCondition:       batch.JobFailed,
			expectedConditionReason: batch.JobReasonBackoffLimitExceeded,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"activeDeadlineSeconds is not triggered when Job is suspended": {
			suspend:               true,
@@ -2232,8 +2296,14 @@ func TestSyncJobPastDeadline(t *testing.T) {
			activeDeadlineSeconds: 10,
			startTime:             15,
			backoffLimit:          6,
			expectedCondition:       batch.JobSuspended,
			expectedConditionReason: "JobSuspended",
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobSuspended,
					Status:  v1.ConditionTrue,
					Reason:  "JobSuspended",
					Message: "Job suspended",
				},
			},
		},
	}

@@ -2263,7 +2333,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
			setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods, 0, 0)

			// run
			err := manager.syncJob(context.TODO(), testutil.GetKey(job, t))
			err := manager.syncJob(ctx, testutil.GetKey(job, t))
			if err != nil {
				t.Errorf("Unexpected error when syncing jobs %v", err)
			}
@@ -2288,8 +2358,8 @@ func TestSyncJobPastDeadline(t *testing.T) {
				t.Error("Missing .status.startTime")
			}
			// validate conditions
			if !getCondition(actual, tc.expectedCondition, v1.ConditionTrue, tc.expectedConditionReason) {
				t.Errorf("Expected fail condition.  Got %#v", actual.Status.Conditions)
			if diff := cmp.Diff(tc.expectedConditions, actual.Status.Conditions, cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
				t.Errorf("unexpected conditions (-want,+got):\n%s", diff)
			}
		})
	}
@@ -2304,15 +2374,6 @@ func getCondition(job *batch.Job, condition batch.JobConditionType, status v1.Co
	return false
}

func hasTrueCondition(job *batch.Job) *batch.JobConditionType {
	for _, v := range job.Status.Conditions {
		if v.Status == v1.ConditionTrue {
			return &v.Type
		}
	}
	return nil
}

// TestPastDeadlineJobFinished ensures that a Job is correctly tracked until
// reaching the active deadline, at which point it is marked as Failed.
func TestPastDeadlineJobFinished(t *testing.T) {
@@ -2415,7 +2476,7 @@ func TestSingleJobFailedCondition(t *testing.T) {
	job.Status.StartTime = &start
	job.Status.Conditions = append(job.Status.Conditions, *newCondition(batch.JobFailed, v1.ConditionFalse, "DeadlineExceeded", "Job was active longer than specified deadline", realClock.Now()))
	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
	err := manager.syncJob(context.TODO(), testutil.GetKey(job, t))
	err := manager.syncJob(ctx, testutil.GetKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	}
@@ -2447,7 +2508,7 @@ func TestSyncJobComplete(t *testing.T) {
	job := newJob(1, 1, 6, batch.NonIndexedCompletion)
	job.Status.Conditions = append(job.Status.Conditions, *newCondition(batch.JobComplete, v1.ConditionTrue, "", "", realClock.Now()))
	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
	err := manager.syncJob(context.TODO(), testutil.GetKey(job, t))
	err := manager.syncJob(ctx, testutil.GetKey(job, t))
	if err != nil {
		t.Fatalf("Unexpected error when syncing jobs %v", err)
	}
@@ -2473,9 +2534,8 @@ func TestSyncJobDeleted(t *testing.T) {
		return job, nil
	}
	job := newJob(2, 2, 6, batch.NonIndexedCompletion)
	err := manager.syncJob(context.TODO(), testutil.GetKey(job, t))
	if err != nil {
		t.Errorf("Unexpected error when syncing jobs %v", err)
	if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
		t.Fatalf("error %v while reconciling the job %v", err, testutil.GetKey(job, t))
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
@@ -2653,7 +2713,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
		enableJobPodReplacementPolicy bool
		job                           batch.Job
		pods                          []v1.Pod
		wantConditions                *[]batch.JobCondition
		wantConditions                []batch.JobCondition
		wantStatusFailed              int32
		wantStatusActive              int32
		wantStatusSucceeded           int32
@@ -2794,7 +2854,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -2849,7 +2915,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -2904,7 +2976,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container main-container for pod default/already-deleted-pod failed with exit code 5 matching FailJob rule at index 1",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -2993,7 +3071,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container main-container for pod default/mypod-1 failed with exit code 5 matching FailJob rule at index 1",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -3039,7 +3123,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container main-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -3092,7 +3182,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container main-container for pod default/mypod-0 failed with exit code 42 matching FailJob rule at index 0",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -3194,7 +3290,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Container init-container for pod default/mypod-0 failed with exit code 5 matching FailJob rule at index 1",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -3321,7 +3423,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -3582,7 +3684,13 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
					},
				},
			},
			wantConditions: &[]batch.JobCondition{
			wantConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailureTarget,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonPodFailurePolicy,
					Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0",
				},
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
@@ -3698,23 +3806,12 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
				sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pb.Pod)
			}

			manager.syncJob(context.TODO(), testutil.GetKey(job, t))
			if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
				t.Fatalf("error %v while reconciling the job %v", err, testutil.GetKey(job, t))
			}

			if tc.wantConditions != nil {
				for _, wantCondition := range *tc.wantConditions {
					conditions := getConditionsByType(actual.Status.Conditions, wantCondition.Type)
					if len(conditions) != 1 {
						t.Fatalf("Expected a single completion condition. Got %#v for type: %q", conditions, wantCondition.Type)
					}
					condition := *conditions[0]
					if diff := cmp.Diff(wantCondition, condition, cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
						t.Errorf("Unexpected job condition (-want,+got):\n%s", diff)
					}
				}
			} else {
				if cond := hasTrueCondition(actual); cond != nil {
					t.Errorf("Got condition %s, want none", *cond)
				}
			if diff := cmp.Diff(tc.wantConditions, actual.Status.Conditions, cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
				t.Errorf("unexpected job conditions (-want,+got):\n%s", diff)
			}
			// validate status
			if actual.Status.Active != tc.wantStatusActive {
@@ -4653,13 +4750,13 @@ func TestSyncJobWithJobSuccessPolicy(t *testing.T) {
			}

			if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
				t.Fatalf("Failed to complete syncJob: %v", err)
				t.Fatalf("failed to complete syncJob: %v", err)
			}

			if diff := cmp.Diff(tc.wantStatus, actual.Status,
				cmpopts.IgnoreFields(batch.JobStatus{}, "StartTime", "CompletionTime", "Ready"),
				cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
				t.Errorf("Unexpectd Job status (-want,+got):\n%s", diff)
				t.Errorf("unexpected Job status (-want,+got):\n%s", diff)
			}
		})
	}
@@ -5142,7 +5239,9 @@ func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
				sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pb.Pod)
			}

			manager.syncJob(context.TODO(), testutil.GetKey(job, t))
			if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
				t.Fatalf("error %v while reconciling the job %v", err, testutil.GetKey(job, t))
			}

			// validate relevant fields of the status
			if diff := cmp.Diff(tc.wantStatus, actual.Status,
@@ -5816,7 +5915,9 @@ func TestSyncJobExpectations(t *testing.T) {
			podIndexer.Add(pods[1])
		},
	}
	manager.syncJob(context.TODO(), testutil.GetKey(job, t))
	if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
		t.Fatalf("error %v while reconciling the job %v", err, testutil.GetKey(job, t))
	}
	if len(fakePodControl.Templates) != 0 {
		t.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
	}
@@ -6281,9 +6382,6 @@ func TestJobBackoff(t *testing.T) {

func TestJobBackoffForOnFailure(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	jobConditionComplete := batch.JobComplete
	jobConditionFailed := batch.JobFailed
	jobConditionSuspended := batch.JobSuspended

	testCases := map[string]struct {
		// job setup
@@ -6300,63 +6398,193 @@ func TestJobBackoffForOnFailure(t *testing.T) {
		expectedActive     int32
		expectedSucceeded  int32
		expectedFailed     int32
		expectedCondition       *batch.JobConditionType
		expectedConditionReason string
		expectedConditions []batch.JobCondition
	}{
		"backoffLimit 0 should have 1 pod active": {
			1, 1, 0,
			false, []int32{0}, v1.PodRunning,
			1, 0, 0, nil, "",
			parallelism:        1,
			completions:        1,
			backoffLimit:       0,
			suspend:            false,
			restartCounts:      []int32{0},
			podPhase:           v1.PodRunning,
			expectedActive:     1,
			expectedSucceeded:  0,
			expectedFailed:     0,
			expectedConditions: nil,
		},
		"backoffLimit 1 with restartCount 0 should have 1 pod active": {
			1, 1, 1,
			false, []int32{0}, v1.PodRunning,
			1, 0, 0, nil, "",
			parallelism:        1,
			completions:        1,
			backoffLimit:       1,
			suspend:            false,
			restartCounts:      []int32{0},
			podPhase:           v1.PodRunning,
			expectedActive:     1,
			expectedSucceeded:  0,
			expectedFailed:     0,
			expectedConditions: nil,
		},
		"backoffLimit 1 with restartCount 1 and podRunning should have 0 pod active": {
			1, 1, 1,
			false, []int32{1}, v1.PodRunning,
			0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       1,
			completions:       1,
			backoffLimit:      1,
			suspend:           false,
			restartCounts:     []int32{1},
			podPhase:          v1.PodRunning,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    1,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"backoffLimit 1 with restartCount 1 and podPending should have 0 pod active": {
			1, 1, 1,
			false, []int32{1}, v1.PodPending,
			0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       1,
			completions:       1,
			backoffLimit:      1,
			suspend:           false,
			restartCounts:     []int32{1},
			podPhase:          v1.PodPending,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    1,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"too many job failures with podRunning - single pod": {
			1, 5, 2,
			false, []int32{2}, v1.PodRunning,
			0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       1,
			completions:       5,
			backoffLimit:      2,
			suspend:           false,
			restartCounts:     []int32{2},
			podPhase:          v1.PodRunning,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    1,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"too many job failures with podPending - single pod": {
			1, 5, 2,
			false, []int32{2}, v1.PodPending,
			0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       1,
			completions:       5,
			backoffLimit:      2,
			suspend:           false,
			restartCounts:     []int32{2},
			podPhase:          v1.PodPending,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    1,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"too many job failures with podRunning - multiple pods": {
			2, 5, 2,
			false, []int32{1, 1}, v1.PodRunning,
			0, 0, 2, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       2,
			completions:       5,
			backoffLimit:      2,
			suspend:           false,
			restartCounts:     []int32{1, 1},
			podPhase:          v1.PodRunning,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    2,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"too many job failures with podPending - multiple pods": {
			2, 5, 2,
			false, []int32{1, 1}, v1.PodPending,
			0, 0, 2, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       2,
			completions:       5,
			backoffLimit:      2,
			suspend:           false,
			restartCounts:     []int32{1, 1},
			podPhase:          v1.PodPending,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    2,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"not enough failures": {
			2, 5, 3,
			false, []int32{1, 1}, v1.PodRunning,
			2, 0, 0, nil, "",
			parallelism:        2,
			completions:        5,
			backoffLimit:       3,
			suspend:            false,
			restartCounts:      []int32{1, 1},
			podPhase:           v1.PodRunning,
			expectedActive:     2,
			expectedSucceeded:  0,
			expectedFailed:     0,
			expectedConditions: nil,
		},
		"suspending a job": {
			2, 4, 6,
			true, []int32{1, 1}, v1.PodRunning,
			0, 0, 0, &jobConditionSuspended, "JobSuspended",
			parallelism:       2,
			completions:       4,
			backoffLimit:      6,
			suspend:           true,
			restartCounts:     []int32{1, 1},
			podPhase:          v1.PodRunning,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    0,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobSuspended,
					Status:  v1.ConditionTrue,
					Reason:  "JobSuspended",
					Message: "Job suspended",
				},
			},
		},
		"finished job": {
			parallelism:       2,
			completions:       4,
			backoffLimit:      6,
			suspend:           true,
			restartCounts:     []int32{1, 1, 2, 0},
			podPhase:          v1.PodSucceeded,
			expectedActive:    0,
			expectedSucceeded: 4,
			expectedFailed:    0,
			expectedConditions: []batch.JobCondition{
				{
					Type:   batch.JobComplete,
					Status: v1.ConditionTrue,
				},
			},
		"finshed job": {
			2, 4, 6,
			true, []int32{1, 1, 2, 0}, v1.PodSucceeded,
			0, 4, 0, &jobConditionComplete, "",
		},
	}

@@ -6387,9 +6615,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
			}

			// run
			err := manager.syncJob(context.TODO(), testutil.GetKey(job, t))

			if err != nil {
			if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
				t.Errorf("unexpected error syncing job.  Got %#v", err)
			}
			// validate status
@@ -6403,8 +6629,8 @@ func TestJobBackoffForOnFailure(t *testing.T) {
				t.Errorf("unexpected number of failed pods.  Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
			}
			// validate conditions
			if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, v1.ConditionTrue, tc.expectedConditionReason) {
				t.Errorf("expected completion condition.  Got %#v", actual.Status.Conditions)
			if diff := cmp.Diff(tc.expectedConditions, actual.Status.Conditions, cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
				t.Errorf("unexpected conditions (-want,+got):\n%s", diff)
			}
		})
	}
@@ -6412,7 +6638,6 @@ func TestJobBackoffForOnFailure(t *testing.T) {

func TestJobBackoffOnRestartPolicyNever(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	jobConditionFailed := batch.JobFailed

	testCases := map[string]struct {
		// job setup
@@ -6429,33 +6654,81 @@ func TestJobBackoffOnRestartPolicyNever(t *testing.T) {
		expectedActive     int32
		expectedSucceeded  int32
		expectedFailed     int32
		expectedCondition       *batch.JobConditionType
		expectedConditionReason string
		expectedConditions []batch.JobCondition
	}{
		"not enough failures with backoffLimit 0 - single pod": {
			1, 1, 0,
			v1.PodRunning, 1, 0,
			1, 0, 0, nil, "",
			parallelism:        1,
			completions:        1,
			backoffLimit:       0,
			activePodsPhase:    v1.PodRunning,
			activePods:         1,
			failedPods:         0,
			expectedActive:     1,
			expectedSucceeded:  0,
			expectedFailed:     0,
			expectedConditions: nil,
		},
		"not enough failures with backoffLimit 1 - single pod": {
			1, 1, 1,
			"", 0, 1,
			1, 0, 1, nil, "",
			parallelism:        1,
			completions:        1,
			backoffLimit:       1,
			activePodsPhase:    "",
			activePods:         0,
			failedPods:         1,
			expectedActive:     1,
			expectedSucceeded:  0,
			expectedFailed:     1,
			expectedConditions: nil,
		},
		"too many failures with backoffLimit 1 - single pod": {
			1, 1, 1,
			"", 0, 2,
			0, 0, 2, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       1,
			completions:       1,
			backoffLimit:      1,
			activePodsPhase:   "",
			activePods:        0,
			failedPods:        2,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    2,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
		"not enough failures with backoffLimit 6 - multiple pods": {
			2, 2, 6,
			v1.PodRunning, 1, 6,
			2, 0, 6, nil, "",
			parallelism:        2,
			completions:        2,
			backoffLimit:       6,
			activePodsPhase:    v1.PodRunning,
			activePods:         1,
			failedPods:         6,
			expectedActive:     2,
			expectedSucceeded:  0,
			expectedFailed:     6,
			expectedConditions: nil,
		},
		"too many failures with backoffLimit 6 - multiple pods": {
			2, 2, 6,
			"", 0, 7,
			0, 0, 7, &jobConditionFailed, "BackoffLimitExceeded",
			parallelism:       2,
			completions:       2,
			backoffLimit:      6,
			activePodsPhase:   "",
			activePods:        0,
			failedPods:        7,
			expectedActive:    0,
			expectedSucceeded: 0,
			expectedFailed:    7,
			expectedConditions: []batch.JobCondition{
				{
					Type:    batch.JobFailed,
					Status:  v1.ConditionTrue,
					Reason:  batch.JobReasonBackoffLimitExceeded,
					Message: "Job has reached the specified backoff limit",
				},
			},
		},
	}

@@ -6490,7 +6763,7 @@ func TestJobBackoffOnRestartPolicyNever(t *testing.T) {
			}

			// run
			err := manager.syncJob(context.TODO(), testutil.GetKey(job, t))
			err := manager.syncJob(ctx, testutil.GetKey(job, t))
			if err != nil {
				t.Fatalf("unexpected error syncing job: %#v\n", err)
			}
@@ -6505,8 +6778,8 @@ func TestJobBackoffOnRestartPolicyNever(t *testing.T) {
				t.Errorf("unexpected number of failed pods. Expected %d, saw %d\n", tc.expectedFailed, actual.Status.Failed)
			}
			// validate conditions
			if tc.expectedCondition != nil && !getCondition(actual, *tc.expectedCondition, v1.ConditionTrue, tc.expectedConditionReason) {
				t.Errorf("expected completion condition. Got %#v", actual.Status.Conditions)
			if diff := cmp.Diff(tc.expectedConditions, actual.Status.Conditions, cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
				t.Errorf("unexpected conditions (-want,+got):\n%s", diff)
			}
		})
	}
@@ -6621,7 +6894,9 @@ func TestFinalizersRemovedExpectations(t *testing.T) {
	}
	jobKey := testutil.GetKey(job, t)

	manager.syncJob(context.TODO(), jobKey)
	if err := manager.syncJob(ctx, jobKey); err == nil {
		t.Fatal("missing error as the podControl is mocked to error")
	}
	gotExpectedUIDs := manager.finalizerExpectations.getExpectedUIDs(jobKey)
	if len(gotExpectedUIDs) != 0 {
		t.Errorf("Got unwanted expectations for removed finalizers after first syncJob with client failures:\n%s", sets.List(gotExpectedUIDs))
@@ -6629,7 +6904,9 @@ func TestFinalizersRemovedExpectations(t *testing.T) {

	// Remove failures and re-sync.
	manager.podControl.(*controller.FakePodControl).Err = nil
	manager.syncJob(context.TODO(), jobKey)
	if err := manager.syncJob(ctx, jobKey); err != nil {
		t.Fatalf("unexpected error syncing job: %#v\n", err)
	}
	gotExpectedUIDs = manager.finalizerExpectations.getExpectedUIDs(jobKey)
	if diff := cmp.Diff(uids, gotExpectedUIDs); diff != "" {
		t.Errorf("Different expectations for removed finalizers after syncJob (-want,+got):\n%s", diff)