	Graduate PodDisruptionConditions to stable
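This commit removes the PodDisruptionConditions feature gate as a toggle and locks the feature on. Judging from the function names and import paths visible in the hunks below, it touches the Job controller tests, the PodGC controller tests, the NoExecute taint-eviction tests, the feature-gate registry, and the kubelet eviction manager tests: the gate spec moves to GA with LockToDefault, the per-case enablePodDisruptionConditions fields and SetFeatureGateDuringTest overrides are deleted, and test cases that only covered the gate-disabled path are dropped.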
@@ -2709,7 +2709,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 	testCases := map[string]struct {
 		enableJobPodFailurePolicy     bool
-		enablePodDisruptionConditions bool
 		enableJobPodReplacementPolicy bool
 		job                           batch.Job
 		pods                          []v1.Pod
@@ -3736,8 +3735,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			},
 		},
 		"terminating Pod not considered failed when PodDisruptionConditions is enabled": {
-			enableJobPodFailurePolicy:     true,
-			enablePodDisruptionConditions: true,
+			enableJobPodFailurePolicy: true,
 			job: batch.Job{
 				TypeMeta:   metav1.TypeMeta{Kind: "Job"},
 				ObjectMeta: validObjectMeta,
@@ -3776,7 +3774,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)
 
 			if tc.job.Spec.PodReplacementPolicy == nil {
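The test-side change that repeats throughout this commit is mechanical: the per-case enablePodDisruptionConditions field and its SetFeatureGateDuringTest override go away, because a GA gate locked to its default can no longer be flipped per test. A minimal sketch of the pattern being deleted, using the standard Kubernetes feature-gate test helpers; the package, test, and case names here are hypothetical and not taken from the diff:

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestWithGateToggle shows the pre-GA pattern: each case carries a bool and the
// test overrides the global gate before exercising the controller.
func TestWithGateToggle(t *testing.T) {
	testCases := map[string]struct {
		enablePodDisruptionConditions bool // field removed by this commit
	}{
		"gate enabled":  {enablePodDisruptionConditions: true},
		"gate disabled": {enablePodDisruptionConditions: false}, // variant dropped by this commit
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			// Removed by this commit: once the gate is GA and locked to true, the
			// featuregate machinery rejects overriding it to false, and overriding
			// it to true is a no-op.
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
				features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
			// ... exercise the controller and assert on the expected pod conditions ...
		})
	}
}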
 
@@ -31,20 +31,17 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/util/workqueue"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	metricstestutil "k8s.io/component-base/metrics/testutil"
 	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/podgc/metrics"
 	"k8s.io/kubernetes/pkg/controller/testutil"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	testingclock "k8s.io/utils/clock/testing"
 	"k8s.io/utils/pointer"
@@ -69,23 +66,21 @@ func TestGCTerminated(t *testing.T) {
 	}
 
 	testCases := []struct {
-		name                          string
-		pods                          []nameToPhase
-		threshold                     int
-		deletedPodNames               sets.Set[string]
-		patchedPodNames               sets.Set[string]
-		enablePodDisruptionConditions bool
+		name            string
+		pods            []nameToPhase
+		threshold       int
+		deletedPodNames sets.Set[string]
+		patchedPodNames sets.Set[string]
 	}{
 		{
-			name: "delete pod a which is PodFailed and pod b which is PodSucceeded; PodDisruptionConditions enabled",
+			name: "delete pod a which is PodFailed and pod b which is PodSucceeded",
 			pods: []nameToPhase{
 				{name: "a", phase: v1.PodFailed},
 				{name: "b", phase: v1.PodSucceeded},
 				{name: "c", phase: v1.PodFailed},
 			},
-			threshold:                     1,
-			deletedPodNames:               sets.New("a", "b"),
-			enablePodDisruptionConditions: true,
+			threshold:       1,
+			deletedPodNames: sets.New("a", "b"),
 		},
 		{
 			name: "threshold = 0, disables terminated pod deletion",
@@ -156,7 +151,6 @@ func TestGCTerminated(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			resetMetrics()
 			_, ctx := ktesting.NewTestContext(t)
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 			creationTime := time.Unix(0, 0)
 			nodes := []*v1.Node{testutil.NewNode("node")}
 
@@ -206,19 +200,18 @@ func waitForAdded(q workqueue.TypedDelayingInterface[string], depth int) error {
 
 func TestGCOrphaned(t *testing.T) {
 	testCases := []struct {
-		name                          string
-		initialClientNodes            []*v1.Node
-		initialInformerNodes          []*v1.Node
-		delay                         time.Duration
-		addedClientNodes              []*v1.Node
-		deletedClientNodes            []*v1.Node
-		addedInformerNodes            []*v1.Node
-		deletedInformerNodes          []*v1.Node
-		pods                          []*v1.Pod
-		itemsInQueue                  int
-		deletedPodNames               sets.Set[string]
-		patchedPodNames               sets.Set[string]
-		enablePodDisruptionConditions bool
+		name                 string
+		initialClientNodes   []*v1.Node
+		initialInformerNodes []*v1.Node
+		delay                time.Duration
+		addedClientNodes     []*v1.Node
+		deletedClientNodes   []*v1.Node
+		addedInformerNodes   []*v1.Node
+		deletedInformerNodes []*v1.Node
+		pods                 []*v1.Pod
+		itemsInQueue         int
+		deletedPodNames      sets.Set[string]
+		patchedPodNames      sets.Set[string]
 	}{
 		{
 			name: "nodes present in lister",
@@ -259,17 +252,16 @@ func TestGCOrphaned(t *testing.T) {
 			deletedPodNames: sets.New("a", "b"),
 		},
 		{
-			name:  "no nodes with PodDisruptionConditions enabled",
+			name:  "no nodes, one running pod",
 			delay: 2 * quarantineTime,
 			pods: []*v1.Pod{
 				makePod("a", "deleted", v1.PodFailed),
 				makePod("b", "deleted", v1.PodSucceeded),
 				makePod("c", "deleted", v1.PodRunning),
 			},
-			itemsInQueue:                  1,
-			deletedPodNames:               sets.New("a", "b", "c"),
-			patchedPodNames:               sets.New("c"),
-			enablePodDisruptionConditions: true,
+			itemsInQueue:    1,
+			deletedPodNames: sets.New("a", "b", "c"),
+			patchedPodNames: sets.New("c"),
 		},
 		{
 			name:  "quarantine not finished",
@@ -351,7 +343,6 @@ func TestGCOrphaned(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			resetMetrics()
 			_, ctx := ktesting.NewTestContext(t)
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 
 			client := setupNewSimpleClient(test.initialClientNodes, test.pods)
 			gcc, podInformer, nodeInformer := NewFromClient(ctx, client, -1)
@@ -416,23 +407,11 @@ func TestGCUnscheduledTerminating(t *testing.T) {
 	}
 
 	testCases := []struct {
-		name                          string
-		pods                          []nameToPhase
-		deletedPodNames               sets.Set[string]
-		patchedPodNames               sets.Set[string]
-		enablePodDisruptionConditions bool
+		name            string
+		pods            []nameToPhase
+		deletedPodNames sets.Set[string]
+		patchedPodNames sets.Set[string]
 	}{
-		{
-			name: "Unscheduled pod in any phase must be deleted, the phase of the running pod is changed to Failed; PodDisruptionConditions enabled",
-			pods: []nameToPhase{
-				{name: "a", phase: v1.PodFailed, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
-				{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
-				{name: "c", phase: v1.PodRunning, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
-			},
-			deletedPodNames:               sets.New("a", "b", "c"),
-			patchedPodNames:               sets.New("c"),
-			enablePodDisruptionConditions: true,
-		},
 		{
 			name: "Unscheduled pod in any phase must be deleted",
 			pods: []nameToPhase{
@@ -457,7 +436,6 @@ func TestGCUnscheduledTerminating(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			resetMetrics()
 			_, ctx := ktesting.NewTestContext(t)
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 			creationTime := time.Unix(0, 0)
 
 			pods := make([]*v1.Pod, 0, len(test.pods))
@@ -505,12 +483,11 @@ func TestGCTerminating(t *testing.T) {
 	}
 
 	testCases := []struct {
-		name                          string
-		pods                          []nameToPodConfig
-		nodes                         []node
-		deletedPodNames               sets.Set[string]
-		patchedPodNames               sets.Set[string]
-		enablePodDisruptionConditions bool
+		name            string
+		pods            []nameToPodConfig
+		nodes           []node
+		deletedPodNames sets.Set[string]
+		patchedPodNames sets.Set[string]
 	}{
 		{
 			name: "pods have deletion timestamp set and the corresponding nodes are not ready",
@@ -592,7 +569,7 @@ func TestGCTerminating(t *testing.T) {
 			patchedPodNames: sets.New("b1", "b4", "b5", "b6"),
 		},
 		{
-			name: "pods deleted from node tained out-of-service; PodDisruptionConditions enabled",
+			name: "pods deleted from node tainted out-of-service",
 			nodes: []node{
 				{name: "worker", readyCondition: v1.ConditionFalse, taints: []v1.Taint{{Key: v1.TaintNodeOutOfService,
 					Effect: v1.TaintEffectNoExecute}}},
@@ -602,16 +579,14 @@ func TestGCTerminating(t *testing.T) {
 				{name: "b", phase: v1.PodFailed, deletionTimeStamp: &metav1.Time{}, nodeName: "worker"},
 				{name: "c", phase: v1.PodSucceeded, deletionTimeStamp: &metav1.Time{}, nodeName: "worker"},
 			},
-			deletedPodNames:               sets.New("a", "b", "c"),
-			patchedPodNames:               sets.New("a"),
-			enablePodDisruptionConditions: true,
+			deletedPodNames: sets.New("a", "b", "c"),
+			patchedPodNames: sets.New("a"),
 		},
 	}
 	for _, test := range testCases {
 		t.Run(test.name, func(t *testing.T) {
 			resetMetrics()
 			_, ctx := ktesting.NewTestContext(t)
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 
 			creationTime := time.Unix(0, 0)
 			nodes := make([]*v1.Node, 0, len(test.nodes))
@@ -720,7 +695,6 @@ func TestGCInspectingPatchedPodBeforeDeletion(t *testing.T) {
 	for _, test := range testCases {
 		t.Run(test.name, func(t *testing.T) {
 			_, ctx := ktesting.NewTestContext(t)
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
 
 			pods := []*v1.Pod{test.pod}
 
 
@@ -32,14 +32,11 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/controller/testutil"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 var timeForControllerToProgressForSanityCheck = 20 * time.Millisecond
@@ -139,12 +136,11 @@ func TestFilterNoExecuteTaints(t *testing.T) {
 
 func TestCreatePod(t *testing.T) {
 	testCases := []struct {
-		description                   string
-		pod                           *corev1.Pod
-		taintedNodes                  map[string][]corev1.Taint
-		expectPatch                   bool
-		expectDelete                  bool
-		enablePodDisruptionConditions bool
+		description  string
+		pod          *corev1.Pod
+		taintedNodes map[string][]corev1.Taint
+		expectPatch  bool
+		expectDelete bool
 	}{
 		{
 			description:  "not scheduled - ignore",
@@ -164,18 +160,9 @@ func TestCreatePod(t *testing.T) {
 			taintedNodes: map[string][]corev1.Taint{
 				"node1": {createNoExecuteTaint(1)},
 			},
+			expectPatch:  true,
 			expectDelete: true,
 		},
-		{
-			description: "schedule on tainted Node; PodDisruptionConditions enabled",
-			pod:         testutil.NewPod("pod1", "node1"),
-			taintedNodes: map[string][]corev1.Taint{
-				"node1": {createNoExecuteTaint(1)},
-			},
-			expectPatch:                   true,
-			expectDelete:                  true,
-			enablePodDisruptionConditions: true,
-		},
 		{
 			description: "schedule on tainted Node with finite toleration",
 			pod:         addToleration(testutil.NewPod("pod1", "node1"), 1, 100),
@@ -198,13 +185,13 @@ func TestCreatePod(t *testing.T) {
 			taintedNodes: map[string][]corev1.Taint{
 				"node1": {createNoExecuteTaint(1)},
 			},
+			expectPatch:  true,
 			expectDelete: true,
 		},
 	}
 
 	for _, item := range testCases {
 		t.Run(item.description, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions)
 			ctx, cancel := context.WithCancel(context.Background())
 			fakeClientset := fake.NewSimpleClientset(&corev1.PodList{Items: []corev1.Pod{*item.pod}})
 			controller, podIndexer, _ := setupNewController(ctx, fakeClientset)
@@ -240,27 +227,15 @@ func TestDeletePod(t *testing.T) {
 
 func TestUpdatePod(t *testing.T) {
 	testCases := []struct {
-		description                   string
-		prevPod                       *corev1.Pod
-		awaitForScheduledEviction     bool
-		newPod                        *corev1.Pod
-		taintedNodes                  map[string][]corev1.Taint
-		expectPatch                   bool
-		expectDelete                  bool
-		enablePodDisruptionConditions bool
-		skipOnWindows                 bool
+		description               string
+		prevPod                   *corev1.Pod
+		awaitForScheduledEviction bool
+		newPod                    *corev1.Pod
+		taintedNodes              map[string][]corev1.Taint
+		expectPatch               bool
+		expectDelete              bool
+		skipOnWindows             bool
 	}{
-		{
-			description: "scheduling onto tainted Node results in patch and delete when PodDisruptionConditions enabled",
-			prevPod:     testutil.NewPod("pod1", ""),
-			newPod:      testutil.NewPod("pod1", "node1"),
-			taintedNodes: map[string][]corev1.Taint{
-				"node1": {createNoExecuteTaint(1)},
-			},
-			expectPatch:                   true,
-			expectDelete:                  true,
-			enablePodDisruptionConditions: true,
-		},
 		{
 			description: "scheduling onto tainted Node",
 			prevPod:     testutil.NewPod("pod1", ""),
@@ -268,6 +243,7 @@ func TestUpdatePod(t *testing.T) {
 			taintedNodes: map[string][]corev1.Taint{
 				"node1": {createNoExecuteTaint(1)},
 			},
+			expectPatch:  true,
 			expectDelete: true,
 		},
 		{
@@ -287,6 +263,7 @@ func TestUpdatePod(t *testing.T) {
 			taintedNodes: map[string][]corev1.Taint{
 				"node1": {createNoExecuteTaint(1)},
 			},
+			expectPatch:   true,
 			expectDelete:  true,
 		},
 		{
@@ -297,6 +274,7 @@ func TestUpdatePod(t *testing.T) {
 			taintedNodes: map[string][]corev1.Taint{
 				"node1": {createNoExecuteTaint(1)},
 			},
+			expectPatch:   true,
 			expectDelete:  true,
 			skipOnWindows: true,
 		},
@@ -308,7 +286,6 @@ func TestUpdatePod(t *testing.T) {
 				// TODO: remove skip once the flaking test has been fixed.
 				t.Skip("Skip flaking test on Windows.")
 			}
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions)
 			ctx, cancel := context.WithCancel(context.Background())
 			fakeClientset := fake.NewSimpleClientset(&corev1.PodList{Items: []corev1.Pod{*item.prevPod}})
 			controller, podIndexer, _ := setupNewController(context.TODO(), fakeClientset)
@@ -417,33 +394,22 @@ func TestDeleteNode(t *testing.T) {
 
 func TestUpdateNode(t *testing.T) {
 	testCases := []struct {
-		description                   string
-		pods                          []corev1.Pod
-		oldNode                       *corev1.Node
-		newNode                       *corev1.Node
-		expectPatch                   bool
-		expectDelete                  bool
-		additionalSleep               time.Duration
-		enablePodDisruptionConditions bool
+		description     string
+		pods            []corev1.Pod
+		oldNode         *corev1.Node
+		newNode         *corev1.Node
+		expectPatch     bool
+		expectDelete    bool
+		additionalSleep time.Duration
 	}{
 		{
-			description: "Added taint, expect node patched and deleted when PodDisruptionConditions is enabled",
-			pods: []corev1.Pod{
-				*testutil.NewPod("pod1", "node1"),
-			},
-			oldNode:                       testutil.NewNode("node1"),
-			newNode:                       addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
-			expectPatch:                   true,
-			expectDelete:                  true,
-			enablePodDisruptionConditions: true,
-		},
-		{
-			description: "Added taint",
+			description: "Added taint, expect node patched and deleted",
 			pods: []corev1.Pod{
 				*testutil.NewPod("pod1", "node1"),
 			},
 			oldNode:      testutil.NewNode("node1"),
 			newNode:      addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
+			expectPatch:  true,
 			expectDelete: true,
 		},
 		{
@@ -462,6 +428,7 @@ func TestUpdateNode(t *testing.T) {
 			},
 			oldNode:      testutil.NewNode("node1"),
 			newNode:      addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1, 2}),
+			expectPatch:  true,
 			expectDelete: true,
 		},
 		{
@@ -501,13 +468,13 @@ func TestUpdateNode(t *testing.T) {
 			},
 			oldNode:      testutil.NewNode("node1"),
 			newNode:      addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1, 2}),
+			expectPatch:  true,
 			expectDelete: true,
 		},
 	}
 
 	for _, item := range testCases {
 		t.Run(item.description, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions)
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
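A visible consequence in the taint-eviction hunks above: several cases that previously expected only a delete now also set expectPatch: true, because with the disruption-condition behavior always on the controller is expected to patch the pod (adding the DisruptionTarget condition) before deleting it.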
 
@@ -566,6 +566,7 @@ const (
 	// kep: https://kep.k8s.io/3329
 	// alpha: v1.25
 	// beta: v1.26
+	// stable: v1.31
 	//
 	// Enables support for appending a dedicated pod condition indicating that
 	// the pod is being deleted due to a disruption.
@@ -1115,7 +1116,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 
 	PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},
 
-	PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},
+	PodDisruptionConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
 
 	PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta},
 
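The registry hunk above is the heart of the graduation: the spec moves from Beta to GA and is locked to its default, which is what makes every per-test gate override elsewhere in the commit unnecessary. A minimal sketch of how such a locked GA spec is declared; the PodDisruptionConditions entry is copied from the hunk, while the surrounding package scaffolding is illustrative:

package features

import "k8s.io/component-base/featuregate"

const PodDisruptionConditions featuregate.Feature = "PodDisruptionConditions"

// With LockToDefault set, the featuregate machinery rejects attempts to set the
// gate away from its default (for example --feature-gates=PodDisruptionConditions=false),
// so production code and tests can treat the feature as unconditionally enabled.
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
	PodDisruptionConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
}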
 
 | 
				
			|||||||
@@ -271,14 +271,7 @@ func TestMemoryPressure_VerifyPodStatus(t *testing.T) {
 | 
				
			|||||||
	testCases := map[string]struct {
 | 
						testCases := map[string]struct {
 | 
				
			||||||
		wantPodStatus v1.PodStatus
 | 
							wantPodStatus v1.PodStatus
 | 
				
			||||||
	}{
 | 
						}{
 | 
				
			||||||
		"eviction due to memory pressure; no image fs": {
 | 
							"eviction due to memory pressure": {
 | 
				
			||||||
			wantPodStatus: v1.PodStatus{
 | 
					 | 
				
			||||||
				Phase:   v1.PodFailed,
 | 
					 | 
				
			||||||
				Reason:  "Evicted",
 | 
					 | 
				
			||||||
				Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
 | 
					 | 
				
			||||||
			},
 | 
					 | 
				
			||||||
		},
 | 
					 | 
				
			||||||
		"eviction due to memory pressure; image fs": {
 | 
					 | 
				
			||||||
			wantPodStatus: v1.PodStatus{
 | 
								wantPodStatus: v1.PodStatus{
 | 
				
			||||||
				Phase:   v1.PodFailed,
 | 
									Phase:   v1.PodFailed,
 | 
				
			||||||
				Reason:  "Evicted",
 | 
									Reason:  "Evicted",
 | 
				
			||||||
@@ -286,92 +279,84 @@ func TestMemoryPressure_VerifyPodStatus(t *testing.T) {
 | 
				
			|||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	for name, tc := range testCases {
 | 
						for _, tc := range testCases {
 | 
				
			||||||
		for _, enablePodDisruptionConditions := range []bool{false, true} {
 | 
							podMaker := makePodWithMemoryStats
 | 
				
			||||||
			t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) {
 | 
							summaryStatsMaker := makeMemoryStats
 | 
				
			||||||
				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions)
 | 
							podsToMake := []podToMake{
 | 
				
			||||||
 | 
								{name: "below-requests", requests: newResourceList("", "1Gi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "900Mi"},
 | 
				
			||||||
 | 
								{name: "above-requests", requests: newResourceList("", "100Mi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "700Mi"},
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							pods := []*v1.Pod{}
 | 
				
			||||||
 | 
							podStats := map[*v1.Pod]statsapi.PodStats{}
 | 
				
			||||||
 | 
							for _, podToMake := range podsToMake {
 | 
				
			||||||
 | 
								pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
 | 
				
			||||||
 | 
								pods = append(pods, pod)
 | 
				
			||||||
 | 
								podStats[pod] = podStat
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							activePodsFunc := func() []*v1.Pod {
 | 
				
			||||||
 | 
								return pods
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				podMaker := makePodWithMemoryStats
 | 
							fakeClock := testingclock.NewFakeClock(time.Now())
 | 
				
			||||||
				summaryStatsMaker := makeMemoryStats
 | 
							podKiller := &mockPodKiller{}
 | 
				
			||||||
				podsToMake := []podToMake{
 | 
							diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
 | 
				
			||||||
					{name: "below-requests", requests: newResourceList("", "1Gi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "900Mi"},
 | 
							diskGC := &mockDiskGC{err: nil}
 | 
				
			||||||
					{name: "above-requests", requests: newResourceList("", "100Mi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "700Mi"},
 | 
							nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
				pods := []*v1.Pod{}
 | 
					 | 
				
			||||||
				podStats := map[*v1.Pod]statsapi.PodStats{}
 | 
					 | 
				
			||||||
				for _, podToMake := range podsToMake {
 | 
					 | 
				
			||||||
					pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
 | 
					 | 
				
			||||||
					pods = append(pods, pod)
 | 
					 | 
				
			||||||
					podStats[pod] = podStat
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
				activePodsFunc := func() []*v1.Pod {
 | 
					 | 
				
			||||||
					return pods
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
				fakeClock := testingclock.NewFakeClock(time.Now())
 | 
							config := Config{
 | 
				
			||||||
				podKiller := &mockPodKiller{}
 | 
								PressureTransitionPeriod: time.Minute * 5,
 | 
				
			||||||
				diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
 | 
								Thresholds: []evictionapi.Threshold{
 | 
				
			||||||
				diskGC := &mockDiskGC{err: nil}
 | 
									{
 | 
				
			||||||
				nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 | 
										Signal:   evictionapi.SignalMemoryAvailable,
 | 
				
			||||||
 | 
										Operator: evictionapi.OpLessThan,
 | 
				
			||||||
				config := Config{
 | 
										Value: evictionapi.ThresholdValue{
 | 
				
			||||||
					PressureTransitionPeriod: time.Minute * 5,
 | 
											Quantity: quantityMustParse("2Gi"),
 | 
				
			||||||
					Thresholds: []evictionapi.Threshold{
 | 
					 | 
				
			||||||
						{
 | 
					 | 
				
			||||||
							Signal:   evictionapi.SignalMemoryAvailable,
 | 
					 | 
				
			||||||
							Operator: evictionapi.OpLessThan,
 | 
					 | 
				
			||||||
							Value: evictionapi.ThresholdValue{
 | 
					 | 
				
			||||||
								Quantity: quantityMustParse("2Gi"),
 | 
					 | 
				
			||||||
							},
 | 
					 | 
				
			||||||
						},
 | 
					 | 
				
			||||||
					},
 | 
										},
 | 
				
			||||||
				}
 | 
									},
 | 
				
			||||||
				summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500Mi", podStats)}
 | 
								},
 | 
				
			||||||
				manager := &managerImpl{
 | 
							}
 | 
				
			||||||
					clock:                        fakeClock,
 | 
							summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500Mi", podStats)}
 | 
				
			||||||
					killPodFunc:                  podKiller.killPodNow,
 | 
							manager := &managerImpl{
 | 
				
			||||||
					imageGC:                      diskGC,
 | 
								clock:                        fakeClock,
 | 
				
			||||||
					containerGC:                  diskGC,
 | 
								killPodFunc:                  podKiller.killPodNow,
 | 
				
			||||||
					config:                       config,
 | 
								imageGC:                      diskGC,
 | 
				
			||||||
					recorder:                     &record.FakeRecorder{},
 | 
								containerGC:                  diskGC,
 | 
				
			||||||
					summaryProvider:              summaryProvider,
 | 
								config:                       config,
 | 
				
			||||||
					nodeRef:                      nodeRef,
 | 
								recorder:                     &record.FakeRecorder{},
 | 
				
			||||||
					nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 | 
								summaryProvider:              summaryProvider,
 | 
				
			||||||
					thresholdsFirstObservedAt:    thresholdsObservedAt{},
 | 
								nodeRef:                      nodeRef,
 | 
				
			||||||
				}
 | 
								nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 | 
				
			||||||
 | 
								thresholdsFirstObservedAt:    thresholdsObservedAt{},
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// synchronize to detect the memory pressure
 | 
							// synchronize to detect the memory pressure
 | 
				
			||||||
				_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
 | 
							_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
					t.Fatalf("Manager expects no error but got %v", err)
 | 
								t.Fatalf("Manager expects no error but got %v", err)
 | 
				
			||||||
				}
 | 
							}
 | 
				
			||||||
				// verify memory pressure is detected
 | 
							// verify memory pressure is detected
 | 
				
			||||||
				if !manager.IsUnderMemoryPressure() {
 | 
							if !manager.IsUnderMemoryPressure() {
 | 
				
			||||||
					t.Fatalf("Manager should have detected memory pressure")
 | 
								t.Fatalf("Manager should have detected memory pressure")
 | 
				
			||||||
				}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// verify a pod is selected for eviction
 | 
							// verify a pod is selected for eviction
 | 
				
			||||||
				if podKiller.pod == nil {
 | 
							if podKiller.pod == nil {
 | 
				
			||||||
					t.Fatalf("Manager should have selected a pod for eviction")
 | 
								t.Fatalf("Manager should have selected a pod for eviction")
 | 
				
			||||||
				}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				wantPodStatus := tc.wantPodStatus.DeepCopy()
 | 
							wantPodStatus := tc.wantPodStatus.DeepCopy()
 | 
				
			||||||
				if enablePodDisruptionConditions {
 | 
							wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
 | 
				
			||||||
					wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
 | 
								Type:    "DisruptionTarget",
 | 
				
			||||||
						Type:    "DisruptionTarget",
 | 
								Status:  "True",
 | 
				
			||||||
						Status:  "True",
 | 
								Reason:  "TerminationByKubelet",
 | 
				
			||||||
						Reason:  "TerminationByKubelet",
 | 
								Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
 | 
				
			||||||
						Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
 | 
							})
 | 
				
			||||||
					})
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// verify the pod status after applying the status update function
 | 
							// verify the pod status after applying the status update function
 | 
				
			||||||
				podKiller.statusFn(&podKiller.pod.Status)
 | 
							podKiller.statusFn(&podKiller.pod.Status)
 | 
				
			||||||
				if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
 | 
							if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
 | 
				
			||||||
					t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
 | 
								t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
			})
 | 
					 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -388,93 +373,85 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) {
 | 
				
			|||||||
			},
 | 
								},
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	for name, tc := range testCases {
 | 
						for _, tc := range testCases {
 | 
				
			||||||
		for _, enablePodDisruptionConditions := range []bool{true, false} {
 | 
							podMaker := makePodWithPIDStats
 | 
				
			||||||
			t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) {
 | 
							summaryStatsMaker := makePIDStats
 | 
				
			||||||
				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions)
 | 
							podsToMake := []podToMake{
 | 
				
			||||||
 | 
								{name: "pod1", priority: lowPriority, pidUsage: 500},
 | 
				
			||||||
 | 
								{name: "pod2", priority: defaultPriority, pidUsage: 500},
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							pods := []*v1.Pod{}
 | 
				
			||||||
 | 
							podStats := map[*v1.Pod]statsapi.PodStats{}
 | 
				
			||||||
 | 
							for _, podToMake := range podsToMake {
 | 
				
			||||||
 | 
								pod, podStat := podMaker(podToMake.name, podToMake.priority, 2)
 | 
				
			||||||
 | 
								pods = append(pods, pod)
 | 
				
			||||||
 | 
								podStats[pod] = podStat
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							activePodsFunc := func() []*v1.Pod {
 | 
				
			||||||
 | 
								return pods
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				podMaker := makePodWithPIDStats
 | 
							fakeClock := testingclock.NewFakeClock(time.Now())
 | 
				
			||||||
				summaryStatsMaker := makePIDStats
 | 
							podKiller := &mockPodKiller{}
 | 
				
			||||||
				podsToMake := []podToMake{
 | 
							diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
 | 
				
			||||||
					{name: "pod1", priority: lowPriority, pidUsage: 500},
 | 
							diskGC := &mockDiskGC{err: nil}
 | 
				
			||||||
					{name: "pod2", priority: defaultPriority, pidUsage: 500},
 | 
							nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
				pods := []*v1.Pod{}
 | 
					 | 
				
			||||||
				podStats := map[*v1.Pod]statsapi.PodStats{}
 | 
					 | 
				
			||||||
				for _, podToMake := range podsToMake {
 | 
					 | 
				
			||||||
					pod, podStat := podMaker(podToMake.name, podToMake.priority, 2)
 | 
					 | 
				
			||||||
					pods = append(pods, pod)
 | 
					 | 
				
			||||||
					podStats[pod] = podStat
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
				activePodsFunc := func() []*v1.Pod {
 | 
					 | 
				
			||||||
					return pods
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
				fakeClock := testingclock.NewFakeClock(time.Now())
 | 
							config := Config{
 | 
				
			||||||
				podKiller := &mockPodKiller{}
 | 
								PressureTransitionPeriod: time.Minute * 5,
 | 
				
			||||||
				diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
 | 
								Thresholds: []evictionapi.Threshold{
 | 
				
			||||||
				diskGC := &mockDiskGC{err: nil}
 | 
									{
 | 
				
			||||||
				nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 | 
										Signal:   evictionapi.SignalPIDAvailable,
 | 
				
			||||||
 | 
										Operator: evictionapi.OpLessThan,
 | 
				
			||||||
				config := Config{
 | 
										Value: evictionapi.ThresholdValue{
 | 
				
			||||||
					PressureTransitionPeriod: time.Minute * 5,
 | 
											Quantity: quantityMustParse("1200"),
 | 
				
			||||||
					Thresholds: []evictionapi.Threshold{
 | 
					 | 
				
			||||||
						{
 | 
					 | 
				
			||||||
							Signal:   evictionapi.SignalPIDAvailable,
 | 
					 | 
				
			||||||
							Operator: evictionapi.OpLessThan,
 | 
					 | 
				
			||||||
							Value: evictionapi.ThresholdValue{
 | 
					 | 
				
			||||||
								Quantity: quantityMustParse("1200"),
 | 
					 | 
				
			||||||
							},
 | 
					 | 
				
			||||||
						},
 | 
					 | 
				
			||||||
					},
 | 
										},
 | 
				
			||||||
				}
 | 
									},
 | 
				
			||||||
				summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500", "1000", podStats)}
 | 
								},
 | 
				
			||||||
				manager := &managerImpl{
 | 
							}
 | 
				
			||||||
					clock:                        fakeClock,
 | 
							summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500", "1000", podStats)}
 | 
				
			||||||
					killPodFunc:                  podKiller.killPodNow,
 | 
							manager := &managerImpl{
 | 
				
			||||||
					imageGC:                      diskGC,
 | 
								clock:                        fakeClock,
 | 
				
			||||||
					containerGC:                  diskGC,
 | 
								killPodFunc:                  podKiller.killPodNow,
 | 
				
			||||||
					config:                       config,
 | 
								imageGC:                      diskGC,
 | 
				
			||||||
					recorder:                     &record.FakeRecorder{},
 | 
								containerGC:                  diskGC,
 | 
				
			||||||
					summaryProvider:              summaryProvider,
 | 
								config:                       config,
 | 
				
			||||||
					nodeRef:                      nodeRef,
 | 
								recorder:                     &record.FakeRecorder{},
 | 
				
			||||||
					nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 | 
								summaryProvider:              summaryProvider,
 | 
				
			||||||
					thresholdsFirstObservedAt:    thresholdsObservedAt{},
 | 
								nodeRef:                      nodeRef,
 | 
				
			||||||
				}
 | 
								nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 | 
				
			||||||
 | 
								thresholdsFirstObservedAt:    thresholdsObservedAt{},
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// synchronize to detect the PID pressure
 | 
							// synchronize to detect the PID pressure
 | 
				
			||||||
				_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
 | 
							_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
					t.Fatalf("Manager expects no error but got %v", err)
 | 
								t.Fatalf("Manager expects no error but got %v", err)
 | 
				
			||||||
				}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// verify PID pressure is detected
 | 
							// verify PID pressure is detected
 | 
				
			||||||
				if !manager.IsUnderPIDPressure() {
 | 
							if !manager.IsUnderPIDPressure() {
 | 
				
			||||||
					t.Fatalf("Manager should have detected PID pressure")
 | 
								t.Fatalf("Manager should have detected PID pressure")
 | 
				
			||||||
				}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// verify a pod is selected for eviction
 | 
							// verify a pod is selected for eviction
 | 
				
			||||||
				if podKiller.pod == nil {
 | 
							if podKiller.pod == nil {
 | 
				
			||||||
					t.Fatalf("Manager should have selected a pod for eviction")
 | 
								t.Fatalf("Manager should have selected a pod for eviction")
 | 
				
			||||||
				}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
				wantPodStatus := tc.wantPodStatus.DeepCopy()
 | 
							wantPodStatus := tc.wantPodStatus.DeepCopy()
 | 
				
			||||||
				if enablePodDisruptionConditions {
 | 
							wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
 | 
				
			||||||
					wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
 | 
								Type:    "DisruptionTarget",
 | 
				
			||||||
						Type:    "DisruptionTarget",
 | 
								Status:  "True",
 | 
				
			||||||
						Status:  "True",
 | 
								Reason:  "TerminationByKubelet",
 | 
				
			||||||
						Reason:  "TerminationByKubelet",
 | 
								Message: "The node was low on resource: pids. Threshold quantity: 1200, available: 500. ",
 | 
				
			||||||
						Message: "The node was low on resource: pids. Threshold quantity: 1200, available: 500. ",
 | 
							})
 | 
				
			||||||
					})
 | 
					 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
				// verify the pod status after applying the status update function
 | 
							// verify the pod status after applying the status update function
 | 
				
			||||||
				podKiller.statusFn(&podKiller.pod.Status)
 | 
							podKiller.statusFn(&podKiller.pod.Status)
 | 
				
			||||||
				if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
 | 
							if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
 | 
				
			||||||
					t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
 | 
								t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
 | 
				
			||||||
				}
 | 
					 | 
				
			||||||
			})
 | 
					 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -570,97 +547,90 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
 			},
 		},
 	}
-	for name, tc := range testCases {
-		for _, enablePodDisruptionConditions := range []bool{false, true} {
-			t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) {
-				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletSeparateDiskGC, tc.kubeletSeparateDiskFeature)
-				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions)
+	for _, tc := range testCases {
+		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletSeparateDiskGC, tc.kubeletSeparateDiskFeature)

 		podMaker := makePodWithDiskStats
 		summaryStatsMaker := makeDiskStats
 		podsToMake := tc.podToMakes
 		wantPodStatus := v1.PodStatus{
 			Phase:   v1.PodFailed,
 			Reason:  "Evicted",
 			Message: tc.evictionMessage,
 		}
 		pods := []*v1.Pod{}
 		podStats := map[*v1.Pod]statsapi.PodStats{}
 		for _, podToMake := range podsToMake {
 			pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed, nil)
 			pods = append(pods, pod)
 			podStats[pod] = podStat
 		}
 		activePodsFunc := func() []*v1.Pod {
 			return pods
 		}

 		fakeClock := testingclock.NewFakeClock(time.Now())
 		podKiller := &mockPodKiller{}
 		diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: tc.dedicatedImageFs}
 		diskGC := &mockDiskGC{err: nil, readAndWriteSeparate: tc.writeableSeparateFromReadOnly}
 		nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}

 		config := Config{
 			PressureTransitionPeriod: time.Minute * 5,
 			Thresholds:               []evictionapi.Threshold{tc.thresholdToMonitor},
 		}
 		diskStat := diskStats{
 			rootFsAvailableBytes:      tc.nodeFsStats,
 			imageFsAvailableBytes:     tc.imageFsStats,
 			containerFsAvailableBytes: tc.containerFsStats,
 			podStats:                  podStats,
 		}
 		summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(diskStat)}
 		manager := &managerImpl{
 			clock:                        fakeClock,
 			killPodFunc:                  podKiller.killPodNow,
 			imageGC:                      diskGC,
 			containerGC:                  diskGC,
 			config:                       config,
 			recorder:                     &record.FakeRecorder{},
 			summaryProvider:              summaryProvider,
 			nodeRef:                      nodeRef,
 			nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
 			thresholdsFirstObservedAt:    thresholdsObservedAt{},
 		}

 		// synchronize
 		pods, synchErr := manager.synchronize(diskInfoProvider, activePodsFunc)

 		if synchErr == nil && tc.expectErr != "" {
 			t.Fatalf("Manager should report error but did not")
 		} else if tc.expectErr != "" && synchErr != nil {
 			if diff := cmp.Diff(tc.expectErr, synchErr.Error()); diff != "" {
 				t.Errorf("Unexpected error (-want,+got):\n%s", diff)
 			}
 		} else {
 			// verify manager detected disk pressure
 			if !manager.IsUnderDiskPressure() {
 				t.Fatalf("Manager should report disk pressure")
 			}

 			// verify a pod is selected for eviction
 			if podKiller.pod == nil {
 				t.Fatalf("Manager should have selected a pod for eviction")
 			}

-			if enablePodDisruptionConditions {
-				wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
-					Type:    "DisruptionTarget",
-					Status:  "True",
-					Reason:  "TerminationByKubelet",
-					Message: tc.evictionMessage,
-				})
-			}
+			wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
+				Type:    "DisruptionTarget",
+				Status:  "True",
+				Reason:  "TerminationByKubelet",
+				Message: tc.evictionMessage,
+			})

 			// verify the pod status after applying the status update function
 			podKiller.statusFn(&podKiller.pod.Status)
 			if diff := cmp.Diff(wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
 				t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
 			}
 		}
-		})
-		}
 	}
 }
@@ -3267,13 +3267,12 @@ func Test_generateAPIPodStatus(t *testing.T) {
 		unreadyContainer                           []string
 		previousStatus                             v1.PodStatus
 		isPodTerminal                              bool
-		enablePodDisruptionConditions              bool
 		expected                                   v1.PodStatus
-		expectedPodDisruptionCondition             v1.PodCondition
+		expectedPodDisruptionCondition             *v1.PodCondition
 		expectedPodReadyToStartContainersCondition v1.PodCondition
 	}{
 		{
-			name: "pod disruption condition is copied over and the phase is set to failed when deleted; PodDisruptionConditions enabled",
+			name: "pod disruption condition is copied over and the phase is set to failed when deleted",
 			pod: &v1.Pod{
 				Spec: desiredState,
 				Status: v1.PodStatus{
@@ -3301,8 +3300,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					LastTransitionTime: normalized_now,
 				}},
 			},
-			isPodTerminal:                 true,
-			enablePodDisruptionConditions: true,
+			isPodTerminal: true,
 			expected: v1.PodStatus{
 				Phase:    v1.PodFailed,
 				HostIP:   "127.0.0.1",
@@ -3319,7 +3317,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 					ready(waitingWithLastTerminationUnknown("containerB", 0)),
 				},
 			},
-			expectedPodDisruptionCondition: v1.PodCondition{
+			expectedPodDisruptionCondition: &v1.PodCondition{
 				Type:               v1.DisruptionTarget,
 				Status:             v1.ConditionTrue,
 				LastTransitionTime: normalized_now,
@@ -3705,7 +3703,6 @@ func Test_generateAPIPodStatus(t *testing.T) {
 	for _, test := range tests {
 		for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
 			t.Run(test.name, func(t *testing.T) {
-				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)
 				testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 				defer testKubelet.Cleanup()
@@ -3719,8 +3716,8 @@ func Test_generateAPIPodStatus(t *testing.T) {
 				if enablePodReadyToStartContainersCondition {
 					expected.Conditions = append([]v1.PodCondition{test.expectedPodReadyToStartContainersCondition}, expected.Conditions...)
 				}
-				if test.enablePodDisruptionConditions {
-					expected.Conditions = append([]v1.PodCondition{test.expectedPodDisruptionCondition}, expected.Conditions...)
+				if test.expectedPodDisruptionCondition != nil {
+					expected.Conditions = append([]v1.PodCondition{*test.expectedPodDisruptionCondition}, expected.Conditions...)
 				}
 				if !apiequality.Semantic.DeepEqual(*expected, actual) {
 					t.Fatalf("Unexpected status: %s", cmp.Diff(*expected, actual))
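The hunk above swaps a bool-plus-value pair of table fields for a single pointer field, so a nil pointer now means "no disruption condition expected". A generic sketch of that table-test pattern, with illustrative names only (not taken from the kubelet code):

```go
package example

import "testing"

// result stands in for the value under test; in the kubelet test above it is
// a v1.PodCondition.
type result struct{ reason string }

func compute(fail bool) *result {
	if fail {
		return &result{reason: "DisruptionTarget"}
	}
	return nil
}

func TestOptionalExpectation(t *testing.T) {
	cases := []struct {
		name string
		fail bool
		// nil means "nothing expected"; non-nil both enables the check and
		// carries the expected value, replacing enable-bool + value fields.
		want *result
	}{
		{name: "no condition expected", fail: false, want: nil},
		{name: "condition expected", fail: true, want: &result{reason: "DisruptionTarget"}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := compute(tc.fail)
			if (got == nil) != (tc.want == nil) {
				t.Fatalf("got %v, want %v", got, tc.want)
			}
			if got != nil && got.reason != tc.want.reason {
				t.Fatalf("got reason %q, want %q", got.reason, tc.want.reason)
			}
		})
	}
}
```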
@@ -125,14 +125,13 @@ func TestManager(t *testing.T) {
 		shutdownGracePeriodCriticalPods  time.Duration
 		systemInhibitDelay               time.Duration
 		overrideSystemInhibitDelay       time.Duration
-		enablePodDisruptionConditions    bool
 		expectedDidOverrideInhibitDelay  bool
 		expectedPodToGracePeriodOverride map[string]int64
 		expectedError                    error
 		expectedPodStatuses              map[string]v1.PodStatus
 	}{
 		{
-			desc: "verify pod status; PodDisruptionConditions enabled",
+			desc: "verify pod status",
 			activePods: []*v1.Pod{
 				{
 					ObjectMeta: metav1.ObjectMeta{Name: "running-pod"},
@@ -160,7 +159,6 @@ func TestManager(t *testing.T) {
 			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
 			systemInhibitDelay:               time.Duration(40 * time.Second),
 			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
-			enablePodDisruptionConditions:    true,
 			expectedDidOverrideInhibitDelay:  false,
 			expectedPodToGracePeriodOverride: map[string]int64{"running-pod": 20, "failed-pod": 20, "succeeded-pod": 20},
 			expectedPodStatuses: map[string]v1.PodStatus{
@@ -212,7 +210,6 @@ func TestManager(t *testing.T) {
 			shutdownGracePeriodCriticalPods:  time.Duration(10 * time.Second),
 			systemInhibitDelay:               time.Duration(40 * time.Second),
 			overrideSystemInhibitDelay:       time.Duration(40 * time.Second),
-			enablePodDisruptionConditions:    false,
 			expectedDidOverrideInhibitDelay:  false,
 			expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10},
 			expectedPodStatuses: map[string]v1.PodStatus{
@@ -220,11 +217,27 @@ func TestManager(t *testing.T) {
 					Phase:   v1.PodFailed,
 					Message: "Pod was terminated in response to imminent node shutdown.",
 					Reason:  "Terminated",
+					Conditions: []v1.PodCondition{
+						{
+							Type:    v1.DisruptionTarget,
+							Status:  v1.ConditionTrue,
+							Reason:  "TerminationByKubelet",
+							Message: "Pod was terminated in response to imminent node shutdown.",
+						},
+					},
 				},
 				"critical-pod-nil-grace-period": {
 					Phase:   v1.PodFailed,
 					Message: "Pod was terminated in response to imminent node shutdown.",
 					Reason:  "Terminated",
+					Conditions: []v1.PodCondition{
+						{
+							Type:    v1.DisruptionTarget,
+							Status:  v1.ConditionTrue,
+							Reason:  "TerminationByKubelet",
+							Message: "Pod was terminated in response to imminent node shutdown.",
+						},
+					},
 				},
 			},
 		},
@@ -331,7 +344,6 @@ func TestManager(t *testing.T) {
 			systemDbus = func() (dbusInhibiter, error) {
 				return fakeDbus, nil
 			}
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodDisruptionConditions, tc.enablePodDisruptionConditions)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, true)

 			proberManager := probetest.FakeManager{}
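The shutdown-manager expectations above now require the DisruptionTarget condition unconditionally. A small hedged sketch of how such an expectation could be checked directly on a pod status; the helper name is hypothetical and not part of this commit:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
)

// hasDisruptionTarget is a hypothetical helper: it reports whether a pod
// status carries a DisruptionTarget condition with Status=True, which is what
// the expectedPodStatuses entries above assert after a graceful node shutdown.
func hasDisruptionTarget(status v1.PodStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == v1.DisruptionTarget && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
```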
@@ -35,14 +35,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
@@ -1065,9 +1062,8 @@ func TestTerminatePod_DefaultUnknownStatus(t *testing.T) {

 func TestTerminatePod_EnsurePodPhaseIsTerminal(t *testing.T) {
 	testCases := map[string]struct {
-		enablePodDisruptionConditions bool
-		status                        v1.PodStatus
-		wantStatus                    v1.PodStatus
+		status     v1.PodStatus
+		wantStatus v1.PodStatus
 	}{
 		"Pending pod": {
 			status: v1.PodStatus{
@@ -1542,24 +1538,14 @@ func deleteAction() core.DeleteAction {

 func TestMergePodStatus(t *testing.T) {
 	useCases := []struct {
-		desc                          string
-		enablePodDisruptionConditions bool
-		hasRunningContainers          bool
-		oldPodStatus                  func(input v1.PodStatus) v1.PodStatus
-		newPodStatus                  func(input v1.PodStatus) v1.PodStatus
-		expectPodStatus               v1.PodStatus
+		desc                 string
+		hasRunningContainers bool
+		oldPodStatus         func(input v1.PodStatus) v1.PodStatus
+		newPodStatus         func(input v1.PodStatus) v1.PodStatus
+		expectPodStatus      v1.PodStatus
 	}{
 		{
-			"no change",
-			false,
-			false,
-			func(input v1.PodStatus) v1.PodStatus { return input },
-			func(input v1.PodStatus) v1.PodStatus { return input },
-			getPodStatus(),
-		},
-		{
-			"add DisruptionTarget condition when transitioning into failed phase; PodDisruptionConditions enabled",
-			true,
+			"add DisruptionTarget condition when transitioning into failed phase",
 			false,
 			func(input v1.PodStatus) v1.PodStatus { return input },
 			func(input v1.PodStatus) v1.PodStatus {
@@ -1598,8 +1584,7 @@ func TestMergePodStatus(t *testing.T) {
 			},
 		},
 		{
-			"don't add DisruptionTarget condition when transitioning into failed phase, but there are might still be running containers; PodDisruptionConditions enabled",
-			true,
+			"don't add DisruptionTarget condition when transitioning into failed phase, but there might still be running containers",
 			true,
 			func(input v1.PodStatus) v1.PodStatus { return input },
 			func(input v1.PodStatus) v1.PodStatus {
@@ -1627,8 +1612,7 @@ func TestMergePodStatus(t *testing.T) {
 			},
 		},
 		{
-			"preserve DisruptionTarget condition; PodDisruptionConditions enabled",
-			true,
+			"preserve DisruptionTarget condition",
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
@@ -1662,43 +1646,7 @@ func TestMergePodStatus(t *testing.T) {
 			},
 		},
 		{
-			"preserve DisruptionTarget condition; PodDisruptionConditions disabled",
-			false,
-			false,
-			func(input v1.PodStatus) v1.PodStatus {
-				input.Conditions = append(input.Conditions, v1.PodCondition{
-					Type:   v1.DisruptionTarget,
-					Status: v1.ConditionTrue,
-					Reason: "TerminationByKubelet",
-				})
-				return input
-			},
-			func(input v1.PodStatus) v1.PodStatus {
-				return input
-			},
-			v1.PodStatus{
-				Phase: v1.PodRunning,
-				Conditions: []v1.PodCondition{
-					{
-						Type:   v1.PodReady,
-						Status: v1.ConditionTrue,
-					},
-					{
-						Type:   v1.PodScheduled,
-						Status: v1.ConditionTrue,
-					},
-					{
-						Type:   v1.DisruptionTarget,
-						Status: v1.ConditionTrue,
-						Reason: "TerminationByKubelet",
-					},
-				},
-				Message: "Message",
-			},
-		},
-		{
-			"override DisruptionTarget condition; PodDisruptionConditions enabled",
-			true,
+			"override DisruptionTarget condition",
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
@@ -1744,8 +1692,7 @@ func TestMergePodStatus(t *testing.T) {
 			},
 		},
 		{
-			"don't override DisruptionTarget condition when remaining in running phase; PodDisruptionConditions enabled",
-			true,
+			"don't override DisruptionTarget condition when remaining in running phase",
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
@@ -1784,8 +1731,7 @@ func TestMergePodStatus(t *testing.T) {
 			},
 		},
 		{
-			"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers; PodDisruptionConditions enabled",
-			true,
+			"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers",
 			true,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
@@ -1827,7 +1773,6 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"readiness changes",
 			false,
-			false,
 			func(input v1.PodStatus) v1.PodStatus { return input },
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions[0].Status = v1.ConditionFalse
@@ -1851,7 +1796,6 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"additional pod condition",
 			false,
-			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
 					Type:   v1.PodConditionType("example.com/feature"),
@@ -1882,7 +1826,6 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"additional pod condition and readiness changes",
 			false,
-			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
 					Type:   v1.PodConditionType("example.com/feature"),
@@ -1916,7 +1859,6 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"additional pod condition changes",
 			false,
-			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
 					Type:   v1.PodConditionType("example.com/feature"),
@@ -1953,7 +1895,6 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"phase is transitioning to failed and no containers running",
 			false,
-			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Phase = v1.PodRunning
 				input.Reason = "Unknown"
@@ -1990,7 +1931,6 @@ func TestMergePodStatus(t *testing.T) {
 		},
 		{
 			"phase is transitioning to failed and containers running",
-			false,
 			true,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Phase = v1.PodRunning
@@ -2024,7 +1964,6 @@ func TestMergePodStatus(t *testing.T) {

 	for _, tc := range useCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
 			output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()), tc.hasRunningContainers)
 			if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) {
 				t.Fatalf("unexpected output: %s", cmp.Diff(tc.expectPodStatus, output))
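The TestMergePodStatus table above uses positional (unkeyed) struct literals, which is why dropping the enablePodDisruptionConditions field forces an edit in every single case. A generic sketch of the trade-off, with illustrative names; this commit keeps the positional style rather than switching:

```go
package example

type useCase struct {
	desc                 string
	hasRunningContainers bool
}

// Positional literals break (or silently shift meaning) whenever a field is
// added or removed, which is why every case in the table had to change.
// Keyed literals are unaffected by field removal or reordering.
var (
	positional = useCase{"phase is transitioning to failed", true}
	keyed      = useCase{desc: "phase is transitioning to failed", hasRunningContainers: true}
)
```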
@@ -27,7 +27,6 @@ import (

 func TestPodConditionByKubelet(t *testing.T) {
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, true)
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)

 	trueCases := []v1.PodConditionType{
 		v1.PodScheduled,
@@ -56,8 +55,6 @@ func TestPodConditionByKubelet(t *testing.T) {
 }

 func TestPodConditionSharedByKubelet(t *testing.T) {
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
-
 	trueCases := []v1.PodConditionType{
 		v1.DisruptionTarget,
 	}
@@ -802,46 +802,42 @@ func TestAddConditionAndDelete(t *testing.T) {
 	evictionRest := newEvictionStorage(storage.Store, client.PolicyV1())

 	for _, tc := range cases {
-		for _, conditionsEnabled := range []bool{true, false} {
-			name := fmt.Sprintf("%s_conditions=%v", tc.name, conditionsEnabled)
-			t.Run(name, func(t *testing.T) {
-				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, conditionsEnabled)
+		t.Run(tc.name, func(t *testing.T) {
 			var deleteOptions *metav1.DeleteOptions
 			if tc.initialPod {
 				newPod := validNewPod()
 				createdObj, err := storage.Create(testContext, newPod, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
 				if err != nil {
 					t.Fatal(err)
 				}
 				t.Cleanup(func() {
 					zero := int64(0)
 					if _, _, err := storage.Delete(testContext, newPod.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{GracePeriodSeconds: &zero}); err != nil && !apierrors.IsNotFound(err) {
 						t.Fatal(err)
 					}
 				})
 				deleteOptions = tc.makeDeleteOptions(createdObj.(*api.Pod))
 			} else {
 				deleteOptions = tc.makeDeleteOptions(nil)
 			}
 			if deleteOptions == nil {
 				deleteOptions = &metav1.DeleteOptions{}
 			}

 			err := addConditionAndDeletePod(evictionRest, testContext, "foo", rest.ValidateAllObjectFunc, deleteOptions)
 			if err == nil {
 				if tc.expectErr != "" {
 					t.Fatalf("expected err containing %q, got none", tc.expectErr)
 				}
 				return
 			}
 			if tc.expectErr == "" {
 				t.Fatalf("unexpected err: %v", err)
 			}
 			if !strings.Contains(err.Error(), tc.expectErr) {
 				t.Fatalf("expected err containing %q, got %v", tc.expectErr, err)
 			}
 		})
-		}
 	}
 }
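The rewritten loop above relies on t.Cleanup to remove the pod it created regardless of how the subtest exits. A minimal hedged sketch of that pattern; createResource and deleteResource are illustrative stand-ins for storage.Create and storage.Delete:

```go
package example

import "testing"

// createResource and deleteResource are illustrative stand-ins only.
func createResource(name string) error { return nil }
func deleteResource(name string) error { return nil }

func TestWithCleanup(t *testing.T) {
	t.Run("create then always delete", func(t *testing.T) {
		if err := createResource("foo"); err != nil {
			t.Fatal(err)
		}
		// t.Cleanup runs after the subtest finishes, even if it fails, so the
		// fixture cannot leak into later subtests.
		t.Cleanup(func() {
			if err := deleteResource("foo"); err != nil {
				t.Fatal(err)
			}
		})
		// ... exercise eviction against the created resource ...
	})
}
```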
@@ -1437,7 +1437,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			pods:                []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
 			nodes:               []string{"node1"},
 			nominatedNodeStatus: nil,
-			expected:            false,
+			expected:            true,
 		},
 		{
 			name:                "Pod without nominated node",
@@ -1456,8 +1456,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			expected:            false,
 		},
 		{
-			name: "victim Pods terminating, feature PodDisruptionConditions is enabled",
-			fts:  feature.Features{EnablePodDisruptionConditions: true},
+			name: "victim Pods terminating",
 			pod:  st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
 			pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().
 				Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()},
@@ -1465,34 +1464,17 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			expected: false,
 		},
 		{
-			name:     "non-victim Pods terminating, feature PodDisruptionConditions is enabled",
-			fts:      feature.Features{EnablePodDisruptionConditions: true},
+			name:     "non-victim Pods terminating",
 			pod:      st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
 			pods:     []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
 			nodes:    []string{"node1"},
 			expected: true,
 		},
-		{
-			name: "victim Pods terminating, feature PodDisruptionConditions is disabled",
-			fts:  feature.Features{EnablePodDisruptionConditions: false},
-			pod:  st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
-			pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().
-				Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()},
-			nodes:    []string{"node1"},
-			expected: false,
-		},
-		{
-			name:     "non-victim Pods terminating, feature PodDisruptionConditions is disabled",
-			fts:      feature.Features{EnablePodDisruptionConditions: false},
-			pod:      st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
-			pods:     []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
-			nodes:    []string{"node1"},
-			expected: false,
-		},
 	}

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
+			test.fts.EnablePodDisruptionConditions = true
 			logger, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
@@ -31,7 +31,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-	"k8s.io/kubernetes/pkg/features"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/eviction"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -513,16 +512,13 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
 		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
 	})

-	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled", nodefeature.PodDisruptionConditions, func() {
+	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", nodefeature.PodDisruptionConditions, func() {
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 			pidsConsumed := int64(10000)
 			summary := eventuallyGetSummary(ctx)
 			availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
 			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
-			initialConfig.FeatureGates = map[string]bool{
-				string(features.PodDisruptionConditions): true,
-			}
 		})
 		disruptionTarget := v1.DisruptionTarget
 		specs := []podEvictSpec{
@@ -83,7 +83,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
 		}
 	})

-	f.Context("graceful node shutdown when PodDisruptionConditions are enabled", nodefeature.PodDisruptionConditions, func() {
+	f.Context("graceful node shutdown; baseline scenario to verify DisruptionTarget is added", nodefeature.PodDisruptionConditions, func() {

 		const (
 			pollInterval            = 1 * time.Second
@@ -95,7 +95,6 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 			initialConfig.FeatureGates = map[string]bool{
 				string(features.GracefulNodeShutdown):                   true,
-				string(features.PodDisruptionConditions):                true,
 				string(features.GracefulNodeShutdownBasedOnPodPriority): false,
 			}
 			initialConfig.ShutdownGracePeriod = metav1.Duration{Duration: nodeShutdownGracePeriod}
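In the e2e_node suites above, gates are toggled through the kubelet configuration rather than through test-process overrides. A hedged sketch of what the callback reduces to after the GA gate's entry is dropped; the function name is illustrative:

```go
package example

import (
	"k8s.io/kubernetes/pkg/features"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

// configureShutdownGates mirrors the tempSetCurrentKubeletConfig callback
// above: once PodDisruptionConditions is GA its entry disappears, and only
// gates that still need explicit values remain in the kubelet config.
func configureShutdownGates(initialConfig *kubeletconfig.KubeletConfiguration) {
	initialConfig.FeatureGates = map[string]bool{
		string(features.GracefulNodeShutdown):                   true,
		string(features.GracefulNodeShutdownBasedOnPodPriority): false,
	}
}
```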
@@ -346,36 +346,22 @@ func TestEvictionVersions(t *testing.T) {
 // TestEvictionWithFinalizers tests eviction with the use of finalizers
 func TestEvictionWithFinalizers(t *testing.T) {
 	cases := map[string]struct {
-		enablePodDisruptionConditions bool
-		phase                         v1.PodPhase
-		dryRun                        bool
-		wantDisruptionTargetCond      bool
+		phase                    v1.PodPhase
+		dryRun                   bool
+		wantDisruptionTargetCond bool
 	}{
-		"terminal pod with PodDisruptionConditions enabled": {
-			enablePodDisruptionConditions: true,
-			phase:                         v1.PodSucceeded,
-			wantDisruptionTargetCond:      true,
-		},
-		"terminal pod with PodDisruptionConditions disabled": {
-			enablePodDisruptionConditions: false,
-			phase:                         v1.PodSucceeded,
-			wantDisruptionTargetCond:      false,
-		},
-		"running pod with PodDisruptionConditions enabled": {
-			enablePodDisruptionConditions: true,
-			phase:                         v1.PodRunning,
-			wantDisruptionTargetCond:      true,
-		},
-		"running pod with PodDisruptionConditions disabled": {
-			enablePodDisruptionConditions: false,
-			phase:                         v1.PodRunning,
-			wantDisruptionTargetCond:      false,
-		},
-		"running pod with PodDisruptionConditions enabled should not update conditions in dry-run mode": {
-			enablePodDisruptionConditions: true,
-			phase:                         v1.PodRunning,
-			dryRun:                        true,
-			wantDisruptionTargetCond:      false,
+		"terminal pod": {
+			phase:                    v1.PodSucceeded,
+			wantDisruptionTargetCond: true,
+		},
+		"running pod": {
+			phase:                    v1.PodRunning,
+			wantDisruptionTargetCond: true,
+		},
+		"running pod should not update conditions in dry-run mode": {
+			phase:                    v1.PodRunning,
+			dryRun:                   true,
+			wantDisruptionTargetCond: false,
 		},
 	}
 	for name, tc := range cases {
@@ -386,7 +372,6 @@ func TestEvictionWithFinalizers(t *testing.T) {
 
 			ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-finalizers", t)
 			defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
 			defer tCtx.Cancel("test has completed")
 
 			informers.Start(tCtx.Done())
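With PodDisruptionConditions graduated, the hunks above drop the per-case gate toggle; only gates that are still optional keep an override field. A minimal sketch of the pattern that remains, assuming a hypothetical package and test name (only the SetFeatureGateDuringTest call and the import paths are taken from this diff):

package example // hypothetical package for this sketch

import (
	"testing"

	"k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestEvictionSketch is illustrative only: the per-case override for
// PodDisruptionConditions is gone because the gate is GA and always on;
// only gates that can still be toggled keep a field in the test table.
func TestEvictionSketch(t *testing.T) {
	cases := map[string]struct {
		enableJobPodReplacementPolicy bool
	}{
		"replacement policy enabled":  {enableJobPodReplacementPolicy: true},
		"replacement policy disabled": {enableJobPodReplacementPolicy: false},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)
			// ... set up the fixture and assert on eviction behavior ...
		})
	}
}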
@@ -51,31 +51,21 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
 	nodeIndex := 1 // the exact node doesn't matter, pick one
 
 	tests := map[string]struct {
-		enablePodDisruptionConditions          bool
 		enableSeparateTaintEvictionController  bool
 		startStandaloneTaintEvictionController bool
 		wantPodEvicted                         bool
 	}{
-		"Test eviction for NoExecute taint added by user; pod condition added when PodDisruptionConditions enabled; separate taint eviction controller disabled": {
-			enablePodDisruptionConditions:          true,
-			enableSeparateTaintEvictionController:  false,
-			startStandaloneTaintEvictionController: false,
-			wantPodEvicted:                         true,
-		},
-		"Test eviction for NoExecute taint added by user; no pod condition added when PodDisruptionConditions disabled; separate taint eviction controller disabled": {
-			enablePodDisruptionConditions:          false,
+		"Test eviction for NoExecute taint added by user; pod condition added; separate taint eviction controller disabled": {
 			enableSeparateTaintEvictionController:  false,
 			startStandaloneTaintEvictionController: false,
 			wantPodEvicted:                         true,
 		},
 		"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled but not started": {
-			enablePodDisruptionConditions:          false,
 			enableSeparateTaintEvictionController:  true,
 			startStandaloneTaintEvictionController: false,
 			wantPodEvicted:                         false,
 		},
 		"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled and started": {
-			enablePodDisruptionConditions:          false,
 			enableSeparateTaintEvictionController:  true,
 			startStandaloneTaintEvictionController: true,
 			wantPodEvicted:                         true,
@@ -124,7 +114,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
 				},
 			}
 
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SeparateTaintEvictionController, test.enableSeparateTaintEvictionController)
 			testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil)
 			cs := testCtx.ClientSet
@@ -202,9 +191,9 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
 				t.Fatalf("Test Failed: error: %q, while getting updated pod", err)
 			}
 			_, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget)
-			if test.enablePodDisruptionConditions && cond == nil {
+			if test.wantPodEvicted && cond == nil {
 				t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
-			} else if !test.enablePodDisruptionConditions && cond != nil {
+			} else if !test.wantPodEvicted && cond != nil {
 				t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
 			}
 		})
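The assertion above now keys off wantPodEvicted instead of the removed gate field. As a sketch, the same check could be factored into a small helper; the package and helper name are hypothetical, while GetPodCondition, klog.KObj, and v1.DisruptionTarget are used exactly as in the test:

package example // hypothetical package for this sketch

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// expectDisruptionTarget mirrors the check in the hunk above: the
// DisruptionTarget condition should be present exactly when the pod is
// expected to have been evicted.
func expectDisruptionTarget(t *testing.T, pod *v1.Pod, wantCond bool) {
	t.Helper()
	_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
	if wantCond && cond == nil {
		t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
	} else if !wantCond && cond != nil {
		t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
	}
}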
@@ -40,16 +40,14 @@ import (
 // TestPodGcOrphanedPodsWithFinalizer tests deletion of orphaned pods
 func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
 	tests := map[string]struct {
-		enablePodDisruptionConditions bool
 		enableJobPodReplacementPolicy bool
 		phase                         v1.PodPhase
 		wantPhase                     v1.PodPhase
 		wantDisruptionTarget          *v1.PodCondition
 	}{
-		"PodDisruptionConditions enabled": {
-			enablePodDisruptionConditions: true,
-			phase:                         v1.PodPending,
-			wantPhase:                     v1.PodFailed,
+		"pending pod": {
+			phase:     v1.PodPending,
+			wantPhase: v1.PodFailed,
 			wantDisruptionTarget: &v1.PodCondition{
 				Type:    v1.DisruptionTarget,
 				Status:  v1.ConditionTrue,
@@ -57,8 +55,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
 				Message: "PodGC: node no longer exists",
 			},
 		},
-		"PodDisruptionConditions and PodReplacementPolicy enabled": {
-			enablePodDisruptionConditions: true,
+		"pending pod; PodReplacementPolicy enabled": {
 			enableJobPodReplacementPolicy: true,
 			phase:                         v1.PodPending,
 			wantPhase:                     v1.PodFailed,
@@ -69,32 +66,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
 				Message: "PodGC: node no longer exists",
 			},
 		},
-		"Only PodReplacementPolicy enabled; no PodDisruptionCondition": {
-			enablePodDisruptionConditions: false,
-			enableJobPodReplacementPolicy: true,
-			phase:                         v1.PodPending,
-			wantPhase:                     v1.PodFailed,
+		"succeeded pod": {
+			phase:     v1.PodSucceeded,
+			wantPhase: v1.PodSucceeded,
 		},
-		"PodDisruptionConditions disabled": {
-			enablePodDisruptionConditions: false,
-			phase:                         v1.PodPending,
-			wantPhase:                     v1.PodPending,
-		},
-		"PodDisruptionConditions enabled; succeeded pod": {
-			enablePodDisruptionConditions: true,
-			phase:                         v1.PodSucceeded,
-			wantPhase:                     v1.PodSucceeded,
-		},
-		"PodDisruptionConditions enabled; failed pod": {
-			enablePodDisruptionConditions: true,
-			phase:                         v1.PodFailed,
-			wantPhase:                     v1.PodFailed,
+		"failed pod": {
+			phase:     v1.PodFailed,
+			wantPhase: v1.PodFailed,
 		},
 	}
 
 	for name, test := range tests {
 		t.Run(name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
 			testCtx := setup(t, "podgc-orphaned")
 			cs := testCtx.ClientSet
@@ -170,31 +153,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
 // TestTerminatingOnOutOfServiceNode tests deletion pods terminating on out-of-service nodes
 func TestTerminatingOnOutOfServiceNode(t *testing.T) {
 	tests := map[string]struct {
-		enablePodDisruptionConditions bool
 		enableJobPodReplacementPolicy bool
 		withFinalizer                 bool
 		wantPhase                     v1.PodPhase
 	}{
-		"pod has phase changed to Failed when PodDisruptionConditions enabled": {
-			enablePodDisruptionConditions: true,
-			withFinalizer:                 true,
-			wantPhase:                     v1.PodFailed,
+		"pod has phase changed to Failed": {
+			withFinalizer: true,
+			wantPhase:     v1.PodFailed,
 		},
-		"pod has phase unchanged when PodDisruptionConditions disabled": {
-			enablePodDisruptionConditions: false,
-			withFinalizer:                 true,
-			wantPhase:                     v1.PodPending,
+		"pod is getting deleted when no finalizer": {
+			withFinalizer: false,
 		},
-		"pod is getting deleted when no finalizer and PodDisruptionConditions enabled": {
-			enablePodDisruptionConditions: true,
-			withFinalizer:                 false,
-		},
-		"pod is getting deleted when no finalizer and PodDisruptionConditions disabled": {
-			enablePodDisruptionConditions: false,
-			withFinalizer:                 false,
-		},
-		"pod has phase changed when PodDisruptionConditions disabled, but JobPodReplacementPolicy enabled": {
-			enablePodDisruptionConditions: false,
+		"pod has phase changed when JobPodReplacementPolicy enabled": {
 			enableJobPodReplacementPolicy: true,
 			withFinalizer:                 true,
 			wantPhase:                     v1.PodFailed,
@@ -203,7 +173,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
 
 	for name, test := range tests {
 		t.Run(name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
 			testCtx := setup(t, "podgc-out-of-service")
@@ -385,7 +354,6 @@ func TestPodGcForPodsWithDuplicatedFieldKeys(t *testing.T) {
 
 	for name, test := range tests {
 		t.Run(name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
 			testCtx := setup(t, "podgc-orphaned")
 			cs := testCtx.ClientSet
 
@@ -33,17 +33,14 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/component-helpers/storage/volume"
 	"k8s.io/klog/v2"
 	configv1 "k8s.io/kube-scheduler/config/v1"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler"
 	configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -200,41 +197,14 @@ func TestPreemption(t *testing.T) {
 
 	maxTokens := 1000
 	tests := []struct {
-		name                          string
-		existingPods                  []*v1.Pod
-		pod                           *v1.Pod
-		initTokens                    int
-		enablePreFilter               bool
-		unresolvable                  bool
-		preemptedPodIndexes           map[int]struct{}
-		enablePodDisruptionConditions bool
+		name                string
+		existingPods        []*v1.Pod
+		pod                 *v1.Pod
+		initTokens          int
+		enablePreFilter     bool
+		unresolvable        bool
+		preemptedPodIndexes map[int]struct{}
 	}{
-		{
-			name:       "basic pod preemption with PodDisruptionConditions enabled",
-			initTokens: maxTokens,
-			existingPods: []*v1.Pod{
-				initPausePod(&testutils.PausePodConfig{
-					Name:      "victim-pod",
-					Namespace: testCtx.NS.Name,
-					Priority:  &lowPriority,
-					Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
-						v1.ResourceCPU:    *resource.NewMilliQuantity(400, resource.DecimalSI),
-						v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
-					},
-				}),
-			},
-			pod: initPausePod(&testutils.PausePodConfig{
-				Name:      "preemptor-pod",
-				Namespace: testCtx.NS.Name,
-				Priority:  &highPriority,
-				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
-					v1.ResourceCPU:    *resource.NewMilliQuantity(300, resource.DecimalSI),
-					v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
-				},
-			}),
-			preemptedPodIndexes:           map[int]struct{}{0: {}},
-			enablePodDisruptionConditions: true,
-		},
 		{
 			name:       "basic pod preemption",
 			initTokens: maxTokens,
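Several surviving fields in the hunk above change only in whitespace: dropping the longest field name (enablePodDisruptionConditions) lets gofmt re-align the remaining declarations. A small illustrative example with hypothetical type names:

package example // hypothetical; only to show the gofmt re-alignment effect

// Before: the column is padded out to the widest field name in the block.
type preemptionCaseBefore struct {
	name                          string
	preemptedPodIndexes           map[int]struct{}
	enablePodDisruptionConditions bool
}

// After: with the widest name removed, gofmt tightens the whole column,
// which is why otherwise-unchanged fields still appear in the diff.
type preemptionCaseAfter struct {
	name                string
	preemptedPodIndexes map[int]struct{}
}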
@@ -484,7 +454,6 @@ func TestPreemption(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
 			filter.Tokens = test.initTokens
 			filter.EnablePreFilter = test.enablePreFilter
 			filter.Unresolvable = test.unresolvable
@@ -513,10 +482,8 @@ func TestPreemption(t *testing.T) {
 					t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name)
 				}
 				_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
-				if test.enablePodDisruptionConditions && cond == nil {
+				if cond == nil {
 					t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
-				} else if test.enablePodDisruptionConditions == false && cond != nil {
-					t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
 				}
 			} else {
 				if p.DeletionTimestamp != nil {