Add pod disruption conditions for kubelet initiated failures
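This change extends TestMergePodStatus with a per-case enablePodDisruptionConditions flag: with the PodDisruptionConditions feature gate enabled, merging a status that moves a pod into the Failed phase while no containers can still be running is expected to surface a DisruptionTarget condition with reason TerminationByKubelet; an already-present condition is preserved, a kubelet-written condition overrides an earlier one (e.g. reason EvictedByEvictionAPI), and nothing is added while containers may still be running. For orientation only, below is a minimal sketch of how a client might check for that condition using the k8s.io/api/core/v1 types from the diff (at this alpha stage the condition type constant is still named AlphaNoCompatGuaranteeDisruptionTarget); hasDisruptionTarget is a hypothetical helper, not part of this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hasDisruptionTarget reports whether a pod status carries a
// DisruptionTarget condition with status True. Hypothetical helper
// for illustration; not part of this commit.
func hasDisruptionTarget(status v1.PodStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == v1.AlphaNoCompatGuaranteeDisruptionTarget && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	// A status shaped like the one the new test cases expect after a
	// kubelet-initiated failure.
	status := v1.PodStatus{
		Phase: v1.PodFailed,
		Conditions: []v1.PodCondition{{
			Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
			Status: v1.ConditionTrue,
			Reason: "TerminationByKubelet",
		}},
	}
	fmt.Println(hasDisruptionTarget(status)) // true
}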
@@ -34,11 +34,14 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/diff"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/features"
 	kubeconfigmap "k8s.io/kubernetes/pkg/kubelet/configmap"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
@@ -1400,22 +1403,204 @@ func deleteAction() core.DeleteAction {
 
 func TestMergePodStatus(t *testing.T) {
 	useCases := []struct {
-		desc                 string
-		hasRunningContainers bool
-		oldPodStatus         func(input v1.PodStatus) v1.PodStatus
-		newPodStatus         func(input v1.PodStatus) v1.PodStatus
-		expectPodStatus      v1.PodStatus
+		desc                          string
+		enablePodDisruptionConditions bool
+		hasRunningContainers          bool
+		oldPodStatus                  func(input v1.PodStatus) v1.PodStatus
+		newPodStatus                  func(input v1.PodStatus) v1.PodStatus
+		expectPodStatus               v1.PodStatus
 	}{
 		{
 			"no change",
+			false,
 			false,
 			func(input v1.PodStatus) v1.PodStatus { return input },
 			func(input v1.PodStatus) v1.PodStatus { return input },
 			getPodStatus(),
 		},
+		{
+			"add DisruptionTarget condition when transitioning into failed phase; PodDisruptionConditions enabled",
+			true,
+			false,
+			func(input v1.PodStatus) v1.PodStatus { return input },
+			func(input v1.PodStatus) v1.PodStatus {
+				input.Phase = v1.PodFailed
+				input.Conditions = append(input.Conditions, v1.PodCondition{
+					Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+					Status: v1.ConditionTrue,
+					Reason: "TerminationByKubelet",
+				})
+				return input
+			},
+			v1.PodStatus{
+				Phase: v1.PodFailed,
+				Conditions: []v1.PodCondition{
+					{
+						Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+						Status: v1.ConditionTrue,
+						Reason: "TerminationByKubelet",
+					},
+					{
+						Type:   v1.PodReady,
+						Status: v1.ConditionFalse,
+						Reason: "PodFailed",
+					},
+					{
+						Type:   v1.PodScheduled,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.ContainersReady,
+						Status: v1.ConditionFalse,
+						Reason: "PodFailed",
+					},
+				},
+				Message: "Message",
+			},
+		},
+		{
+			"don't add DisruptionTarget condition when transitioning into failed phase, but there might still be running containers; PodDisruptionConditions enabled",
+			true,
+			true,
+			func(input v1.PodStatus) v1.PodStatus { return input },
+			func(input v1.PodStatus) v1.PodStatus {
+				input.Phase = v1.PodFailed
+				input.Conditions = append(input.Conditions, v1.PodCondition{
+					Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+					Status: v1.ConditionTrue,
+					Reason: "TerminationByKubelet",
+				})
+				return input
+			},
+			v1.PodStatus{
+				Phase: v1.PodRunning,
+				Conditions: []v1.PodCondition{
+					{
+						Type:   v1.PodReady,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.PodScheduled,
+						Status: v1.ConditionTrue,
+					},
+				},
+				Message: "Message",
+			},
+		},
+		{
+			"preserve DisruptionTarget condition; PodDisruptionConditions enabled",
+			true,
+			false,
+			func(input v1.PodStatus) v1.PodStatus {
+				input.Conditions = append(input.Conditions, v1.PodCondition{
+					Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+					Status: v1.ConditionTrue,
+					Reason: "TerminationByKubelet",
+				})
+				return input
+			},
+			func(input v1.PodStatus) v1.PodStatus {
+				return input
+			},
+			v1.PodStatus{
+				Phase: v1.PodRunning,
+				Conditions: []v1.PodCondition{
+					{
+						Type:   v1.PodReady,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.PodScheduled,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+						Status: v1.ConditionTrue,
+						Reason: "TerminationByKubelet",
+					},
+				},
+				Message: "Message",
+			},
+		},
+		{
+			"preserve DisruptionTarget condition; PodDisruptionConditions disabled",
+			false,
+			false,
+			func(input v1.PodStatus) v1.PodStatus {
+				input.Conditions = append(input.Conditions, v1.PodCondition{
+					Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+					Status: v1.ConditionTrue,
+					Reason: "TerminationByKubelet",
+				})
+				return input
+			},
+			func(input v1.PodStatus) v1.PodStatus {
+				return input
+			},
+			v1.PodStatus{
+				Phase: v1.PodRunning,
+				Conditions: []v1.PodCondition{
+					{
+						Type:   v1.PodReady,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.PodScheduled,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+						Status: v1.ConditionTrue,
+						Reason: "TerminationByKubelet",
+					},
+				},
+				Message: "Message",
+			},
+		},
+		{
+			"override DisruptionTarget condition; PodDisruptionConditions enabled",
+			true,
+			false,
+			func(input v1.PodStatus) v1.PodStatus {
+				input.Conditions = append(input.Conditions, v1.PodCondition{
+					Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+					Status: v1.ConditionTrue,
+					Reason: "EvictedByEvictionAPI",
+				})
+				return input
+			},
+			func(input v1.PodStatus) v1.PodStatus {
+				input.Conditions = append(input.Conditions, v1.PodCondition{
+					Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+					Status: v1.ConditionTrue,
+					Reason: "TerminationByKubelet",
+				})
+				return input
+			},
+			v1.PodStatus{
+				Phase: v1.PodRunning,
+				Conditions: []v1.PodCondition{
+					{
+						Type:   v1.PodReady,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.PodScheduled,
+						Status: v1.ConditionTrue,
+					},
+					{
+						Type:   v1.AlphaNoCompatGuaranteeDisruptionTarget,
+						Status: v1.ConditionTrue,
+						Reason: "TerminationByKubelet",
+					},
+				},
+				Message: "Message",
+			},
+		},
 		{
 			"readiness changes",
+			false,
 			false,
 			func(input v1.PodStatus) v1.PodStatus { return input },
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions[0].Status = v1.ConditionFalse
@@ -1439,6 +1624,7 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"additional pod condition",
+			false,
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
 					Type: v1.PodConditionType("example.com/feature"),
@@ -1469,6 +1655,7 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"additional pod condition and readiness changes",
+			false,
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
 					Type: v1.PodConditionType("example.com/feature"),
@@ -1502,6 +1689,7 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"additional pod condition changes",
+			false,
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Conditions = append(input.Conditions, v1.PodCondition{
 					Type: v1.PodConditionType("example.com/feature"),
@@ -1538,6 +1726,7 @@ func TestMergePodStatus(t *testing.T) {
 		{
 			"phase is transitioning to failed and no containers running",
+			false,
 			false,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Phase = v1.PodRunning
 				input.Reason = "Unknown"
@@ -1556,10 +1745,12 @@ func TestMergePodStatus(t *testing.T) {
 				{
 					Type:   v1.PodReady,
 					Status: v1.ConditionFalse,
+					Reason: "PodFailed",
 				},
 				{
 					Type:   v1.ContainersReady,
 					Status: v1.ConditionFalse,
+					Reason: "PodFailed",
 				},
 				{
 					Type: v1.PodScheduled,
@@ -1572,6 +1763,7 @@ func TestMergePodStatus(t *testing.T) {
 		},
 		{
 			"phase is transitioning to failed and containers running",
+			false,
 			true,
 			func(input v1.PodStatus) v1.PodStatus {
 				input.Phase = v1.PodRunning
@@ -1605,6 +1797,7 @@ func TestMergePodStatus(t *testing.T) {
 
 	for _, tc := range useCases {
 		t.Run(tc.desc, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)()
 			output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()), tc.hasRunningContainers)
 			if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) {
 				t.Fatalf("unexpected output: %s", cmp.Diff(tc.expectPodStatus, output))
@@ -1630,7 +1823,7 @@ func conditionsEqual(left, right []v1.PodCondition) bool {
 		for _, r := range right {
 			if l.Type == r.Type {
 				found = true
-				if l.Status != r.Status {
+				if l.Status != r.Status || l.Reason != r.Reason {
 					return false
 				}
 			}
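A note on the harness changes in the last two hunks: each subtest pins the feature gate for its own duration via featuregatetesting.SetFeatureGateDuringTest, whose return value is a restore function, hence the trailing "()" on the defer; and conditionsEqual now compares Reason in addition to Status, which the override case relies on to tell TerminationByKubelet apart from EvictedByEvictionAPI. Below is a minimal sketch of that gate idiom in isolation; the test name and package placement are illustrative, not from this commit.

package status

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestGateIdiom shows the pattern used in the test loop above: the
// helper call runs immediately and flips the gate; the function it
// returns is what defer invokes at test exit, restoring the previous
// gate value so state does not leak between subtests.
func TestGateIdiom(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)()
	// ... exercise code that consults utilfeature.DefaultFeatureGate ...
}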