Fixing CurrentReplicas and CurrentRevision in completeRollingUpdate

adil ghaffar
2023-09-18 09:42:46 +03:00
committed by muhammad adil ghaffar
parent 0241da314e
commit 00c21ced3a
3 changed files with 329 additions and 2 deletions
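
Only the integration test is reproduced below; the controller-side fix to completeRollingUpdate lives in the other changed files. As a rough sketch of the behavior the title describes (the function shape follows pkg/controller/statefulset upstream, but the exact committed condition is an assumption here), the rolling update should only be recorded as complete, promoting CurrentReplicas and CurrentRevision, once every replica is updated and ready, a state a partitioned rollout never reaches:

func completeRollingUpdate(set *apps.StatefulSet, status *apps.StatefulSetStatus) {
	// Sketch, not the committed diff: promote the update revision to current
	// only when all replicas are updated and ready. Under a partition,
	// UpdatedReplicas stays below Replicas, so the status is left untouched.
	if set.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType &&
		status.UpdatedReplicas == status.Replicas &&
		status.ReadyReplicas == status.Replicas {
		status.CurrentReplicas = status.UpdatedReplicas
		status.CurrentRevision = status.UpdateRevision
	}
}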


@@ -43,6 +43,7 @@ import (
"k8s.io/kubernetes/pkg/controlplane"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/utils/ptr"
)
const (
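
The new k8s.io/utils/ptr import provides the generic ptr.To helper, which the test below uses to construct the *int32 partition value inline; a minimal illustration:

	partition := ptr.To[int32](1) // *int32 pointing at a copy of 1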
@@ -489,6 +490,146 @@ func TestAutodeleteOwnerRefs(t *testing.T) {
}
}
func TestDeletingPodForRollingUpdatePartition(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
closeFn, rm, informers, c := scSetup(ctx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(c, "test-deleting-pod-for-rolling-update-partition", t)
defer framework.DeleteNamespaceOrDie(c, ns, t)
cancel := runControllerAndInformers(rm, informers)
defer cancel()
labelMap := labelMap()
sts := newSTS("sts", ns.Name, 2)
sts.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
return &appsv1.RollingUpdateStatefulSetStrategy{
Partition: ptr.To[int32](1),
}
}(),
}
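// With Partition=1 and 2 replicas, only pod-1 (ordinal >= 1) is moved to the
// update revision during the rollout; pod-0 stays on the current revision.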
stss, _ := createSTSsPods(t, c, []*appsv1.StatefulSet{sts}, []*v1.Pod{})
sts = stss[0]
waitSTSStable(t, c, sts)
// Verify STS creates 2 pods
podClient := c.CoreV1().Pods(ns.Name)
pods := getPods(t, podClient, labelMap)
if len(pods.Items) != 2 {
t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
}
// Set all pods to Running, Ready, and Available
setPodsReadyCondition(t, c, &v1.PodList{Items: pods.Items}, v1.ConditionTrue, time.Now())
// 1. Roll out a new image.
oldImage := sts.Spec.Template.Spec.Containers[0].Image
newImage := "new-image"
if oldImage == newImage {
t.Fatalf("bad test setup, statefulSet %s roll out with the same image", sts.Name)
}
// Add a finalizer to pod-0 so its deletion blocks, preventing recreation while the status UpdateRevision is bumped
pod0 := &pods.Items[0]
updatePod(t, podClient, pod0.Name, func(pod *v1.Pod) {
pod.Finalizers = []string{"fake.example.com/blockDeletion"}
})
stsClient := c.AppsV1().StatefulSets(ns.Name)
_ = updateSTS(t, stsClient, sts.Name, func(sts *appsv1.StatefulSet) {
sts.Spec.Template.Spec.Containers[0].Image = newImage
})
// Wait for pod-1 to be recreated while pod-0 remains running
if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
pods := getPods(t, podClient, labelMap)
recreatedPods := v1.PodList{}
for _, pod := range pods.Items {
if pod.Status.Phase == v1.PodPending {
recreatedPods.Items = append(recreatedPods.Items, pod)
}
}
setPodsReadyCondition(t, c, &v1.PodList{Items: recreatedPods.Items}, v1.ConditionTrue, time.Now())
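// Done once the single pod above the partition (pod-1) is updated and every
// replica reports ready again.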
return ss.Status.UpdatedReplicas == *ss.Spec.Replicas-*sts.Spec.UpdateStrategy.RollingUpdate.Partition &&
	ss.Status.Replicas == *ss.Spec.Replicas &&
	ss.Status.ReadyReplicas == *ss.Spec.Replicas, nil
}); err != nil {
t.Fatalf("failed to await for pod-1 to be recreated by sts %s: %v", sts.Name, err)
}
// Mark pod-0 as terminal and not ready
updatePodStatus(t, podClient, pod0.Name, func(pod *v1.Pod) {
pod.Status.Phase = v1.PodFailed
})
// Make sure pod-0 gets deletion timestamp so that it is recreated
if err := podClient.Delete(ctx, pod0.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("error deleting pod %s: %v", pod0.Name, err)
}
// Wait for pod-0 to drop out of the ready count
if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return ss.Status.ReadyReplicas == *ss.Spec.Replicas-1, nil
}); err != nil {
t.Fatalf("failed to await for pod-0 to be not counted as ready in status of sts %s: %v", sts.Name, err)
}
// Remove the finalizer to allow recreation
updatePod(t, podClient, pod0.Name, func(pod *v1.Pod) {
pod.Finalizers = []string{}
})
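// pod-0 is below the partition, so the controller recreates it at the old revision.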
// Wait for pod-0 to be recreated, then mark it ready
if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
pods := getPods(t, podClient, labelMap)
recreatedPods := v1.PodList{}
for _, pod := range pods.Items {
if pod.Status.Phase == v1.PodPending {
recreatedPods.Items = append(recreatedPods.Items, pod)
}
}
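// Backdate the Ready transition so the recreated pods immediately count as available.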
setPodsReadyCondition(t, c, &v1.PodList{Items: recreatedPods.Items}, v1.ConditionTrue, time.Now().Add(-120*time.Minute))
return len(recreatedPods.Items) > 0, nil
}); err != nil {
t.Fatalf("failed to await for pod-0 to be recreated by sts %s: %v", sts.Name, err)
}
// Wait for the StatefulSet status to record all replicas as ready
if err := wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) {
ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return ss.Status.ReadyReplicas == *ss.Spec.Replicas, nil
}); err != nil {
t.Fatalf("failed to verify .Spec.Template.Spec.Containers[0].Image is updated for sts %s: %v", sts.Name, err)
}
// Verify the pod count matches spec.replicas (2)
pods = getPods(t, podClient, labelMap)
if len(pods.Items) != int(*sts.Spec.Replicas) {
t.Fatalf("Unexpected number of pods")
}
// Verify pod images
for i := range pods.Items {
if i < int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition) {
if pods.Items[i].Spec.Containers[0].Image != oldImage {
t.Fatalf("Pod %s has image %s not equal to old image %s", pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, oldImage)
}
} else {
if pods.Items[i].Spec.Containers[0].Image != newImage {
t.Fatalf("Pod %s has image %s not equal to new image %s", pods.Items[i].Name, pods.Items[i].Spec.Containers[0].Image, newImage)
}
}
}
}
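
A natural follow-up assertion one could append to this test (hypothetical, assuming the test's ctx, stsClient, and sts are in scope, and that the fixed controller leaves a partitioned rollout un-completed) would check the revision bookkeeping the commit title refers to:

	ss, err := stsClient.Get(ctx, sts.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("failed to get sts %s: %v", sts.Name, err)
	}
	// With Partition=1, pod-0 still runs the old revision, so the update
	// must not have been promoted to current.
	if ss.Status.CurrentRevision == ss.Status.UpdateRevision {
		t.Fatalf("sts %s: currentRevision %q should differ from updateRevision %q",
			sts.Name, ss.Status.CurrentRevision, ss.Status.UpdateRevision)
	}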
func TestStatefulSetStartOrdinal(t *testing.T) {
tests := []struct {
ordinals *appsv1.StatefulSetOrdinals