Mirror of https://github.com/optim-enterprises-bv/kubernetes.git
	Merge pull request #129489 from elizabeth-dev/replace-network-e2e-replicationcontrollers
test(network): replace jig.CreateRC with jig.CreateDeployment
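In short: the network e2e test fixture now creates a Deployment instead of a ReplicationController, and the shared e2epod helpers (VerifyPods, VerifyPodsRunning, WaitForPodsResponding, PodsCreatedByLabel) take an explicit labels.Selector rather than deriving one from the controller name. A minimal before/after sketch of a typical call site, following the pattern used throughout the diff below (variable names are illustrative):

	// Before: the helper implicitly selected pods labeled {"name": "sample-pod"}.
	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)

	// After: the caller passes the selector explicitly.
	podName := "sample-pod"
	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName,
		labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)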
@@ -737,9 +737,10 @@ func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
 	ns := f.Namespace.Name
 	c := f.ClientSet
 	// Create webserver pods.
-	deploymentPodLabels := map[string]string{"name": "sample-pod"}
+	podName := "sample-pod"
+	deploymentPodLabels := map[string]string{"name": podName}
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}

@@ -754,7 +755,13 @@ func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
 	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx,
+		c,
+		ns,
+		podName,
+		labels.SelectorFromSet(map[string]string{"name": podName}),
+		false,
+		replicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

 	// Create a deployment to delete webserver pods and instead bring up agnhost pods.
@@ -820,9 +827,10 @@ func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
 	ns := f.Namespace.Name
 	c := f.ClientSet
 	// Create webserver pods.
-	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
+	podName := "cleanup-pod"
+	deploymentPodLabels := map[string]string{"name": podName}
 	rsPodLabels := map[string]string{
-		"name": "cleanup-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}
 	rsName := "test-cleanup-controller"
@@ -832,7 +840,13 @@ func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "cleanup-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx,
+		c,
+		ns,
+		podName,
+		labels.SelectorFromSet(map[string]string{"name": podName}),
+		false,
+		replicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

 	// Create a deployment to delete webserver pods and instead bring up agnhost pods.
@@ -903,7 +917,13 @@ func testRolloverDeployment(ctx context.Context, f *framework.Framework) {
 	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, false, rsReplicas)
+	err = e2epod.VerifyPodsRunning(ctx,
+		c,
+		ns,
+		podName,
+		labels.SelectorFromSet(map[string]string{"name": podName}),
+		false,
+		rsReplicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

 	// Wait for replica set to become ready before adopting it.
@@ -1202,7 +1222,7 @@ func testProportionalScalingDeployment(ctx context.Context, f *framework.Framewo

 	// Verify that the required pods have come up.
 	framework.Logf("Waiting for all required pods to come up")
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, labels.SelectorFromSet(podLabels), false, *(deployment.Spec.Replicas))
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

 	framework.Logf("Waiting for deployment %q to complete", deployment.Name)

@@ -494,7 +494,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 				waitForPdbToObserveHealthyPods(ctx, cs, ns, replicas, replicas-1)
 			} else {
 				ginkgo.By("Wait for pods to be running and not ready")
-				err := e2epod.VerifyPodsRunning(ctx, cs, ns, rsName, false, replicas)
+				err := e2epod.VerifyPodsRunning(ctx, cs, ns, rsName, labels.SelectorFromSet(rs.Labels), false, replicas)
 				framework.ExpectNoError(err)
 				waitForPdbToObserveHealthyPods(ctx, cs, ns, 0, replicas-1)
 			}

@@ -28,6 +28,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	utilrand "k8s.io/apimachinery/pkg/util/rand"
@@ -485,6 +486,7 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
 // The image serves its hostname which is checked for each replica.
 func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
+	rcLabels := map[string]string{"name": name}
 	replicas := int32(1)

 	// Create a replication controller for a service
@@ -492,14 +494,14 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework
 	// The source for the Docker container kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
 	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
-	newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
+	newRC := newRC(name, replicas, rcLabels, name, image, []string{"serve-hostname"})
 	newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, newRC, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

 	// Check that pods for the new RC were created.
 	// TODO: Maybe switch PodsCreated to just check owner references.
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, name, replicas, labels.SelectorFromSet(rcLabels))
 	framework.ExpectNoError(err)

 	// Wait for the pods to enter the running state and are Ready. Waiting loops until the pods
@@ -529,7 +531,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework

 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
-	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
+	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, labels.SelectorFromSet(rcLabels), true, 2*time.Minute, pods))
 }

 // 1. Create a quota restricting pods in the current namespace to 2.
@@ -666,15 +668,16 @@ func testRCAdoptMatchingOrphans(ctx context.Context, f *framework.Framework) {

 func testRCReleaseControlledNotMatching(ctx context.Context, f *framework.Framework) {
 	name := "pod-release"
+	rcLabels := map[string]string{"name": name}
 	ginkgo.By("Given a ReplicationController is created")
 	replicas := int32(1)
-	rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
-	rcSt.Spec.Selector = map[string]string{"name": name}
+	rcSt := newRC(name, replicas, rcLabels, name, WebserverImage, nil)
+	rcSt.Spec.Selector = rcLabels
 	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

 	ginkgo.By("When the matched label of one of its pods change")
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas, labels.SelectorFromSet(rcLabels))
 	framework.ExpectNoError(err)

 	p := pods.Items[0]

@@ -183,20 +183,21 @@ var _ = SIGDescribe("ReplicaSet", func() {
 // image serves its hostname which is checked for each replica.
 func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
+	rsLabels := map[string]string{"name": name}
 	replicas := int32(1)

 	// Create a ReplicaSet for a service that serves its hostname.
 	// The source for the Docker container kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
 	framework.Logf("Creating ReplicaSet %s", name)
-	newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
+	newRS := newRS(name, replicas, rsLabels, name, image, []string{"serve-hostname"})
 	newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 	_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, newRS, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

 	// Check that pods for the new RS were created.
 	// TODO: Maybe switch PodsCreated to just check owner references.
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, name, replicas, labels.SelectorFromSet(rsLabels))
 	framework.ExpectNoError(err)

 	// Wait for the pods to enter the running state. Waiting loops until the pods
@@ -226,7 +227,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework,

 	// Verify that something is listening.
 	framework.Logf("Trying to dial the pod")
-	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
+	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, labels.SelectorFromSet(rsLabels), true, 2*time.Minute, pods))
 }

 // 1. Create a quota restricting pods in the current namespace to 2.
@@ -317,13 +318,12 @@ func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) {

 func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) {
 	name := "pod-adoption-release"
+	rsLabels := map[string]string{"name": name}
 	ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
 	p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-			Labels: map[string]string{
-				"name": name,
-			},
+			Name:   name,
+			Labels: rsLabels,
 		},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
@@ -337,8 +337,8 @@ func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.

 	ginkgo.By("When a replicaset with a matching selector is created")
 	replicas := int32(1)
-	rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
-	rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
+	rsSt := newRS(name, replicas, rsLabels, name, WebserverImage, nil)
+	rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: rsLabels}
 	rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, rsSt, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

@@ -362,7 +362,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.
 	framework.ExpectNoError(err)

 	ginkgo.By("When the matched label of one of its pods change")
-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas, labels.SelectorFromSet(rsLabels))
 	framework.ExpectNoError(err)

 	p = &pods.Items[0]
@@ -403,8 +403,9 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 	c := f.ClientSet

 	// Create webserver pods.
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}

@@ -416,7 +417,7 @@ func testRSScaleSubresources(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

 	ginkgo.By("getting scale subresource")
@@ -468,8 +469,9 @@ func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
 	zero := int64(0)

 	// Create webserver pods.
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}

@@ -494,7 +496,7 @@ func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	// Verify that the required pods have come up.
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "Failed to create pods: %s", err)

 	// Scale the ReplicaSet
@@ -564,8 +566,9 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
 	e2eValue := rand.String(5)

 	// Define ReplicaSet Labels
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 		"e2e":  e2eValue,
 	}
@@ -576,7 +579,7 @@ func listRSDeleteCollection(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	ginkgo.By("Verify that the required pods have come up")
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "Failed to create pods: %s", err)
 	r, err := rsClient.Get(ctx, rsName, metav1.GetOptions{})
 	framework.ExpectNoError(err, "failed to get ReplicaSets")
@@ -603,8 +606,9 @@ func testRSStatus(ctx context.Context, f *framework.Framework) {
 	rsClient := c.AppsV1().ReplicaSets(ns)

 	// Define ReplicaSet Labels
+	podName := "sample-pod"
 	rsPodLabels := map[string]string{
-		"name": "sample-pod",
+		"name": podName,
 		"pod":  WebserverImageName,
 	}
 	labelSelector := labels.SelectorFromSet(rsPodLabels).String()
@@ -627,7 +631,7 @@ func testRSStatus(ctx context.Context, f *framework.Framework) {
 	framework.ExpectNoError(err)

 	ginkgo.By("Verify that the required pods have come up.")
-	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
+	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, labels.SelectorFromSet(map[string]string{"name": podName}), false, replicas)
 	framework.ExpectNoError(err, "Failed to create pods: %s", err)

 	ginkgo.By("Getting /status")

@@ -29,6 +29,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/feature"
@@ -75,14 +76,14 @@ func removeWorkerNodes(zone string) error {
 	return nil
 }

-func verifyRCs(ctx context.Context, c clientset.Interface, ns string, names []string) {
-	for _, name := range names {
-		framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns, name, true, 1))
+func verifyRCs(ctx context.Context, c clientset.Interface, ns string, labelSets []map[string]string) {
+	for _, rcLabels := range labelSets {
+		framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns, labels.FormatLabels(rcLabels), labels.SelectorFromSet(rcLabels), true, 1))
 	}
 }

-func createNewRC(c clientset.Interface, ns string, name string) {
-	_, err := common.NewRCByName(c, ns, name, 1, nil, nil)
+func createNewRC(c clientset.Interface, ns string, name string, rcLabels map[string]string) {
+	_, err := common.NewRCByName(c, ns, name, 1, nil, nil, rcLabels)
 	framework.ExpectNoError(err)
 }

@@ -168,7 +169,7 @@ var _ = SIGDescribe("HA-master", feature.HAMaster, func() {
 	var ns string
 	var additionalReplicaZones []string
 	var additionalNodesZones []string
-	var existingRCs []string
+	var existingRCLabelSets []map[string]string

 	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gce")
@@ -176,7 +177,7 @@ var _ = SIGDescribe("HA-master", feature.HAMaster, func() {
 		ns = f.Namespace.Name
 		framework.ExpectNoError(waitForMasters(ctx, framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
 		additionalReplicaZones = make([]string, 0)
-		existingRCs = make([]string, 0)
+		existingRCLabelSets = make([]map[string]string, 0)
 	})

 	ginkgo.AfterEach(func(ctx context.Context) {
@@ -222,10 +223,13 @@ var _ = SIGDescribe("HA-master", feature.HAMaster, func() {
 		framework.ExpectNoError(e2enode.AllNodesReady(ctx, c, 5*time.Minute))

 		// Verify that API server works correctly with HA master.
-		rcName := "ha-master-" + strconv.Itoa(len(existingRCs))
-		createNewRC(c, ns, rcName)
-		existingRCs = append(existingRCs, rcName)
-		verifyRCs(ctx, c, ns, existingRCs)
+		rcName := "ha-master-" + strconv.Itoa(len(existingRCLabelSets))
+		rcLabels := map[string]string{"name": rcName}
+
+		createNewRC(c, ns, rcName, rcLabels)
+		existingRCLabelSets = append(existingRCLabelSets, rcLabels)
+
+		verifyRCs(ctx, c, ns, existingRCLabelSets)
 	}

 	f.It("survive addition/removal replicas same zone", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {

@@ -23,6 +23,7 @@ import (
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -97,11 +98,14 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 			// Create a replication controller for a service that serves its hostname.
 			// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 			name := "my-hostname-delete-node"
+			rcLabels := map[string]string{"name": name}
 			numNodes, err := e2enode.TotalRegistered(ctx, c)
 			framework.ExpectNoError(err)
 			originalNodeCount = int32(numNodes)
-			common.NewRCByName(c, ns, name, originalNodeCount, nil, nil)
-			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
+			_, err = common.NewRCByName(c, ns, name, originalNodeCount, nil, nil, rcLabels)
+			framework.ExpectNoError(err)
+
+			err = e2epod.VerifyPods(ctx, c, ns, name, labels.SelectorFromSet(rcLabels), true, originalNodeCount)
 			framework.ExpectNoError(err)

 			targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
@@ -118,7 +122,7 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 			time.Sleep(f.Timeouts.PodStartShort)

 			ginkgo.By("verifying whether the pods from the removed node are recreated")
-			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
+			err = e2epod.VerifyPods(ctx, c, ns, name, labels.SelectorFromSet(rcLabels), true, originalNodeCount)
 			framework.ExpectNoError(err)
 		})

@@ -127,12 +131,17 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 			// Create a replication controller for a service that serves its hostname.
 			// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 			name := "my-hostname-add-node"
-			common.NewSVCByName(c, ns, name)
+			rcLabels := map[string]string{"name": name}
+			err := common.NewSVCByName(c, ns, name, rcLabels)
+			framework.ExpectNoError(err)
+
 			numNodes, err := e2enode.TotalRegistered(ctx, c)
 			framework.ExpectNoError(err)
 			originalNodeCount = int32(numNodes)
-			common.NewRCByName(c, ns, name, originalNodeCount, nil, nil)
-			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
+			_, err = common.NewRCByName(c, ns, name, originalNodeCount, nil, nil, rcLabels)
+			framework.ExpectNoError(err)
+
+			err = e2epod.VerifyPods(ctx, c, ns, name, labels.SelectorFromSet(rcLabels), true, originalNodeCount)
 			framework.ExpectNoError(err)

 			targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
@@ -147,7 +156,7 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 			ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
 			err = resizeRC(ctx, c, ns, name, originalNodeCount+1)
 			framework.ExpectNoError(err)
-			err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount+1)
+			err = e2epod.VerifyPods(ctx, c, ns, name, labels.SelectorFromSet(rcLabels), true, originalNodeCount+1)
 			framework.ExpectNoError(err)
 		})
 	})

@@ -120,16 +120,14 @@ func SubstituteImageName(content string) string {
 	return contentWithImageName.String()
 }

-func svcByName(name string, port int) *v1.Service {
+func svcByName(name string, port int, selector map[string]string) *v1.Service {
 	return &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
 		Spec: v1.ServiceSpec{
-			Type: v1.ServiceTypeNodePort,
-			Selector: map[string]string{
-				"name": name,
-			},
+			Type:     v1.ServiceTypeNodePort,
+			Selector: selector,
 			Ports: []v1.ServicePort{{
 				Port:       int32(port),
 				TargetPort: intstr.FromInt32(int32(port)),
@@ -138,15 +136,15 @@ func svcByName(name string, port int) *v1.Service {
 	}
 }

-// NewSVCByName creates a service by name.
-func NewSVCByName(c clientset.Interface, ns, name string) error {
+// NewSVCByName creates a service with the specified selector.
+func NewSVCByName(c clientset.Interface, ns, name string, selector map[string]string) error {
 	const testPort = 9376
-	_, err := c.CoreV1().Services(ns).Create(context.TODO(), svcByName(name, testPort), metav1.CreateOptions{})
+	_, err := c.CoreV1().Services(ns).Create(context.TODO(), svcByName(name, testPort, selector), metav1.CreateOptions{})
 	return err
 }

-// NewRCByName creates a replication controller with a selector by name of name.
-func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64, containerArgs []string) (*v1.ReplicationController, error) {
+// NewRCByName creates a replication controller with a selector by a specified set of labels.
+func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64, containerArgs []string, rcLabels map[string]string) (*v1.ReplicationController, error) {
 	ginkgo.By(fmt.Sprintf("creating replication controller %s", name))

 	if containerArgs == nil {
@@ -154,7 +152,7 @@ func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe
 	}

 	return c.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rcByNamePort(
-		name, replicas, imageutils.GetE2EImage(imageutils.Agnhost), containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod), metav1.CreateOptions{})
+		name, replicas, imageutils.GetE2EImage(imageutils.Agnhost), containerArgs, 9376, v1.ProtocolTCP, rcLabels, gracePeriod), metav1.CreateOptions{})
 }

 // RestartNodes restarts specific nodes.

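Callers of these common helpers now supply the label set themselves instead of relying on an implicit {"name": name} selector; a minimal sketch of the new call pattern, matching the node-resize tests elsewhere in this diff (variable names are illustrative):

	rcLabels := map[string]string{"name": name}
	err := common.NewSVCByName(c, ns, name, rcLabels)
	framework.ExpectNoError(err)

	_, err = common.NewRCByName(c, ns, name, replicas, nil, nil, rcLabels)
	framework.ExpectNoError(err)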
@@ -59,12 +59,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
 	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
 }

-// PodsCreated returns a pod list matched by the given name.
-func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	return PodsCreatedByLabel(ctx, c, ns, name, replicas, label)
-}
-
 // PodsCreatedByLabel returns a created pod list matched by the given label.
 func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
 	timeout := 2 * time.Minute
@@ -95,26 +89,32 @@ func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name str
 }

 // VerifyPods checks if the specified pod is responding.
-func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
-	return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, true)
-}
-
-// VerifyPodsRunning checks if the specified pod is running.
-func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
-	return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, false)
-}
-
-func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
-	pods, err := PodsCreated(ctx, c, ns, name, replicas)
+func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, selector labels.Selector, wantName bool, replicas int32) error {
+	pods, err := PodsCreatedByLabel(ctx, c, ns, name, replicas, selector)
 	if err != nil {
 		return err
 	}
+
+	return podsRunningMaybeResponding(ctx, c, ns, name, selector, pods, wantName, true)
+}
+
+// VerifyPodsRunning checks if the specified pod is running.
+func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, selector labels.Selector, wantName bool, replicas int32) error {
+	pods, err := PodsCreatedByLabel(ctx, c, ns, name, replicas, selector)
+	if err != nil {
+		return err
+	}
+
+	return podsRunningMaybeResponding(ctx, c, ns, name, selector, pods, wantName, false)
+}
+
+func podsRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns string, name string, selector labels.Selector, pods *v1.PodList, wantName bool, checkResponding bool) error {
 	e := podsRunning(ctx, c, pods)
 	if len(e) > 0 {
 		return fmt.Errorf("failed to wait for pods running: %v", e)
 	}
 	if checkResponding {
-		return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
+		return WaitForPodsResponding(ctx, c, ns, name, selector, wantName, podRespondingTimeout, pods)
 	}
 	return nil
 }

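With this refactor the pod list is always fetched via PodsCreatedByLabel using the caller's selector, so the name argument is decoupled from the labels; a test that tracks its own label sets can verify pods directly, as the HA-master change in this diff does:

	framework.ExpectNoError(e2epod.VerifyPods(ctx, c, ns,
		labels.FormatLabels(rcLabels), labels.SelectorFromSet(rcLabels), true, 1))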
@@ -604,13 +604,12 @@ func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, p
 }

 // WaitForPodsResponding waits for the pods to response.
-func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, wantName bool, timeout time.Duration, pods *v1.PodList) error {
+func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, selector labels.Selector, wantName bool, timeout time.Duration, pods *v1.PodList) error {
 	if timeout == 0 {
 		timeout = podRespondingTimeout
 	}
 	ginkgo.By("trying to dial each unique pod")
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": controllerName}))
-	options := metav1.ListOptions{LabelSelector: label.String()}
+	options := metav1.ListOptions{LabelSelector: selector.String()}

 	type response struct {
 		podName  string

@@ -40,8 +40,6 @@ func ByNameContainer(name string, replicas int32, labels map[string]string, c v1

 	zeroGracePeriod := int64(0)

-	// Add "name": name to the labels, overwriting if it exists.
-	labels["name"] = name
 	if gracePeriod == nil {
 		gracePeriod = &zeroGracePeriod
 	}
@@ -55,9 +53,7 @@ func ByNameContainer(name string, replicas int32, labels map[string]string, c v1
 		},
 		Spec: v1.ReplicationControllerSpec{
 			Replicas: pointer.Int32(replicas),
-			Selector: map[string]string{
-				"name": name,
-			},
+			Selector: labels,
 			Template: &v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: labels,

@@ -19,13 +19,13 @@ package network
 import (
 	"context"

+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/util/retry"
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo/v2"
@@ -40,10 +40,10 @@ type TestFixture struct {
 	TestID string
 	Labels map[string]string

-	rcs      map[string]bool
-	services map[string]bool
-	Name     string
-	Image    string
+	deployments map[string]bool
+	services    map[string]bool
+	Name        string
+	Image       string
 }

 // NewServerTest creates a new TestFixture for the tests.
@@ -57,7 +57,7 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str
 		"testid": t.TestID,
 	}

-	t.rcs = make(map[string]bool)
+	t.deployments = make(map[string]bool)
 	t.services = make(map[string]bool)

 	t.Name = "webserver"
@@ -84,13 +84,12 @@ func (t *TestFixture) BuildServiceSpec() *v1.Service {
 	return service
 }

-// CreateRC creates a replication controller and records it for cleanup.
-func (t *TestFixture) CreateRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
-	rc, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{})
+func (t *TestFixture) CreateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+	deployment, err := t.Client.AppsV1().Deployments(t.Namespace).Create(context.TODO(), deployment, metav1.CreateOptions{})
 	if err == nil {
-		t.rcs[rc.Name] = true
+		t.deployments[deployment.Name] = true
 	}
-	return rc, err
+	return deployment, err
 }

 // CreateService creates a service, and record it for cleanup
@@ -114,33 +113,10 @@ func (t *TestFixture) DeleteService(serviceName string) error {
 // Cleanup cleans all ReplicationControllers and Services which this object holds.
 func (t *TestFixture) Cleanup() []error {
 	var errs []error
-	for rcName := range t.rcs {
-		ginkgo.By("stopping RC " + rcName + " in namespace " + t.Namespace)
-		err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-			// First, resize the RC to 0.
-			old, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Get(context.TODO(), rcName, metav1.GetOptions{})
-			if err != nil {
-				if apierrors.IsNotFound(err) {
-					return nil
-				}
-				return err
-			}
-			x := int32(0)
-			old.Spec.Replicas = &x
-			if _, err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Update(context.TODO(), old, metav1.UpdateOptions{}); err != nil {
-				if apierrors.IsNotFound(err) {
-					return nil
-				}
-				return err
-			}
-			return nil
-		})
+	for deploymentName := range t.deployments {
+		ginkgo.By("deleting deployment " + deploymentName + " in namespace " + t.Namespace)
+		err := t.Client.AppsV1().Deployments(t.Namespace).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
 		if err != nil {
 			errs = append(errs, err)
 		}
-		// TODO(mikedanese): Wait.
-		// Then, delete the RC altogether.
-		if err := t.Client.CoreV1().ReplicationControllers(t.Namespace).Delete(context.TODO(), rcName, metav1.DeleteOptions{}); err != nil {
-			if !apierrors.IsNotFound(err) {
-				errs = append(errs, err)
-			}
 	}

@@ -1785,11 +1785,18 @@ var _ = common.SIGDescribe("Services", func() {
 				PublishNotReadyAddresses: true,
 			},
 		}
-		rcSpec := e2erc.ByNameContainer(t.Name, 1, t.Labels, v1.Container{
+
+		deploymentSpec := e2edeployment.NewDeployment(t.Name,
+			1,
+			t.Labels,
+			t.Name,
+			t.Image,
+			appsv1.RecreateDeploymentStrategyType)
+		deploymentSpec.Spec.Template.Spec.Containers[0] = v1.Container{
 			Args:  []string{"netexec", fmt.Sprintf("--http-port=%d", port)},
 			Name:  t.Name,
 			Image: t.Image,
-			Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: v1.ProtocolTCP}},
+			Ports: []v1.ContainerPort{{ContainerPort: port, Protocol: v1.ProtocolTCP}},
 			ReadinessProbe: &v1.Probe{
 				ProbeHandler: v1.ProbeHandler{
 					Exec: &v1.ExecAction{
@@ -1804,19 +1811,19 @@ var _ = common.SIGDescribe("Services", func() {
 					},
 				},
 			},
-		}, nil)
-		rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds
+		}
+		deploymentSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds

-		ginkgo.By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector))
-		_, err := t.CreateRC(rcSpec)
+		ginkgo.By(fmt.Sprintf("creating Deployment %v with selectors %v", deploymentSpec.Name, deploymentSpec.Spec.Selector))
+		_, err := t.CreateDeployment(deploymentSpec)
 		framework.ExpectNoError(err)

 		ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
 		_, err = t.CreateService(service)
 		framework.ExpectNoError(err)

-		ginkgo.By("Verifying pods for RC " + t.Name)
-		framework.ExpectNoError(e2epod.VerifyPods(ctx, t.Client, t.Namespace, t.Name, false, 1))
+		ginkgo.By("Verifying pods for Deployment " + t.Name)
+		framework.ExpectNoError(e2epod.VerifyPods(ctx, t.Client, t.Namespace, t.Name, labels.SelectorFromSet(t.Labels), false, 1))

 		svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
 		ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName)
@@ -1837,8 +1844,11 @@ var _ = common.SIGDescribe("Services", func() {
 			framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
 		}

-		ginkgo.By("Scaling down replication controller to zero")
-		e2erc.ScaleRC(ctx, f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
+		ginkgo.By("Scaling down deployment to zero")
+		_, err = e2edeployment.UpdateDeploymentWithRetries(f.ClientSet, t.Namespace, t.Name, func(deployment *appsv1.Deployment) {
+			deployment.Spec.Replicas = ptr.To[int32](0)
+		})
+		framework.ExpectNoError(err)

 		ginkgo.By("Update service to not tolerate unready services")
 		_, err = e2eservice.UpdateService(ctx, f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
@@ -1881,7 +1891,7 @@ var _ = common.SIGDescribe("Services", func() {
 		}

 		ginkgo.By("Remove pods immediately")
-		label := labels.SelectorFromSet(labels.Set(t.Labels))
+		label := labels.SelectorFromSet(t.Labels)
 		options := metav1.ListOptions{LabelSelector: label.String()}
 		podClient := t.Client.CoreV1().Pods(f.Namespace.Name)
 		pods, err := podClient.List(ctx, options)

@@ -185,6 +185,8 @@ func checkZoneSpreading(ctx context.Context, c clientset.Interface, pods *v1.Pod
 // controller get spread evenly across available zones
 func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount int32, zoneNames sets.Set[string], image string, args []string) {
 	name := "ubelite-spread-rc-" + string(uuid.NewUUID())
+	rcLabels := map[string]string{"name": name}
+
 	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
 	controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, &v1.ReplicationController{
 		ObjectMeta: metav1.ObjectMeta{
@@ -193,12 +195,10 @@ func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount in
 		},
 		Spec: v1.ReplicationControllerSpec{
 			Replicas: &replicaCount,
-			Selector: map[string]string{
-				"name": name,
-			},
+			Selector: rcLabels,
 			Template: &v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{"name": name},
+					Labels: rcLabels,
 				},
 				Spec: v1.PodSpec{
 					Containers: []v1.Container{
@@ -222,8 +222,8 @@ func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount in
 		}
 	}()
 	// List the pods, making sure we observe all the replicas.
-	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	_, err = e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicaCount)
+	selector := labels.SelectorFromSet(rcLabels)
+	_, err = e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, name, replicaCount, selector)
 	framework.ExpectNoError(err)

 	// Wait for all of them to be scheduled

@@ -23,6 +23,7 @@ import (

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -333,6 +334,9 @@ func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volume
 	const nodeHostnameLabelKey = "kubernetes.io/hostname"

 	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
+	rcLabels := map[string]string{
+		"name": rcName,
+	}
 	targetNode, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 	framework.ExpectNoError(err)

@@ -361,12 +365,10 @@ func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volume
 		},
 		Spec: v1.ReplicationControllerSpec{
 			Replicas: &podCount,
-			Selector: map[string]string{
-				"name": rcName,
-			},
+			Selector: rcLabels,
 			Template: &v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{"name": rcName},
+					Labels: rcLabels,
 				},
 				Spec: v1.PodSpec{
 					Containers: []v1.Container{
@@ -388,7 +390,7 @@ func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volume

 	ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName)

-	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rcName, podCount)
+	pods, err := e2epod.PodsCreatedByLabel(ctx, f.ClientSet, f.Namespace.Name, rcName, podCount, labels.SelectorFromSet(rcLabels))
 	framework.ExpectNoError(err, "error creating pods")

 	ginkgo.By("Ensuring each pod is running")