Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 03:38:15 +00:00)
	Merge pull request #42996 from kargakis/update-rs-test
Automatic merge from submit-queue

test: update replica set e2e test

Should reduce the flakiness of https://github.com/kubernetes/kubernetes/issues/30981, https://github.com/kubernetes/kubernetes/issues/36429, and https://github.com/kubernetes/kubernetes/issues/32023 by reducing the replicas from 2 to 1, and also makes test failures clearer.

@kubernetes/sig-apps-test-failures
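The "clearer failure" half of the change is a reporting pattern visible in the hunks below: when the per-pod wait times out, the test re-fetches the pod and folds its phase and conditions into the error, and a running counter sanity-checks the final count. Here is a minimal standalone sketch of that pattern — illustrative only; `ensureRunning`, `podStatus`, and the callback stand-ins are hypothetical, not the e2e framework's API:

package main

import (
	"errors"
	"fmt"
)

// podStatus stands in for the pod state a real client Get would return.
type podStatus struct {
	Phase      string
	Conditions []string
}

// ensureRunning waits for a pod and, on timeout, enriches the error with
// the last observed state so the log explains *why* the pod never ran.
func ensureRunning(name string, waitForRunning func(string) error, getStatus func(string) (podStatus, error)) error {
	err := waitForRunning(name)
	if err == nil {
		return nil
	}
	status, getErr := getStatus(name)
	if getErr != nil {
		// Could not re-fetch the pod; report the bare wait error.
		return fmt.Errorf("pod %q never ran: %v", name, err)
	}
	// Enriched failure: phase and conditions make the flake diagnosable.
	return fmt.Errorf("pod %q never ran (phase: %s, conditions: %+v): %v",
		name, status.Phase, status.Conditions, err)
}

func main() {
	timeout := errors.New("timed out waiting for the condition")
	err := ensureRunning("my-hostname-basic-1234",
		func(string) error { return timeout },
		func(string) (podStatus, error) {
			return podStatus{Phase: "Pending", Conditions: []string{"PodScheduled=False"}}, nil
		})
	fmt.Println(err)
}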
@@ -73,7 +73,7 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {

 	checkExistingRCRecovers(f)

-	ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
+	testReplicationControllerServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
 }

 // For this duration, etcd will be failed by executing a failCommand on the master.
@@ -39,18 +39,18 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
 	f := framework.NewDefaultFramework("replication-controller")

 	It("should serve a basic image on each replica with a public image [Conformance]", func() {
-		ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
+		testReplicationControllerServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
 	})

 	It("should serve a basic image on each replica with a private image", func() {
 		// requires private images
 		framework.SkipUnlessProviderIs("gce", "gke")

-		ServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
+		testReplicationControllerServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
 	})

 	It("should surface a failure condition on a common issue like exceeded quota", func() {
-		rcConditionCheck(f)
+		testReplicationControllerConditionCheck(f)
 	})

 	It("should adopt matching pods on creation", func() {
@@ -91,70 +91,57 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
 // A basic test to check the deployment of an image using
 // a replication controller. The image serves its hostname
 // which is checked for each replica.
-func ServeImageOrFail(f *framework.Framework, test string, image string) {
+func testReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
-	replicas := int32(2)
+	replicas := int32(1)

 	// Create a replication controller for a service
 	// that serves its hostname.
 	// The source for the Docker containter kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
 	By(fmt.Sprintf("Creating replication controller %s", name))
-	controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: v1.ReplicationControllerSpec{
-			Replicas: func(i int32) *int32 { return &i }(replicas),
-			Selector: map[string]string{
-				"name": name,
-			},
-			Template: &v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{"name": name},
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  name,
-							Image: image,
-							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
-						},
-					},
-				},
-			},
-		},
-	})
+	newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
+	newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
+	_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(newRC)
 	Expect(err).NotTo(HaveOccurred())
-	// Cleanup the replication controller when we are done.
-	defer func() {
-		// Resize the replication controller to zero to get rid of pods.
-		if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
-			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
-		}
-	}()
-
-	// List the pods, making sure we observe all the replicas.
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

+	// Check that pods for the new RC were created.
+	// TODO: Maybe switch PodsCreated to just check owner references.
 	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
-	By("Ensuring each pod is running")
+	Expect(err).NotTo(HaveOccurred())

 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
+	framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
+	running := int32(0)
 	for _, pod := range pods.Items {
 		if pod.DeletionTimestamp != nil {
 			continue
 		}
 		err = f.WaitForPodRunning(pod.Name)
+		if err != nil {
+			updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+			if getErr == nil {
+				err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			} else {
+				err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			}
+		}
 		Expect(err).NotTo(HaveOccurred())
+		framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
+		running++
+	}
+
+	// Sanity check
+	if running != replicas {
+		Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
 	}

 	// Verify that something is listening.
-	By("Trying to dial each unique pod")
+	framework.Logf("Trying to dial the pod")
 	retryTimeout := 2 * time.Minute
 	retryInterval := 5 * time.Second
+	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
 	if err != nil {
 		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
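Design note (an inference, not stated in the diff): the rewritten function drops the explicit `defer framework.DeleteRCAndPods(...)` cleanup. Tests built on `framework.NewDefaultFramework` run in a per-test namespace that the framework tears down afterwards, which deletes the controller and its pods anyway, so the defer was presumably redundant. Dropping `replicas` from 2 to 1 is the flake-reduction half of the commit message: with a single replica there is one less pod to schedule, pull, and dial before the proxy check.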
@@ -165,12 +152,12 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
 // 2. Create a replication controller that wants to run 3 pods.
 // 3. Check replication controller conditions for a ReplicaFailure condition.
 // 4. Relax quota or scale down the controller and observe the condition is gone.
-func rcConditionCheck(f *framework.Framework) {
+func testReplicationControllerConditionCheck(f *framework.Framework) {
 	c := f.ClientSet
 	namespace := f.Namespace.Name
 	name := "condition-test"

-	By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
+	framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
 	quota := newPodQuota(name, "2")
 	_, err := c.Core().ResourceQuotas(namespace).Create(quota)
 	Expect(err).NotTo(HaveOccurred())
@@ -43,7 +43,7 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
 			Name: rsName,
 		},
 		Spec: extensions.ReplicaSetSpec{
-			Replicas: func(i int32) *int32 { return &i }(replicas),
+			Replicas: &replicas,
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: rsPodLabels,
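The `newRS` hunk above also retires the immediately-invoked closure for taking a replica-count pointer. In Go a parameter like `replicas` is an addressable local copy, so `&replicas` already yields a stable `*int32`; the closure only copied the value once more before taking its address. A tiny self-contained illustration (`specReplicas` is a hypothetical stand-in, not code from the PR):

package main

import "fmt"

// specReplicas shows the two equivalent ways the diff writes a *int32
// for a spec's Replicas field.
func specReplicas(replicas int32) (viaClosure, direct *int32) {
	viaClosure = func(i int32) *int32 { return &i }(replicas) // old style: copy, then address the copy
	direct = &replicas                                        // new style: address the parameter copy
	return viaClosure, direct
}

func main() {
	a, b := specReplicas(1)
	fmt.Println(*a, *b) // 1 1 — two independent copies of the same value
}

The one caveat: `direct` aliases the parameter, so mutating `replicas` later in the function would also change the value the spec points at; `newRS` never mutates it, which is presumably why the shorter form is safe here.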
@@ -79,18 +79,18 @@ var _ = framework.KubeDescribe("ReplicaSet", func() {
 	f := framework.NewDefaultFramework("replicaset")

 	It("should serve a basic image on each replica with a public image [Conformance]", func() {
-		ReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
+		testReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
 	})

 	It("should serve a basic image on each replica with a private image", func() {
 		// requires private images
 		framework.SkipUnlessProviderIs("gce", "gke")

-		ReplicaSetServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
+		testReplicaSetServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
 	})

 	It("should surface a failure condition on a common issue like exceeded quota", func() {
-		rsConditionCheck(f)
+		testReplicaSetConditionCheck(f)
 	})

 	It("should adopt matching pods on creation", func() {
@@ -104,70 +104,56 @@ var _ = framework.KubeDescribe("ReplicaSet", func() {

 // A basic test to check the deployment of an image using a ReplicaSet. The
 // image serves its hostname which is checked for each replica.
-func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
+func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
 	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
-	replicas := int32(2)
+	replicas := int32(1)

 	// Create a ReplicaSet for a service that serves its hostname.
 	// The source for the Docker containter kubernetes/serve_hostname is
 	// in contrib/for-demos/serve_hostname
-	By(fmt.Sprintf("Creating ReplicaSet %s", name))
-	rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: extensions.ReplicaSetSpec{
-			Replicas: func(i int32) *int32 { return &i }(replicas),
-			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
-				"name": name,
-			}},
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{"name": name},
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Name:  name,
-							Image: image,
-							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
-						},
-					},
-				},
-			},
-		},
-	})
+	framework.Logf("Creating ReplicaSet %s", name)
+	newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
+	newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
+	_, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(newRS)
 	Expect(err).NotTo(HaveOccurred())
-	// Cleanup the ReplicaSet when we are done.
-	defer func() {
-		// Resize the ReplicaSet to zero to get rid of pods.
-		if err := framework.DeleteReplicaSet(f.ClientSet, f.InternalClientset, f.Namespace.Name, rs.Name); err != nil {
-			framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
-		}
-	}()
-
-	// List the pods, making sure we observe all the replicas.
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

+	// Check that pods for the new RS were created.
+	// TODO: Maybe switch PodsCreated to just check owner references.
 	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
 	Expect(err).NotTo(HaveOccurred())

-	By("Ensuring each pod is running")
-
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
+	framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
+	running := int32(0)
 	for _, pod := range pods.Items {
 		if pod.DeletionTimestamp != nil {
 			continue
 		}
 		err = f.WaitForPodRunning(pod.Name)
+		if err != nil {
+			updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+			if getErr == nil {
+				err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+			} else {
+				err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
+			}
+		}
 		Expect(err).NotTo(HaveOccurred())
+		framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
+		running++
+	}
+
+	// Sanity check
+	if running != replicas {
+		Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
 	}

 	// Verify that something is listening.
-	By("Trying to dial each unique pod")
+	framework.Logf("Trying to dial the pod")
 	retryTimeout := 2 * time.Minute
 	retryInterval := 5 * time.Second
+	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
 	if err != nil {
 		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
@@ -178,7 +164,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
 // 2. Create a replica set that wants to run 3 pods.
 // 3. Check replica set conditions for a ReplicaFailure condition.
 // 4. Scale down the replica set and observe the condition is gone.
-func rsConditionCheck(f *framework.Framework) {
+func testReplicaSetConditionCheck(f *framework.Framework) {
 	c := f.ClientSet
 	namespace := f.Namespace.Name
 	name := "condition-test"