Update deployment e2e test to check for a violated RollingUpdate strategy

Janet Kuo
2016-08-01 17:04:00 -07:00
parent a82c55213b
commit 808041cbd4
3 changed files with 99 additions and 28 deletions

View File

@@ -601,12 +601,12 @@ func CountAvailablePodsForReplicaSets(podList *api.PodList, rss []*extensions.Re
}
// GetAvailablePodsForDeployment returns the number of available pods (listed from clientset) corresponding to the given deployment.
-func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment, minReadySeconds int32) (int32, error) {
+func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment) (int32, error) {
podList, err := listPods(deployment, c)
if err != nil {
return 0, err
}
-return countAvailablePods(podList.Items, minReadySeconds), nil
+return countAvailablePods(podList.Items, deployment.Spec.MinReadySeconds), nil
}
func countAvailablePods(pods []api.Pod, minReadySeconds int32) int32 {
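A minimal caller-side sketch of the new signature: minReadySeconds is no longer passed by the caller, since the helper now reads deployment.Spec.MinReadySeconds itself. The package clause and import paths below are assumptions about the tree layout of this era, not part of the commit:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

// checkAvailability calls the updated helper; the minReadySeconds argument is
// gone and is taken from the deployment spec inside the helper instead.
func checkAvailability(c clientset.Interface, d *extensions.Deployment) error {
	available, err := deploymentutil.GetAvailablePodsForDeployment(c, d)
	if err != nil {
		return err
	}
	if available < d.Spec.Replicas {
		return fmt.Errorf("only %d of %d pods available (minReadySeconds=%d)",
			available, d.Spec.Replicas, d.Spec.MinReadySeconds)
	}
	return nil
}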

View File

@@ -83,7 +83,7 @@ var _ = framework.KubeDescribe("Deployment", func() {
It("paused deployment should be able to scale", func() {
testScalePausedDeployment(f)
})
It("scaled rollout should not block on annotation check", func() {
It("scaled rollout deployment should not block on annotation check", func() {
testScaledRolloutDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
@@ -236,7 +236,7 @@ func testNewDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@@ -285,7 +285,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
// There should be 1 old RS (nginx-controller, which is adopted)
@@ -341,7 +341,7 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", redisImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected. We use events to verify that.
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
@@ -397,7 +397,7 @@ func testRecreateDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected. We use events to verify that.
@@ -557,7 +557,7 @@ func testRolloverDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatus(c, deployment)
Expect(err).NotTo(HaveOccurred())
}
@@ -685,7 +685,7 @@ func testRollbackDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "create"
@@ -711,7 +711,7 @@ func testRollbackDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatus(c, deployment)
Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "update"
@@ -734,7 +734,7 @@ func testRollbackDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatus(c, deployment)
Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "create", after the rollback
@@ -755,7 +755,7 @@ func testRollbackDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatus(c, deployment)
Expect(err).NotTo(HaveOccurred())
// Current newRS annotation should be "update", after the rollback
@@ -804,7 +804,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
// Check that the replica set we created still doesn't contain revision information
@@ -846,7 +846,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatus(c, deployment)
Expect(err).NotTo(HaveOccurred())
// 4. Update the deploymentRollback to rollback to revision 1
@@ -866,7 +866,7 @@ func testRollbackDeploymentRSNoRevision(f *framework.Framework) {
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatus(c, deployment)
Expect(err).NotTo(HaveOccurred())
// 5. Update the deploymentRollback to rollback to revision 10
@@ -938,7 +938,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
// The RS and pods should be relabeled before the status is updated by syncRollingUpdateDeployment
-err = framework.WaitForDeploymentStatus(c, deploy, true)
+err = framework.WaitForDeploymentStatus(c, deploy)
Expect(err).NotTo(HaveOccurred())
// There should be no old RSs (overlapping RS)
@@ -1043,7 +1043,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatusValid(c, deployment, true)
Expect(err).NotTo(HaveOccurred())
first, err := deploymentutil.GetNewReplicaSet(deployment, c)
@@ -1064,7 +1064,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
-err = framework.WaitForDeploymentStatus(c, deployment, false)
+err = framework.WaitForDeploymentStatusValid(c, deployment, false)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Checking that the replica sets for %q are synced", deploymentName))
@@ -1105,7 +1105,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
}
By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatusValid(c, deployment, true)
Expect(err).NotTo(HaveOccurred())
// Update the deployment with a non-existent image so that the new replica set will be blocked.
@@ -1123,7 +1123,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
-err = framework.WaitForDeploymentStatus(c, deployment, false)
+err = framework.WaitForDeploymentStatusValid(c, deployment, false)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Checking that the replica sets for %q are synced", deploymentName))
@@ -1164,6 +1164,6 @@ func testScaledRolloutDeployment(f *framework.Framework) {
}
By(fmt.Sprintf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)))
-err = framework.WaitForDeploymentStatus(c, deployment, true)
+err = framework.WaitForDeploymentStatusValid(c, deployment, true)
Expect(err).NotTo(HaveOccurred())
}
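The scaled-rollout test above now leans on two framework helpers with different strictness. A minimal sketch of that division of labor (the package clause and import paths are assumptions; the framework calls themselves mirror the hunks above):

package e2e

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForScaledRollout first checks only that the RollingUpdate bounds hold
// while the deployment is still being reshaped, then waits for the rollout to
// finish with the stricter helper.
func waitForScaledRollout(c clientset.Interface, d *extensions.Deployment) error {
	// Lenient: maxSurge/maxUnavailable must hold, but the rollout need not be
	// complete yet (false); transient violations only keep the poll going and
	// surface as the reason on timeout.
	if err := framework.WaitForDeploymentStatusValid(c, d, false); err != nil {
		return err
	}
	// Strict: waits for completion and aborts with an error the moment
	// maxSurge or maxUnavailable is violated.
	return framework.WaitForDeploymentStatus(c, d)
}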

View File

@@ -3167,9 +3167,9 @@ func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) erro
})
}
-// Waits for the deployment to reach desired state.
-// Returns an error if minAvailable or maxCreated is broken at any times.
-func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, expectComplete bool) error {
+// Waits for the deployment status to sync (i.e. max unavailable and max surge aren't violated anymore).
+// If expectComplete, wait until all its replicas become up-to-date.
+func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, expectComplete bool) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
@@ -3190,6 +3190,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
if newRS == nil {
// New RC hasn't been created yet.
reason = "new replica set hasn't been created yet"
+Logf(reason)
return false, nil
}
allRSs = append(oldRSs, newRS)
@@ -3197,11 +3198,12 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
+Logf(reason)
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment, deployment.Spec.MinReadySeconds)
+totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
if err != nil {
return false, err
}
@@ -3211,7 +3213,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
Logf(reason)
return false, nil
}
-minAvailable := deployment.Spec.Replicas - deploymentutil.MaxUnavailable(*deployment)
+minAvailable := deploymentutil.MinAvailable(deployment)
if totalAvailable < minAvailable {
reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
Logf(reason)
@@ -3235,7 +3237,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-logPodsOfDeployment(c, deployment, deployment.Spec.MinReadySeconds)
+logPodsOfDeployment(c, deployment)
err = fmt.Errorf("%s", reason)
}
if err != nil {
@@ -3244,6 +3246,74 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment, ex
return nil
}
+// Waits for the deployment to reach desired state.
+// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any times.
+func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
+var (
+oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
+newRS *extensions.ReplicaSet
+deployment *extensions.Deployment
+)
+err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
+var err error
+deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
+if err != nil {
+return false, err
+}
+oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
+if err != nil {
+return false, err
+}
+if newRS == nil {
+// New RS hasn't been created yet.
+return false, nil
+}
+allRSs = append(oldRSs, newRS)
+// The old/new ReplicaSets need to contain the pod-template-hash label
+for i := range allRSs {
+if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
+return false, nil
+}
+}
+totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
+totalAvailable, err := deploymentutil.GetAvailablePodsForDeployment(c, deployment)
+if err != nil {
+return false, err
+}
+maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
+if totalCreated > maxCreated {
+logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
+logPodsOfDeployment(c, deployment)
+return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
+}
+minAvailable := deploymentutil.MinAvailable(deployment)
+if totalAvailable < minAvailable {
+logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
+logPodsOfDeployment(c, deployment)
+return false, fmt.Errorf("total pods available: %d, less than the min required: %d", totalAvailable, minAvailable)
+}
+// When the deployment status and its underlying resources reach the desired state, we're done
+if deployment.Status.Replicas == deployment.Spec.Replicas &&
+deployment.Status.UpdatedReplicas == deployment.Spec.Replicas &&
+deploymentutil.GetReplicaCountForReplicaSets(oldRSs) == 0 &&
+deploymentutil.GetReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}) == deployment.Spec.Replicas {
+return true, nil
+}
+return false, nil
+})
+if err == wait.ErrWaitTimeout {
+logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
+logPodsOfDeployment(c, deployment)
+}
+if err != nil {
+return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
+}
+return nil
+}
// WaitForDeploymentUpdatedReplicasLTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
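The two invariants polled by the new WaitForDeploymentStatus come straight from the RollingUpdate strategy: total pods created may not exceed replicas plus maxSurge, and available pods may not drop below replicas minus maxUnavailable. A worked example with illustrative numbers (not taken from these tests), recalling that the surge is rounded up and the unavailability rounded down:

package main

import "fmt"

// A deployment with 10 replicas and a RollingUpdate strategy of maxSurge=25%,
// maxUnavailable=25%; the percentages resolve to whole pods as noted above.
func main() {
	replicas := int32(10)
	maxSurge := int32(3)       // 25% of 10, rounded up
	maxUnavailable := int32(2) // 25% of 10, rounded down

	maxCreated := replicas + maxSurge         // totalCreated must stay <= 13
	minAvailable := replicas - maxUnavailable // totalAvailable must stay >= 8
	fmt.Printf("maxCreated=%d minAvailable=%d\n", maxCreated, minAvailable)
}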
@@ -3385,7 +3455,8 @@ func WaitForObservedDeployment(c *clientset.Clientset, ns, deploymentName string
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
}
-func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, minReadySeconds int32) {
+func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
+minReadySeconds := deployment.Spec.MinReadySeconds
podList, err := deploymentutil.ListPods(deployment,
func(namespace string, options api.ListOptions) (*api.PodList, error) {
return c.Core().Pods(namespace).List(options)