Adding events to deployment

nikhiljindal
2015-09-29 16:55:06 -07:00
parent 00ab055fa1
commit c6deb442f9
6 changed files with 259 additions and 94 deletions


@@ -21,8 +21,10 @@ import (
"math"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
@@ -31,14 +33,20 @@ import (
 )
 
 type DeploymentController struct {
-	client    client.Interface
-	expClient client.ExperimentalInterface
+	client        client.Interface
+	expClient     client.ExperimentalInterface
+	eventRecorder record.EventRecorder
 }
 
 func New(client client.Interface) *DeploymentController {
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartRecordingToSink(client.Events(""))
 	return &DeploymentController{
-		client:    client,
-		expClient: client.Experimental(),
+		client:        client,
+		expClient:     client.Experimental(),
+		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
 	}
 }
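For context: the hunk above wires the controller to the broadcaster/recorder pattern from pkg/client/record, so every event it emits is both written to the controller log and recorded through the API client. A minimal standalone sketch of the same wiring (the package and helper names below are illustrative, not part of this commit):

package deployment

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/record"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// newEventRecorder mirrors the wiring New() performs above: every event is
// logged through glog and also recorded to the API server via the events
// client, with the deployment controller named as the event source.
func newEventRecorder(c client.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(c.Events(""))
	return eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"})
}

The resulting recorder is used exactly as the later hunks show, e.g. d.eventRecorder.Eventf(&deployment, "ScalingRC", "Scaled up rc %s to %d", newRC.Name, newReplicasCount).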
@@ -88,9 +96,7 @@ func (d *DeploymentController) reconcileRollingUpdateDeployment(deployment exper
 		return err
 	}
 
-	allRCs := []*api.ReplicationController{}
-	allRCs = append(allRCs, oldRCs...)
-	allRCs = append(allRCs, newRC)
+	allRCs := append(oldRCs, newRC)
 
 	// Scale up, if we can.
 	scaledUp, err := d.scaleUp(allRCs, newRC, deployment)
@@ -149,26 +155,6 @@ func (d *DeploymentController) getNewRC(deployment experimental.Deployment) (*ap
 	return createdRC, nil
 }
 
-func (d *DeploymentController) getPodsForRCs(replicationControllers []*api.ReplicationController) ([]api.Pod, error) {
-	allPods := []api.Pod{}
-	for _, rc := range replicationControllers {
-		podList, err := d.client.Pods(rc.ObjectMeta.Namespace).List(labels.SelectorFromSet(rc.Spec.Selector), fields.Everything())
-		if err != nil {
-			return allPods, fmt.Errorf("error listing pods: %v", err)
-		}
-		allPods = append(allPods, podList.Items...)
-	}
-	return allPods, nil
-}
-
-func (d *DeploymentController) getReplicaCountForRCs(replicationControllers []*api.ReplicationController) int {
-	totalReplicaCount := 0
-	for _, rc := range replicationControllers {
-		totalReplicaCount += rc.Spec.Replicas
-	}
-	return totalReplicaCount
-}
-
 func (d *DeploymentController) scaleUp(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
 	if newRC.Spec.Replicas == deployment.Spec.Replicas {
 		// Scaling up not required.
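The two helpers deleted above reappear below as package-level functions called through deploymentUtil (the import for that util package is not visible in this excerpt). A sketch of what those helpers plausibly look like, reconstructed from the deleted bodies and the call sites below; the package name and exact signatures are assumptions:

package deployment

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

// GetReplicaCountForRCs sums the desired replicas across the given RCs,
// matching the deleted getReplicaCountForRCs method.
func GetReplicaCountForRCs(rcs []*api.ReplicationController) int {
	totalReplicaCount := 0
	for _, rc := range rcs {
		totalReplicaCount += rc.Spec.Replicas
	}
	return totalReplicaCount
}

// GetAvailablePodsForRCs lists the pods selected by each RC and counts the
// ready ones, folding the deleted getPodsForRCs together with the ready-pod
// loop that the scaleDown hunk below removes.
func GetAvailablePodsForRCs(c client.Interface, rcs []*api.ReplicationController) (int, error) {
	readyPodCount := 0
	for _, rc := range rcs {
		podList, err := c.Pods(rc.ObjectMeta.Namespace).List(labels.SelectorFromSet(rc.Spec.Selector), fields.Everything())
		if err != nil {
			return readyPodCount, fmt.Errorf("error listing pods: %v", err)
		}
		for _, pod := range podList.Items {
			if api.IsPodReady(&pod) {
				readyPodCount++
			}
		}
	}
	return readyPodCount, nil
}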
@@ -182,11 +168,7 @@ func (d *DeploymentController) scaleUp(allRCs []*api.ReplicationController, newR
 		maxSurge = util.GetValueFromPercent(maxSurge, deployment.Spec.Replicas)
 	}
 	// Find the total number of pods
-	allPods, err := d.getPodsForRCs(allRCs)
-	if err != nil {
-		return false, err
-	}
-	currentPodCount := len(allPods)
+	currentPodCount := deploymentUtil.GetReplicaCountForRCs(allRCs)
 	// Check if we can scale up.
 	maxTotalPods := deployment.Spec.Replicas + maxSurge
 	if currentPodCount >= maxTotalPods {
@@ -196,12 +178,16 @@ func (d *DeploymentController) scaleUp(allRCs []*api.ReplicationController, newR
 	// Scale up.
 	scaleUpCount := maxTotalPods - currentPodCount
 	scaleUpCount = int(math.Min(float64(scaleUpCount), float64(deployment.Spec.Replicas-newRC.Spec.Replicas)))
-	_, err = d.scaleRC(newRC, newRC.Spec.Replicas+scaleUpCount)
+	newReplicasCount := newRC.Spec.Replicas + scaleUpCount
+	_, err = d.scaleRC(newRC, newReplicasCount)
+	if err == nil {
+		d.eventRecorder.Eventf(&deployment, "ScalingRC", "Scaled up rc %s to %d", newRC.Name, newReplicasCount)
+	}
 	return true, err
 }
 
 func (d *DeploymentController) scaleDown(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
-	oldPodsCount := d.getReplicaCountForRCs(oldRCs)
+	oldPodsCount := deploymentUtil.GetReplicaCountForRCs(oldRCs)
 	if oldPodsCount == 0 {
 		// Cant scale down further
 		return false, nil
@@ -216,13 +202,9 @@ func (d *DeploymentController) scaleDown(allRCs []*api.ReplicationController, ol
 	// Check if we can scale down.
 	minAvailable := deployment.Spec.Replicas - maxUnavailable
 	// Find the number of ready pods.
-	// TODO: Use MinReadySeconds once https://github.com/kubernetes/kubernetes/pull/12894 is merged.
-	readyPodCount := 0
-	allPods, err := d.getPodsForRCs(allRCs)
-	for _, pod := range allPods {
-		if api.IsPodReady(&pod) {
-			readyPodCount++
-		}
+	readyPodCount, err := deploymentUtil.GetAvailablePodsForRCs(d.client, allRCs)
+	if err != nil {
+		return false, fmt.Errorf("could not find available pods: %v", err)
 	}
 	if readyPodCount <= minAvailable {
@@ -241,18 +223,20 @@ func (d *DeploymentController) scaleDown(allRCs []*api.ReplicationController, ol
 		}
 		// Scale down.
 		scaleDownCount := int(math.Min(float64(targetRC.Spec.Replicas), float64(totalScaleDownCount)))
-		_, err = d.scaleRC(targetRC, targetRC.Spec.Replicas-scaleDownCount)
+		newReplicasCount := targetRC.Spec.Replicas - scaleDownCount
+		_, err = d.scaleRC(targetRC, newReplicasCount)
 		if err != nil {
 			return false, err
 		}
+		d.eventRecorder.Eventf(&deployment, "ScalingRC", "Scaled down rc %s to %d", targetRC.Name, newReplicasCount)
 		totalScaleDownCount -= scaleDownCount
 	}
 	return true, err
 }
 
 func (d *DeploymentController) updateDeploymentStatus(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) error {
-	totalReplicas := d.getReplicaCountForRCs(allRCs)
-	updatedReplicas := d.getReplicaCountForRCs([]*api.ReplicationController{newRC})
+	totalReplicas := deploymentUtil.GetReplicaCountForRCs(allRCs)
+	updatedReplicas := deploymentUtil.GetReplicaCountForRCs([]*api.ReplicationController{newRC})
 	newDeployment := deployment
 	// TODO: Reconcile this with API definition. API definition talks about ready pods, while this just computes created pods.
 	newDeployment.Status = experimental.DeploymentStatus{