use versioned group clients from client-go

supereagle
2017-07-28 15:54:13 +08:00
parent 9feabbdaeb
commit b694d51842
37 changed files with 163 additions and 163 deletions
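For context, here is a minimal standalone sketch of the pattern this commit adopts: instead of the deprecated unversioned group accessor (Extensions()), callers pin the API group and version explicitly (ExtensionsV1beta1()). The kubeconfig path, namespace, and surrounding scaffolding are illustrative assumptions, and the call signatures match client-go of this era (no context.Context argument).

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default home location (illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Before: the unversioned group accessor, an alias whose backing
	// version can shift between releases.
	//   rsClient := clientset.Extensions().ReplicaSets("default")
	//
	// After: the group/version is pinned explicitly.
	rsClient := clientset.ExtensionsV1beta1().ReplicaSets("default")

	if _, err := rsClient.List(metav1.ListOptions{}); err != nil {
		panic(err)
	}
}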


@@ -100,7 +100,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
}
var err error
- d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
return err
}
@@ -167,7 +167,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet,
return nil, err
}
// 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label.
- updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
+ updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name,
func(updated *extensions.ReplicaSet) error {
// Precondition: the RS doesn't contain the new hash in its pod template label.
if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
@@ -207,7 +207,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet,
// 3. Update rs label and selector to include the new hash label
// Copy the old selector, so that we can scrub out any orphaned pods
- updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
+ updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error {
// Precondition: the RS doesn't contain the new hash in its label and selector.
if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
return utilerrors.ErrPreconditionViolated
@@ -251,7 +251,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
- return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
+ return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
}
// Should use the revision in existingNewRS's annotation, since it was set before
@@ -269,7 +269,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
if needsUpdate {
var err error
- if d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d); err != nil {
+ if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
return nil, err
}
}
@@ -315,7 +315,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
- createdRS, err := dc.client.Extensions().ReplicaSets(d.Namespace).Create(&newRS)
+ createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
// Fetch a copy of the ReplicaSet. If its PodTemplateSpec is semantically deep equal
@@ -338,7 +338,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
- _, dErr := dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ _, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
@@ -355,7 +355,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
- _, _ = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ _, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
@@ -372,7 +372,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
needsUpdate = true
}
if needsUpdate {
- _, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
+ _, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
}
return createdRS, err
}
@@ -508,7 +508,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
var err error
if sizeNeedsUpdate || annotationsNeedUpdate {
*(rsCopy.Spec.Replicas) = newScale
- rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
+ rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
@@ -546,7 +546,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
- if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
+ if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.
return err
@@ -566,7 +566,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic
newDeployment := d
newDeployment.Status = newStatus
- _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+ _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}