Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-02 03:08:15 +00:00
rename legacy to core
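The change is mechanical: every call through the generated clientset's Legacy() group accessor becomes Core(), and the typed helpers move from pkg/client/typed/generated/legacy/unversioned to pkg/client/typed/generated/core/unversioned. A minimal sketch of the call-site pattern, using the release_1_2 clientset and helper packages that appear in this diff (the wrapper function itself is illustrative, not part of the commit):

package example

import (
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

// listAllPods lists every pod in the cluster through the core API group.
// Before this commit the same call was spelled c.Legacy().Pods(...).
func listAllPods(c clientset.Interface) (*api.PodList, error) {
	options := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
	return c.Core().Pods(api.NamespaceAll).List(options)
}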
@@ -321,7 +321,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
 if labels.Set(pod.Labels).AsSelector().Empty() {
 return fmt.Errorf("unable to create pods, no labels")
 }
-if newPod, err := r.KubeClient.Legacy().Pods(namespace).Create(pod); err != nil {
+if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
 r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
 return fmt.Errorf("unable to create pods: %v", err)
 } else {
@@ -336,7 +336,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 if err != nil {
 return fmt.Errorf("object does not have ObjectMeta, %v", err)
 }
-if err := r.KubeClient.Legacy().Pods(namespace).Delete(podID, nil); err != nil {
+if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
 r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err)
 return fmt.Errorf("unable to delete pods: %v", err)
 } else {
@@ -449,7 +449,7 @@ func SyncAllPodsWithStore(kubeClient clientset.Interface, store cache.Store) {
 var err error
 listOptions := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
 for {
-if allPods, err = kubeClient.Legacy().Pods(api.NamespaceAll).List(listOptions); err != nil {
+if allPods, err = kubeClient.Core().Pods(api.NamespaceAll).List(listOptions); err != nil {
 glog.Warningf("Retrying pod list: %v", err)
 continue
 }

@@ -29,8 +29,8 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
 "k8s.io/kubernetes/pkg/client/record"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 unversioned_extensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/labels"
@@ -98,7 +98,7 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 // TODO: remove the wrapper when every clients have moved to use the clientset.
-eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
+eventBroadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})

 dsc := &DaemonSetsController{
 kubeClient: kubeClient,
@@ -146,10 +146,10 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 dsc.podStore.Store, dsc.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return dsc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return dsc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return dsc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return dsc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},
@@ -164,10 +164,10 @@ func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod contro
 dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return dsc.kubeClient.Legacy().Nodes().List(options)
+return dsc.kubeClient.Core().Nodes().List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return dsc.kubeClient.Legacy().Nodes().Watch(options)
+return dsc.kubeClient.Core().Nodes().Watch(options)
 },
 },
 &api.Node{},

@@ -31,7 +31,7 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
 "k8s.io/kubernetes/pkg/client/record"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/runtime"
@@ -99,7 +99,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 // TODO: remove the wrapper when every clients have moved to use the clientset.
-eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{client.Legacy().Events("")})
+eventBroadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{client.Core().Events("")})

 dc := &DeploymentController{
 client: client,
@@ -144,10 +144,10 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 dc.rcStore.Store, dc.rcController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return dc.client.Legacy().ReplicationControllers(api.NamespaceAll).List(options)
+return dc.client.Core().ReplicationControllers(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return dc.client.Legacy().ReplicationControllers(api.NamespaceAll).Watch(options)
+return dc.client.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
 },
 },
 &api.ReplicationController{},
@@ -162,10 +162,10 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
 dc.podStore.Store, dc.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return dc.client.Legacy().Pods(api.NamespaceAll).List(options)
+return dc.client.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return dc.client.Legacy().Pods(api.NamespaceAll).Watch(options)
+return dc.client.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},
@@ -693,7 +693,7 @@ func (dc *DeploymentController) getNewRC(deployment extensions.Deployment, maxOl
 glog.V(4).Infof("update existingNewRC %s revision to %s - %+v\n", existingNewRC.Name, newRevision)
 }
 if annotationChanged {
-return dc.client.Legacy().ReplicationControllers(deployment.ObjectMeta.Namespace).Update(existingNewRC)
+return dc.client.Core().ReplicationControllers(deployment.ObjectMeta.Namespace).Update(existingNewRC)
 }
 return existingNewRC, nil
 }
@@ -740,7 +740,7 @@ func (dc *DeploymentController) getNewRC(deployment extensions.Deployment, maxOl
 Template: &newRCTemplate,
 },
 }
-createdRC, err := dc.client.Legacy().ReplicationControllers(namespace).Create(&newRC)
+createdRC, err := dc.client.Core().ReplicationControllers(namespace).Create(&newRC)
 if err != nil {
 dc.rcExpectations.DeleteExpectations(dKey)
 return nil, fmt.Errorf("error creating replication controller: %v", err)
@@ -764,7 +764,7 @@ func (dc *DeploymentController) updateRCRevision(rc api.ReplicationController, r
 rc.Annotations = make(map[string]string)
 }
 rc.Annotations[deploymentutil.RevisionAnnotation] = revision
-_, err := dc.client.Legacy().ReplicationControllers(rc.ObjectMeta.Namespace).Update(&rc)
+_, err := dc.client.Core().ReplicationControllers(rc.ObjectMeta.Namespace).Update(&rc)
 return err
 }

@@ -913,7 +913,7 @@ func (dc *DeploymentController) cleanupOldRcs(oldRCs []*api.ReplicationControlle
 if controller.Spec.Replicas != 0 || controller.Generation > controller.Status.ObservedGeneration {
 continue
 }
-if err := dc.client.Legacy().ReplicationControllers(controller.Namespace).Delete(controller.Name, nil); err != nil && !errors.IsNotFound(err) {
+if err := dc.client.Core().ReplicationControllers(controller.Namespace).Delete(controller.Name, nil); err != nil && !errors.IsNotFound(err) {
 glog.V(2).Infof("Failed deleting old rc %v for deployment %v: %v", controller.Name, deployment.Name, err)
 errList = append(errList, err)
 }
@@ -967,7 +967,7 @@ func (dc *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationControl
 func (dc *DeploymentController) scaleRC(rc *api.ReplicationController, newScale int) (*api.ReplicationController, error) {
 // TODO: Using client for now, update to use store when it is ready.
 rc.Spec.Replicas = newScale
-return dc.client.Legacy().ReplicationControllers(rc.ObjectMeta.Namespace).Update(rc)
+return dc.client.Core().ReplicationControllers(rc.ObjectMeta.Namespace).Update(rc)
 }

 func (dc *DeploymentController) updateDeployment(deployment *extensions.Deployment) (*extensions.Deployment, error) {

@@ -62,10 +62,10 @@ func NewEndpointController(client *clientset.Clientset, resyncPeriod controller.
 e.serviceStore.Store, e.serviceController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return e.client.Legacy().Services(api.NamespaceAll).List(options)
+return e.client.Core().Services(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return e.client.Legacy().Services(api.NamespaceAll).Watch(options)
+return e.client.Core().Services(api.NamespaceAll).Watch(options)
 },
 },
 &api.Service{},
@@ -83,10 +83,10 @@ func NewEndpointController(client *clientset.Clientset, resyncPeriod controller.
 e.podStore.Store, e.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return e.client.Legacy().Pods(api.NamespaceAll).List(options)
+return e.client.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return e.client.Legacy().Pods(api.NamespaceAll).Watch(options)
+return e.client.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},

@@ -53,7 +53,7 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
 kubeClient: kubeClient,
 threshold: threshold,
 deletePod: func(namespace, name string) error {
-return kubeClient.Legacy().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
+return kubeClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
 },
 }

@@ -63,11 +63,11 @@ func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFun
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 options.FieldSelector = terminatedSelector
-return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return gcc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
 options.FieldSelector = terminatedSelector
-return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return gcc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},

@@ -29,7 +29,7 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
 "k8s.io/kubernetes/pkg/client/record"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/framework"
 replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
@@ -74,7 +74,7 @@ func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.Re
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 // TODO: remove the wrapper when every clients have moved to use the clientset.
-eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
+eventBroadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})

 jm := &JobController{
 kubeClient: kubeClient,
@@ -113,10 +113,10 @@ func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.Re
 jm.podStore.Store, jm.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return jm.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return jm.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return jm.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return jm.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},

@@ -47,10 +47,10 @@ func NewNamespaceController(kubeClient clientset.Interface, versions *unversione
 _, controller = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return kubeClient.Legacy().Namespaces().List(options)
+return kubeClient.Core().Namespaces().List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return kubeClient.Legacy().Namespaces().Watch(options)
+return kubeClient.Core().Namespaces().Watch(options)
 },
 },
 &api.Namespace{},
@@ -140,7 +140,7 @@ func finalizeNamespaceFunc(kubeClient clientset.Interface, namespace *api.Namesp
 for _, value := range finalizerSet.List() {
 namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
 }
-namespace, err := kubeClient.Legacy().Namespaces().Finalize(&namespaceFinalize)
+namespace, err := kubeClient.Core().Namespaces().Finalize(&namespaceFinalize)
 if err != nil {
 // it was removed already, so life is good
 if errors.IsNotFound(err) {
@@ -253,7 +253,7 @@ func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespa
 if !errors.IsConflict(err) {
 return nil, err
 }
-latestNamespace, err = kubeClient.Legacy().Namespaces().Get(latestNamespace.Name)
+latestNamespace, err = kubeClient.Core().Namespaces().Get(latestNamespace.Name)
 if err != nil {
 return nil, err
 }
@@ -270,7 +270,7 @@ func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *api.Na
 newNamespace.ObjectMeta = namespace.ObjectMeta
 newNamespace.Status = namespace.Status
 newNamespace.Status.Phase = api.NamespaceTerminating
-return kubeClient.Legacy().Namespaces().UpdateStatus(&newNamespace)
+return kubeClient.Core().Namespaces().UpdateStatus(&newNamespace)
 }

 // syncNamespace orchestrates deletion of a Namespace and its associated content.
@@ -282,7 +282,7 @@ func syncNamespace(kubeClient clientset.Interface, versions *unversioned.APIVers
 // multiple controllers may edit a namespace during termination
 // first get the latest state of the namespace before proceeding
 // if the namespace was deleted already, don't do anything
-namespace, err := kubeClient.Legacy().Namespaces().Get(namespace.Name)
+namespace, err := kubeClient.Core().Namespaces().Get(namespace.Name)
 if err != nil {
 if errors.IsNotFound(err) {
 return nil
@@ -304,7 +304,7 @@ func syncNamespace(kubeClient clientset.Interface, versions *unversioned.APIVers

 // if the namespace is already finalized, delete it
 if finalized(namespace) {
-err = kubeClient.Legacy().Namespaces().Delete(namespace.Name, nil)
+err = kubeClient.Core().Namespaces().Delete(namespace.Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -328,7 +328,7 @@ func syncNamespace(kubeClient clientset.Interface, versions *unversioned.APIVers

 // now check if all finalizers have reported that we delete now
 if finalized(result) {
-err = kubeClient.Legacy().Namespaces().Delete(namespace.Name, nil)
+err = kubeClient.Core().Namespaces().Delete(namespace.Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -338,12 +338,12 @@ func syncNamespace(kubeClient clientset.Interface, versions *unversioned.APIVers
 }

 func deleteLimitRanges(kubeClient clientset.Interface, ns string) error {
-items, err := kubeClient.Legacy().LimitRanges(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().LimitRanges(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range items.Items {
-err := kubeClient.Legacy().LimitRanges(ns).Delete(items.Items[i].Name, nil)
+err := kubeClient.Core().LimitRanges(ns).Delete(items.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -352,12 +352,12 @@ func deleteLimitRanges(kubeClient clientset.Interface, ns string) error {
 }

 func deleteResourceQuotas(kubeClient clientset.Interface, ns string) error {
-resourceQuotas, err := kubeClient.Legacy().ResourceQuotas(ns).List(api.ListOptions{})
+resourceQuotas, err := kubeClient.Core().ResourceQuotas(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range resourceQuotas.Items {
-err := kubeClient.Legacy().ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name, nil)
+err := kubeClient.Core().ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -366,12 +366,12 @@ func deleteResourceQuotas(kubeClient clientset.Interface, ns string) error {
 }

 func deleteServiceAccounts(kubeClient clientset.Interface, ns string) error {
-items, err := kubeClient.Legacy().ServiceAccounts(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().ServiceAccounts(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range items.Items {
-err := kubeClient.Legacy().ServiceAccounts(ns).Delete(items.Items[i].Name, nil)
+err := kubeClient.Core().ServiceAccounts(ns).Delete(items.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -380,12 +380,12 @@ func deleteServiceAccounts(kubeClient clientset.Interface, ns string) error {
 }

 func deleteServices(kubeClient clientset.Interface, ns string) error {
-items, err := kubeClient.Legacy().Services(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().Services(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range items.Items {
-err := kubeClient.Legacy().Services(ns).Delete(items.Items[i].Name, nil)
+err := kubeClient.Core().Services(ns).Delete(items.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -394,12 +394,12 @@ func deleteServices(kubeClient clientset.Interface, ns string) error {
 }

 func deleteReplicationControllers(kubeClient clientset.Interface, ns string) error {
-items, err := kubeClient.Legacy().ReplicationControllers(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().ReplicationControllers(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range items.Items {
-err := kubeClient.Legacy().ReplicationControllers(ns).Delete(items.Items[i].Name, nil)
+err := kubeClient.Core().ReplicationControllers(ns).Delete(items.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -408,7 +408,7 @@ func deleteReplicationControllers(kubeClient clientset.Interface, ns string) err
 }

 func deletePods(kubeClient clientset.Interface, ns string, before unversioned.Time) (int64, error) {
-items, err := kubeClient.Legacy().Pods(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().Pods(ns).List(api.ListOptions{})
 if err != nil {
 return 0, err
 }
@@ -425,7 +425,7 @@ func deletePods(kubeClient clientset.Interface, ns string, before unversioned.Ti
 estimate = grace
 }
 }
-err := kubeClient.Legacy().Pods(ns).Delete(items.Items[i].Name, deleteOptions)
+err := kubeClient.Core().Pods(ns).Delete(items.Items[i].Name, deleteOptions)
 if err != nil && !errors.IsNotFound(err) {
 return 0, err
 }
@@ -437,16 +437,16 @@ func deletePods(kubeClient clientset.Interface, ns string, before unversioned.Ti
 }

 func deleteEvents(kubeClient clientset.Interface, ns string) error {
-return kubeClient.Legacy().Events(ns).DeleteCollection(nil, api.ListOptions{})
+return kubeClient.Core().Events(ns).DeleteCollection(nil, api.ListOptions{})
 }

 func deleteSecrets(kubeClient clientset.Interface, ns string) error {
-items, err := kubeClient.Legacy().Secrets(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().Secrets(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range items.Items {
-err := kubeClient.Legacy().Secrets(ns).Delete(items.Items[i].Name, nil)
+err := kubeClient.Core().Secrets(ns).Delete(items.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }
@@ -455,12 +455,12 @@ func deleteSecrets(kubeClient clientset.Interface, ns string) error {
 }

 func deletePersistentVolumeClaims(kubeClient clientset.Interface, ns string) error {
-items, err := kubeClient.Legacy().PersistentVolumeClaims(ns).List(api.ListOptions{})
+items, err := kubeClient.Core().PersistentVolumeClaims(ns).List(api.ListOptions{})
 if err != nil {
 return err
 }
 for i := range items.Items {
-err := kubeClient.Legacy().PersistentVolumeClaims(ns).Delete(items.Items[i].Name, nil)
+err := kubeClient.Core().PersistentVolumeClaims(ns).Delete(items.Items[i].Name, nil)
 if err != nil && !errors.IsNotFound(err) {
 return err
 }

@@ -30,7 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
 "k8s.io/kubernetes/pkg/client/record"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 client "k8s.io/kubernetes/pkg/client/unversioned"
 "k8s.io/kubernetes/pkg/cloudprovider"
 "k8s.io/kubernetes/pkg/controller"
@@ -138,7 +138,7 @@ func NewNodeController(
 eventBroadcaster.StartLogging(glog.Infof)
 if kubeClient != nil {
 glog.Infof("Sending events to api server.")
-eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
+eventBroadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})
 } else {
 glog.Infof("No api server defined - no events will be sent to API server.")
 }
@@ -171,10 +171,10 @@ func NewNodeController(
 nc.podStore.Store, nc.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return nc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return nc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},
@@ -187,10 +187,10 @@ func NewNodeController(
 nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return nc.kubeClient.Legacy().Nodes().List(options)
+return nc.kubeClient.Core().Nodes().List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return nc.kubeClient.Legacy().Nodes().Watch(options)
+return nc.kubeClient.Core().Nodes().Watch(options)
 },
 },
 &api.Node{},
@@ -367,7 +367,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {

 func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) {
 var zero int64
-err := c.Legacy().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
+err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
 if err != nil {
 utilruntime.HandleError(err)
 }
@@ -377,7 +377,7 @@ func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) {
 // post "NodeReady==ConditionUnknown". It also evicts all pods if node is not ready or
 // not reachable for a long period of time.
 func (nc *NodeController) monitorNodeStatus() error {
-nodes, err := nc.kubeClient.Legacy().Nodes().List(api.ListOptions{})
+nodes, err := nc.kubeClient.Core().Nodes().List(api.ListOptions{})
 if err != nil {
 return err
 }
@@ -421,7 +421,7 @@ func (nc *NodeController) monitorNodeStatus() error {
 break
 }
 name := node.Name
-node, err = nc.kubeClient.Legacy().Nodes().Get(name)
+node, err = nc.kubeClient.Core().Nodes().Get(name)
 if err != nil {
 glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
 break
@@ -487,7 +487,7 @@ func (nc *NodeController) monitorNodeStatus() error {
 continue
 }

-if err := nc.kubeClient.Legacy().Nodes().Delete(node.Name, nil); err != nil {
+if err := nc.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
 glog.Errorf("Unable to delete node %s: %v", node.Name, err)
 continue
 }
@@ -520,7 +520,7 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
 }
 glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR)
 node.Spec.PodCIDR = podCIDR
-if _, err := nc.kubeClient.Legacy().Nodes().Update(&node); err != nil {
+if _, err := nc.kubeClient.Core().Nodes().Update(&node); err != nil {
 nc.recordNodeStatusChange(&node, "CIDRAssignmentFailed")
 }
 }
@@ -698,7 +698,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
 }

 if !api.Semantic.DeepEqual(nc.getCondition(&node.Status, api.NodeReady), &lastReadyCondition) {
-if _, err = nc.kubeClient.Legacy().Nodes().UpdateStatus(node); err != nil {
+if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
 glog.Errorf("Error updating node %s: %v", node.Name, err)
 return gracePeriod, lastReadyCondition, readyCondition, err
 } else {
@@ -720,7 +720,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
 func (nc *NodeController) hasPods(nodeName string) (bool, error) {
 selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
 options := api.ListOptions{FieldSelector: selector}
-pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+pods, err := nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 if err != nil {
 return false, err
 }
@@ -755,7 +755,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
 remaining := false
 selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
 options := api.ListOptions{FieldSelector: selector}
-pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+pods, err := nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 if err != nil {
 return remaining, err
 }
@@ -781,7 +781,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {

 glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
 nc.recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
-if err := nc.kubeClient.Legacy().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
+if err := nc.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
 return false, err
 }
 remaining = true
@@ -794,7 +794,7 @@ func (nc *NodeController) deletePods(nodeName string) (bool, error) {
 func (nc *NodeController) markAllPodsNotReady(nodeName string) error {
 glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
 opts := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(client.PodHost, nodeName)}
-pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(opts)
+pods, err := nc.kubeClient.Core().Pods(api.NamespaceAll).List(opts)
 if err != nil {
 return err
 }
@@ -810,7 +810,7 @@ func (nc *NodeController) markAllPodsNotReady(nodeName string) error {
 if cond.Type == api.PodReady {
 pod.Status.Conditions[i].Status = api.ConditionFalse
 glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
-pod, err := nc.kubeClient.Legacy().Pods(pod.Namespace).UpdateStatus(&pod)
+pod, err := nc.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
 if err != nil {
 glog.Warningf("Failed to updated status for pod %q: %v", format.Pod(pod), err)
 errMsg = append(errMsg, fmt.Sprintf("%v", err))
@@ -836,7 +836,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool,

 selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
 options := api.ListOptions{FieldSelector: selector}
-pods, err := nc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+pods, err := nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 if err != nil {
 return false, nextAttempt, err
 }
@@ -865,7 +865,7 @@ func (nc *NodeController) terminatePods(nodeName string, since time.Time) (bool,
 remaining = 0
 glog.V(2).Infof("Removing pod %v after %s grace period", pod.Name, grace)
 nc.recordNodeEvent(nodeName, api.EventTypeNormal, "TerminatingEvictedPod", fmt.Sprintf("Pod %s has exceeded the grace period for deletion after being evicted from Node %q and is being force killed", pod.Name, nodeName))
-if err := nc.kubeClient.Legacy().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
+if err := nc.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
 glog.Errorf("Error completing deletion of pod %s: %v", pod.Name, err)
 complete = false
 }

@@ -29,7 +29,7 @@ import (
 "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/pkg/client/cache"
 "k8s.io/kubernetes/pkg/client/testing/fake"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 "k8s.io/kubernetes/pkg/util"
 "k8s.io/kubernetes/pkg/watch"
 )
@@ -63,15 +63,15 @@ type FakeNodeHandler struct {
 }

 type FakeLegacyHandler struct {
-unversioned_legacy.LegacyInterface
+unversioned_core.CoreInterface
 n *FakeNodeHandler
 }

-func (c *FakeNodeHandler) Legacy() unversioned_legacy.LegacyInterface {
-return &FakeLegacyHandler{c.Clientset.Legacy(), c}
+func (c *FakeNodeHandler) Core() unversioned_core.CoreInterface {
+return &FakeLegacyHandler{c.Clientset.Core(), c}
 }

-func (m *FakeLegacyHandler) Nodes() unversioned_legacy.NodeInterface {
+func (m *FakeLegacyHandler) Nodes() unversioned_core.NodeInterface {
 return m.n
 }

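The test fake in the hunk above keeps compiling after the rename because it embeds the new CoreInterface and overrides only Nodes(); every other method falls through to the embedded real client. A compressed, hypothetical sketch of that embed-and-override pattern (the interfaces here are trimmed stand-ins, not the real unversioned_core definitions, which carry many more methods):

package example

// Hypothetical, trimmed-down stand-ins for unversioned_core.NodeInterface
// and unversioned_core.CoreInterface.
type NodeInterface interface {
	// List, Get, Update, Delete, ... elided
}

type CoreInterface interface {
	Nodes() NodeInterface
}

// fakeNodeHandler plays the role of FakeNodeHandler: it serves canned nodes
// and records requests in tests (fields elided here).
type fakeNodeHandler struct{}

// fakeCore mirrors FakeLegacyHandler: it embeds a real CoreInterface so all
// methods remain available, then overrides Nodes() to return the fake.
type fakeCore struct {
	CoreInterface
	n *fakeNodeHandler
}

func (f *fakeCore) Nodes() NodeInterface { return f.n }
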
@@ -55,10 +55,10 @@ func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod t
 _, volumeController := framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return kubeClient.Legacy().PersistentVolumes().List(options)
+return kubeClient.Core().PersistentVolumes().List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return kubeClient.Legacy().PersistentVolumes().Watch(options)
+return kubeClient.Core().PersistentVolumes().Watch(options)
 },
 },
 &api.PersistentVolume{},
@@ -73,10 +73,10 @@ func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod t
 _, claimController := framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return kubeClient.Legacy().PersistentVolumeClaims(api.NamespaceAll).List(options)
+return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return kubeClient.Legacy().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
+return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
 },
 },
 &api.PersistentVolumeClaim{},
@@ -462,29 +462,29 @@ type realBinderClient struct {
 }

 func (c *realBinderClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Get(name)
+return c.client.Core().PersistentVolumes().Get(name)
 }

 func (c *realBinderClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Update(volume)
+return c.client.Core().PersistentVolumes().Update(volume)
 }

 func (c *realBinderClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
-return c.client.Legacy().PersistentVolumes().Delete(volume.Name, nil)
+return c.client.Core().PersistentVolumes().Delete(volume.Name, nil)
 }

 func (c *realBinderClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().UpdateStatus(volume)
+return c.client.Core().PersistentVolumes().UpdateStatus(volume)
 }

 func (c *realBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
-return c.client.Legacy().PersistentVolumeClaims(namespace).Get(name)
+return c.client.Core().PersistentVolumeClaims(namespace).Get(name)
 }

 func (c *realBinderClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).Update(claim)
+return c.client.Core().PersistentVolumeClaims(claim.Namespace).Update(claim)
 }

 func (c *realBinderClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
+return c.client.Core().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
 }

@@ -300,7 +300,7 @@ func TestExampleObjects(t *testing.T) {
 clientset.AddReactor("*", "*", core.ObjectReaction(o, api.RESTMapper))

 if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolumeClaim{}) {
-pvc, err := clientset.Legacy().PersistentVolumeClaims("ns").Get("doesntmatter")
+pvc, err := clientset.Core().PersistentVolumeClaims("ns").Get("doesntmatter")
 if err != nil {
 t.Fatalf("Error retrieving object: %v", err)
 }
@@ -321,7 +321,7 @@ func TestExampleObjects(t *testing.T) {
 }

 if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolume{}) {
-pv, err := clientset.Legacy().PersistentVolumes().Get("doesntmatter")
+pv, err := clientset.Core().PersistentVolumes().Get("doesntmatter")
 if err != nil {
 t.Fatalf("Error retrieving object: %v", err)
 }
@@ -366,7 +366,7 @@ func TestBindingWithExamples(t *testing.T) {
 clientset := &fake.Clientset{}
 clientset.AddReactor("*", "*", core.ObjectReaction(o, api.RESTMapper))

-pv, err := clientset.Legacy().PersistentVolumes().Get("any")
+pv, err := clientset.Core().PersistentVolumes().Get("any")
 if err != nil {
 t.Errorf("Unexpected error getting PV from client: %v", err)
 }
@@ -381,7 +381,7 @@ func TestBindingWithExamples(t *testing.T) {
 // Test that !Pending gets correctly added
 pv.Status.Phase = api.VolumeAvailable

-claim, error := clientset.Legacy().PersistentVolumeClaims("ns").Get("any")
+claim, error := clientset.Core().PersistentVolumeClaims("ns").Get("any")
 if error != nil {
 t.Errorf("Unexpected error getting PVC from client: %v", err)
 }

@@ -382,51 +382,51 @@ type realControllerClient struct {
 }

 func (c *realControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Get(name)
+return c.client.Core().PersistentVolumes().Get(name)
 }

 func (c *realControllerClient) ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) {
-return c.client.Legacy().PersistentVolumes().List(options)
+return c.client.Core().PersistentVolumes().List(options)
 }

 func (c *realControllerClient) WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) {
-return c.client.Legacy().PersistentVolumes().Watch(options)
+return c.client.Core().PersistentVolumes().Watch(options)
 }

 func (c *realControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Create(pv)
+return c.client.Core().PersistentVolumes().Create(pv)
 }

 func (c *realControllerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Update(volume)
+return c.client.Core().PersistentVolumes().Update(volume)
 }

 func (c *realControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
-return c.client.Legacy().PersistentVolumes().Delete(volume.Name, nil)
+return c.client.Core().PersistentVolumes().Delete(volume.Name, nil)
 }

 func (c *realControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().UpdateStatus(volume)
+return c.client.Core().PersistentVolumes().UpdateStatus(volume)
 }

 func (c *realControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
-return c.client.Legacy().PersistentVolumeClaims(namespace).Get(name)
+return c.client.Core().PersistentVolumeClaims(namespace).Get(name)
 }

 func (c *realControllerClient) ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) {
-return c.client.Legacy().PersistentVolumeClaims(namespace).List(options)
+return c.client.Core().PersistentVolumeClaims(namespace).List(options)
 }

 func (c *realControllerClient) WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) {
-return c.client.Legacy().PersistentVolumeClaims(namespace).Watch(options)
+return c.client.Core().PersistentVolumeClaims(namespace).Watch(options)
 }

 func (c *realControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).Update(claim)
+return c.client.Core().PersistentVolumeClaims(claim.Namespace).Update(claim)
 }

 func (c *realControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
+return c.client.Core().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
 }

 func (c *realControllerClient) GetKubeClient() clientset.Interface {

@@ -64,10 +64,10 @@ func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time
 _, volumeController := framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return kubeClient.Legacy().PersistentVolumes().List(options)
+return kubeClient.Core().PersistentVolumes().List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return kubeClient.Legacy().PersistentVolumes().Watch(options)
+return kubeClient.Core().PersistentVolumes().Watch(options)
 },
 },
 &api.PersistentVolume{},
@@ -258,19 +258,19 @@ type realRecyclerClient struct {
 }

 func (c *realRecyclerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Get(name)
+return c.client.Core().PersistentVolumes().Get(name)
 }

 func (c *realRecyclerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().Update(volume)
+return c.client.Core().PersistentVolumes().Update(volume)
 }

 func (c *realRecyclerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
-return c.client.Legacy().PersistentVolumes().Delete(volume.Name, nil)
+return c.client.Core().PersistentVolumes().Delete(volume.Name, nil)
 }

 func (c *realRecyclerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-return c.client.Legacy().PersistentVolumes().UpdateStatus(volume)
+return c.client.Core().PersistentVolumes().UpdateStatus(volume)
 }

 // PersistentVolumeRecycler is host to the volume plugins, but does not actually mount any volumes.

@@ -28,8 +28,8 @@ import (
 "k8s.io/kubernetes/pkg/api/unversioned"
 "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/pkg/client/record"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 unversioned_extensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
 "k8s.io/kubernetes/pkg/util"
 )
@@ -54,7 +54,7 @@ type HorizontalController struct {
 var downscaleForbiddenWindow = 5 * time.Minute
 var upscaleForbiddenWindow = 3 * time.Minute

-func NewHorizontalController(evtNamespacer unversioned_legacy.EventsGetter, scaleNamespacer unversioned_extensions.ScalesGetter, hpaNamespacer unversioned_extensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient) *HorizontalController {
+func NewHorizontalController(evtNamespacer unversioned_core.EventsGetter, scaleNamespacer unversioned_extensions.ScalesGetter, hpaNamespacer unversioned_extensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient) *HorizontalController {
 broadcaster := record.NewBroadcaster()
 broadcaster.StartRecordingToSink(evtNamespacer.Events(""))
 recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

@@ -217,7 +217,7 @@ func (tc *testCase) verifyResults(t *testing.T) {
 func (tc *testCase) runTest(t *testing.T) {
 testClient := tc.prepareTestClient(t)
 metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
-hpaController := NewHorizontalController(testClient.Legacy(), testClient.Extensions(), testClient.Extensions(), metricsClient)
+hpaController := NewHorizontalController(testClient.Core(), testClient.Extensions(), testClient.Extensions(), metricsClient)
 err := hpaController.reconcileAutoscalers()
 assert.Equal(t, nil, err)
 if tc.verifyEvents {

@@ -113,7 +113,7 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
 avgRequest int64, timestamp time.Time, err error) {

 labelSelector := labels.SelectorFromSet(labels.Set(selector))
-podList, err := h.client.Legacy().Pods(namespace).
+podList, err := h.client.Core().Pods(namespace).
 List(api.ListOptions{LabelSelector: labelSelector})

 if err != nil {
@@ -152,7 +152,7 @@ func (h *HeapsterMetricsClient) GetCustomMetric(customMetricName string, namespa
 metricSpec := getHeapsterCustomMetricDefinition(customMetricName)

 labelSelector := labels.SelectorFromSet(labels.Set(selector))
-podList, err := h.client.Legacy().Pods(namespace).List(api.ListOptions{LabelSelector: labelSelector})
+podList, err := h.client.Core().Pods(namespace).List(api.ListOptions{LabelSelector: labelSelector})

 if err != nil {
 return nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
@@ -179,7 +179,7 @@ func (h *HeapsterMetricsClient) getForPods(metricSpec metricDefinition, namespac
 strings.Join(podNames, ","),
 metricSpec.name)

-resultRaw, err := h.client.Legacy().Services(h.heapsterNamespace).
+resultRaw, err := h.client.Core().Services(h.heapsterNamespace).
 ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
 DoRaw()

@@ -30,7 +30,7 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
 "k8s.io/kubernetes/pkg/client/record"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/runtime"
@@ -93,7 +93,7 @@ type ReplicaSetController struct {
 func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicaSetController {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
-eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
+eventBroadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})

 rsc := &ReplicaSetController{
 kubeClient: kubeClient,
@@ -150,10 +150,10 @@ func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod contro
 rsc.podStore.Store, rsc.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rsc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return rsc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rsc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return rsc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},

@@ -29,7 +29,7 @@ import (
 "k8s.io/kubernetes/pkg/client/cache"
 clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
 "k8s.io/kubernetes/pkg/client/record"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/framework"
 "k8s.io/kubernetes/pkg/labels"
@@ -95,7 +95,7 @@ type ReplicationManager struct {
 func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
-eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
+eventBroadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})

 rm := &ReplicationManager{
 kubeClient: kubeClient,
@@ -111,10 +111,10 @@ func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controll
 rm.rcStore.Store, rm.rcController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).List(options)
+return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).Watch(options)
+return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
 },
 },
 &api.ReplicationController{},
@@ -152,10 +152,10 @@ func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controll
 rm.podStore.Store, rm.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rm.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return rm.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rm.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return rm.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},
@@ -453,7 +453,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
 }

 // Always updates status as pods come up or die.
-if err := updateReplicaCount(rm.kubeClient.Legacy().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
+if err := updateReplicaCount(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
 // Multiple things could lead to this update failing. Requeuing the controller ensures
 // we retry with some fairness.
 glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rc.Namespace, rc.Name, err)

@@ -600,7 +600,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
 c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
 return true, &api.ReplicationController{}, fmt.Errorf("Fake error")
 })
-fakeRCClient := c.Legacy().ReplicationControllers("default")
+fakeRCClient := c.Core().ReplicationControllers("default")
 numReplicas := 10
 updateReplicaCount(fakeRCClient, *rc, numReplicas)
 updates, gets := 0, 0

@@ -21,11 +21,11 @@ package replication
 import (
 "github.com/golang/glog"
 "k8s.io/kubernetes/pkg/api"
-unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
 )

 // updateReplicaCount attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
-func updateReplicaCount(rcClient unversioned_legacy.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
+func updateReplicaCount(rcClient unversioned_core.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
 // This is the steady state. It happens when the rc doesn't have any expectations, since
 // we do a periodic relist every 30s. If the generations differ but the replicas are
 // the same, a caller might've resized to the same replica count.

@@ -66,10 +66,10 @@ func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod con
 rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rq.kubeClient.Legacy().ResourceQuotas(api.NamespaceAll).List(options)
+return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rq.kubeClient.Legacy().ResourceQuotas(api.NamespaceAll).Watch(options)
+return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
 },
 },
 &api.ResourceQuota{},
@@ -106,10 +106,10 @@ func NewResourceQuotaController(kubeClient clientset.Interface, resyncPeriod con
 rq.podStore.Store, rq.podController = framework.NewInformer(
 &cache.ListWatch{
 ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rq.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
+return rq.kubeClient.Core().Pods(api.NamespaceAll).List(options)
 },
 WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rq.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
+return rq.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
 },
 },
 &api.Pod{},
@@ -265,7 +265,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e

 pods := &api.PodList{}
 if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
-pods, err = rq.kubeClient.Legacy().Pods(usage.Namespace).List(api.ListOptions{})
+pods, err = rq.kubeClient.Core().Pods(usage.Namespace).List(api.ListOptions{})
 if err != nil {
 return err
 }
@@ -288,31 +288,31 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e
 case api.ResourcePods:
 value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
 case api.ResourceServices:
-items, err := rq.kubeClient.Legacy().Services(usage.Namespace).List(api.ListOptions{})
+items, err := rq.kubeClient.Core().Services(usage.Namespace).List(api.ListOptions{})
 if err != nil {
 return err
 }
 value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
 case api.ResourceReplicationControllers:
-items, err := rq.kubeClient.Legacy().ReplicationControllers(usage.Namespace).List(api.ListOptions{})
+items, err := rq.kubeClient.Core().ReplicationControllers(usage.Namespace).List(api.ListOptions{})
 if err != nil {
 return err
 }
 value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
 case api.ResourceQuotas:
-items, err := rq.kubeClient.Legacy().ResourceQuotas(usage.Namespace).List(api.ListOptions{})
+items, err := rq.kubeClient.Core().ResourceQuotas(usage.Namespace).List(api.ListOptions{})
 if err != nil {
 return err
 }
 value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
 case api.ResourceSecrets:
-items, err := rq.kubeClient.Legacy().Secrets(usage.Namespace).List(api.ListOptions{})
+items, err := rq.kubeClient.Core().Secrets(usage.Namespace).List(api.ListOptions{})
 if err != nil {
 return err
 }
 value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
 case api.ResourcePersistentVolumeClaims:
-items, err := rq.kubeClient.Legacy().PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
+items, err := rq.kubeClient.Core().PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
 if err != nil {
 return err
 }
@@ -334,7 +334,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e

 // update the usage only if it changed
 if dirty {
-_, err = rq.kubeClient.Legacy().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
+_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
 return err
 }
 return nil

@@ -59,7 +59,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
|
||||
}
|
||||
// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
|
||||
// and reduce the number of lists needed.
|
||||
nodeList, err := rc.kubeClient.Legacy().Nodes().List(api.ListOptions{})
|
||||
nodeList, err := rc.kubeClient.Core().Nodes().List(api.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing nodes: %v", err)
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
|
||||
unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
@@ -82,7 +82,7 @@ type ServiceController struct {
|
||||
// (like load balancers) in sync with the registry.
|
||||
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController {
|
||||
broadcaster := record.NewBroadcaster()
|
||||
broadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
|
||||
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{kubeClient.Core().Events("")})
|
||||
recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})
|
||||
|
||||
return &ServiceController{
|
||||
@@ -134,13 +134,13 @@ func (s *ServiceController) Run(serviceSyncPeriod, nodeSyncPeriod time.Duration)
|
||||
}),
|
||||
s.cache,
|
||||
)
|
||||
lw := cache.NewListWatchFromClient(s.kubeClient.(*clientset.Clientset).LegacyClient, "services", api.NamespaceAll, fields.Everything())
|
||||
lw := cache.NewListWatchFromClient(s.kubeClient.(*clientset.Clientset).CoreClient, "services", api.NamespaceAll, fields.Everything())
|
||||
cache.NewReflector(lw, &api.Service{}, serviceQueue, serviceSyncPeriod).Run()
|
||||
for i := 0; i < workerGoroutines; i++ {
|
||||
go s.watchServices(serviceQueue)
|
||||
}
|
||||
|
||||
nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*clientset.Clientset).LegacyClient, "nodes", api.NamespaceAll, fields.Everything())
|
||||
nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*clientset.Clientset).CoreClient, "nodes", api.NamespaceAll, fields.Everything())
|
||||
cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run()
|
||||
go s.nodeSyncLoop(nodeSyncPeriod)
|
||||
return nil
|
||||
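Note: most informers in this commit wrap List/Watch in closures over the typed Core() interface, but ServiceController.Run builds its ListWatch from the raw REST client, which is why it reaches into the concrete *Clientset and the LegacyClient -> CoreClient field rename shows up here. A hedged sketch of the two equivalent constructions (helper names are made up; the calls themselves mirror the hunks in this diff):

package example

import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/cache"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/watch"
)

// closureListWatch lists and watches Services through the typed Core() group,
// the style used by the quota and service-account controllers above.
func closureListWatch(c clientset.Interface) *cache.ListWatch {
    return &cache.ListWatch{
        ListFunc: func(options api.ListOptions) (runtime.Object, error) {
            return c.Core().Services(api.NamespaceAll).List(options)
        },
        WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
            return c.Core().Services(api.NamespaceAll).Watch(options)
        },
    }
}

// restListWatch builds the same ListWatch from the underlying REST client,
// the style ServiceController.Run uses, hence the CoreClient field rename.
func restListWatch(c *clientset.Clientset) *cache.ListWatch {
    return cache.NewListWatchFromClient(c.CoreClient, "services", api.NamespaceAll, fields.Everything())
}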
@@ -344,7 +344,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
func (s *ServiceController) persistUpdate(service *api.Service) error {
var err error
for i := 0; i < clientRetryCount; i++ {
-_, err = s.kubeClient.Legacy().Services(service.Namespace).UpdateStatus(service)
+_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)
if err == nil {
return nil
}

@@ -82,11 +82,11 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = accountSelector
-return e.client.Legacy().ServiceAccounts(api.NamespaceAll).List(options)
+return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = accountSelector
-return e.client.Legacy().ServiceAccounts(api.NamespaceAll).Watch(options)
+return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
},
},
&api.ServiceAccount{},

@@ -100,10 +100,10 @@ func NewServiceAccountsController(cl clientset.Interface, options ServiceAccount
e.namespaces, e.namespaceController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return e.client.Legacy().Namespaces().List(options)
+return e.client.Core().Namespaces().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return e.client.Legacy().Namespaces().Watch(options)
+return e.client.Core().Namespaces().Watch(options)
},
},
&api.Namespace{},

@@ -217,7 +217,7 @@ func (e *ServiceAccountsController) createServiceAccountIfNeeded(sa api.ServiceA
// createDefaultServiceAccount creates a default ServiceAccount in the specified namespace
func (e *ServiceAccountsController) createServiceAccount(sa api.ServiceAccount, namespace string) {
sa.Namespace = namespace
-if _, err := e.client.Legacy().ServiceAccounts(namespace).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
+if _, err := e.client.Core().ServiceAccounts(namespace).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) {
glog.Error(err)
}
}

@@ -41,10 +41,10 @@ func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTok
return clientGetter{c}
}
func (c clientGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) {
-return c.client.Legacy().ServiceAccounts(namespace).Get(name)
+return c.client.Core().ServiceAccounts(namespace).Get(name)
}
func (c clientGetter) GetSecret(namespace, name string) (*api.Secret, error) {
-return c.client.Legacy().Secrets(namespace).Get(name)
+return c.client.Core().Secrets(namespace).Get(name)
}

// registryGetter implements ServiceAccountTokenGetter using a service account and secret registry

@@ -72,10 +72,10 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return e.client.Legacy().ServiceAccounts(api.NamespaceAll).List(options)
+return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return e.client.Legacy().ServiceAccounts(api.NamespaceAll).Watch(options)
+return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
},
},
&api.ServiceAccount{},

@@ -93,11 +93,11 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector
-return e.client.Legacy().Secrets(api.NamespaceAll).List(options)
+return e.client.Core().Secrets(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = tokenSelector
-return e.client.Legacy().Secrets(api.NamespaceAll).Watch(options)
+return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
},
},
&api.Secret{},

@@ -292,7 +292,7 @@ func (e *TokensController) createSecretIfNeeded(serviceAccount *api.ServiceAccou
func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) error {
// We don't want to update the cache's copy of the service account
// so add the secret to a freshly retrieved copy of the service account
-serviceAccounts := e.client.Legacy().ServiceAccounts(serviceAccount.Namespace)
+serviceAccounts := e.client.Core().ServiceAccounts(serviceAccount.Namespace)
liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name)
if err != nil {
return err

@@ -330,7 +330,7 @@ func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) erro
}

// Save the secret
-if _, err := e.client.Legacy().Secrets(serviceAccount.Namespace).Create(secret); err != nil {
+if _, err := e.client.Core().Secrets(serviceAccount.Namespace).Create(secret); err != nil {
return err
}

@@ -340,7 +340,7 @@ func (e *TokensController) createSecret(serviceAccount *api.ServiceAccount) erro
if err != nil {
// we weren't able to use the token, try to clean it up.
glog.V(2).Infof("Deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
-if err := e.client.Legacy().Secrets(secret.Namespace).Delete(secret.Name, nil); err != nil {
+if err := e.client.Core().Secrets(secret.Namespace).Delete(secret.Name, nil); err != nil {
glog.Error(err) // if we fail, just log it
}
}

@@ -390,7 +390,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco
secret.Annotations[api.ServiceAccountUIDKey] = string(serviceAccount.UID)

// Save the secret
-if _, err := e.client.Legacy().Secrets(secret.Namespace).Update(secret); err != nil {
+if _, err := e.client.Core().Secrets(secret.Namespace).Update(secret); err != nil {
return err
}
return nil

@@ -398,7 +398,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco

// deleteSecret deletes the given secret
func (e *TokensController) deleteSecret(secret *api.Secret) error {
-return e.client.Legacy().Secrets(secret.Namespace).Delete(secret.Name, nil)
+return e.client.Core().Secrets(secret.Namespace).Delete(secret.Name, nil)
}

// removeSecretReferenceIfNeeded updates the given ServiceAccount to remove a reference to the given secretName if needed.

@@ -411,7 +411,7 @@ func (e *TokensController) removeSecretReferenceIfNeeded(serviceAccount *api.Ser

// We don't want to update the cache's copy of the service account
// so remove the secret from a freshly retrieved copy of the service account
-serviceAccounts := e.client.Legacy().ServiceAccounts(serviceAccount.Namespace)
+serviceAccounts := e.client.Core().ServiceAccounts(serviceAccount.Namespace)
serviceAccount, err := serviceAccounts.Get(serviceAccount.Name)
if err != nil {
return err

@@ -461,7 +461,7 @@ func (e *TokensController) getServiceAccount(secret *api.Secret, fetchOnCacheMis
}

if fetchOnCacheMiss {
-serviceAccount, err := e.client.Legacy().ServiceAccounts(secret.Namespace).Get(name)
+serviceAccount, err := e.client.Core().ServiceAccounts(secret.Namespace).Get(name)
if apierrors.IsNotFound(err) {
return nil, nil
}
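Note: every hunk in this commit follows the same mechanical pattern. A minimal, hedged summary of the renames a consumer of the release_1_2 clientset has to make (listDefaultSecrets and eventSink are illustrative names, not code from this commit; the calls and import paths are taken from the hunks above):

package example

import (
    "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
    // was: unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
    unversioned_core "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
)

// listDefaultSecrets shows the renamed group accessor: Core() replaces
// Legacy(); resources, namespacing and verbs are untouched.
func listDefaultSecrets(c clientset.Interface) (*api.SecretList, error) {
    return c.Core().Secrets("default").List(api.ListOptions{})
}

// eventSink shows the renamed helper package: EventSinkImpl now comes from
// the generated core package and wraps the Core() events client.
func eventSink(c clientset.Interface) *unversioned_core.EventSinkImpl {
    return &unversioned_core.EventSinkImpl{c.Core().Events("")}
}

The remaining rename is the struct field on the concrete *Clientset (LegacyClient -> CoreClient), needed only where callers want the raw REST client, as in ServiceController.Run above.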