	Merge pull request #20312 from caesarxuchao/replace-many-controllers
Auto commit by PR queue bot
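This PR removes the per-binary clientForUserAgentOrDie helper in favor of the shared client.AddUserAgent config helper, and moves the replication, daemon set, job, deployment, and persistent volume controllers onto the generated release_1_1 clientset (it also threads a clientset into the kubelet). A minimal sketch of the two client-construction patterns used throughout the hunks below, assuming only the functions that appear in the diff (the surrounding wiring is illustrative):

package sketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// buildClients derives a per-controller config with client.AddUserAgent and then
// builds either the unversioned client (controllers not yet converted) or the
// generated clientset (controllers converted by this PR) from the same kubeconfig.
func buildClients(kubeconfig *client.Config) (*client.Client, *clientset.Clientset) {
	c := client.NewOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller"))
	cs := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller"))
	return c, cs
}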
@@ -38,6 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
@@ -133,6 +134,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 	}
 
 	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 
 	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
 	// We will fix this by supporting multiple group versions in Config
@@ -188,7 +190,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 		Run(3, util.NeverStop)
 
 	// TODO: Write an integration test for the replication controllers watch.
-	go replicationcontroller.NewReplicationManager(cl, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
+	go replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
 		Run(3, util.NeverStop)
 
 	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),

@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/leaderelection"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
@@ -96,16 +97,6 @@ func ResyncPeriod(s *options.CMServer) func() time.Duration {
 	}
 }
 
-func clientForUserAgentOrDie(config client.Config, userAgent string) *client.Client {
-	fullUserAgent := client.DefaultKubernetesUserAgent() + "/" + userAgent
-	config.UserAgent = fullUserAgent
-	kubeClient, err := client.New(&config)
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
-	}
-	return kubeClient
-}
-
 // Run runs the CMServer.  This should never exit.
 func Run(s *options.CMServer) error {
 	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
@@ -182,17 +173,17 @@ func Run(s *options.CMServer) error {
 }
 
 func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *client.Config, stop <-chan struct{}) error {
-	go endpointcontroller.NewEndpointController(clientForUserAgentOrDie(*kubeconfig, "endpoint-controller"), ResyncPeriod(s)).
+	go endpointcontroller.NewEndpointController(client.NewOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")), ResyncPeriod(s)).
 		Run(s.ConcurrentEndpointSyncs, util.NeverStop)
 
 	go replicationcontroller.NewReplicationManager(
-		clientForUserAgentOrDie(*kubeconfig, "replication-controller"),
+		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")),
 		ResyncPeriod(s),
 		replicationcontroller.BurstReplicas,
 	).Run(s.ConcurrentRCSyncs, util.NeverStop)
 
 	if s.TerminatedPodGCThreshold > 0 {
-		go gc.New(clientForUserAgentOrDie(*kubeconfig, "garbage-collector"), ResyncPeriod(s), s.TerminatedPodGCThreshold).
+		go gc.New(client.NewOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
 			Run(util.NeverStop)
 	}
 
@@ -201,13 +192,13 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatalf("Cloud provider could not be initialized: %v", err)
 	}
 
-	nodeController := nodecontroller.NewNodeController(cloud, clientForUserAgentOrDie(*kubeconfig, "node-controller"),
+	nodeController := nodecontroller.NewNodeController(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
 		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod)
 
-	serviceController := servicecontroller.New(cloud, clientForUserAgentOrDie(*kubeconfig, "service-controller"), s.ClusterName)
+	serviceController := servicecontroller.New(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
 	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
 		glog.Errorf("Failed to start service controller: %v", err)
 	}
@@ -218,7 +209,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		} else if routes, ok := cloud.Routes(); !ok {
 			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
 		} else {
-			routeController := routecontroller.New(routes, clientForUserAgentOrDie(*kubeconfig, "route-controller"), s.ClusterName, &s.ClusterCIDR)
+			routeController := routecontroller.New(routes, client.NewOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, &s.ClusterCIDR)
 			routeController.Run(s.NodeSyncPeriod)
 		}
 	} else {
@@ -226,7 +217,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	}
 
 	go resourcequotacontroller.NewResourceQuotaController(
-		clientForUserAgentOrDie(*kubeconfig, "resourcequota-controller"),
+		client.NewOrDie(client.AddUserAgent(kubeconfig, "resourcequota-controller")),
 		controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)
 
 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
@@ -249,7 +240,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatalf("Failed to get supported resources from server: %v", err)
 	}
 
-	namespacecontroller.NewNamespaceController(clientForUserAgentOrDie(*kubeconfig, "namespace-controller"), versions, s.NamespaceSyncPeriod).Run()
+	namespacecontroller.NewNamespaceController(client.NewOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), versions, s.NamespaceSyncPeriod).Run()
 
 	groupVersion := "extensions/v1beta1"
 	resources, found := resourceMap[groupVersion]
@@ -258,7 +249,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Infof("Starting %s apis", groupVersion)
 		if containsResource(resources, "horizontalpodautoscalers") {
 			glog.Infof("Starting horizontal pod controller.")
-			hpaClient := clientForUserAgentOrDie(*kubeconfig, "horizontal-pod-autoscaler")
+			hpaClient := client.NewOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
 			metricsClient := metrics.NewHeapsterMetricsClient(
 				hpaClient,
 				metrics.DefaultHeapsterNamespace,
@@ -272,19 +263,19 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 
 		if containsResource(resources, "daemonsets") {
 			glog.Infof("Starting daemon set controller")
-			go daemon.NewDaemonSetsController(clientForUserAgentOrDie(*kubeconfig, "daemon-set-controller"), ResyncPeriod(s)).
+			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s)).
 				Run(s.ConcurrentDSCSyncs, util.NeverStop)
 		}
 
 		if containsResource(resources, "jobs") {
 			glog.Infof("Starting job controller")
-			go job.NewJobController(clientForUserAgentOrDie(*kubeconfig, "job-controller"), ResyncPeriod(s)).
+			go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), ResyncPeriod(s)).
 				Run(s.ConcurrentJobSyncs, util.NeverStop)
 		}
 
 		if containsResource(resources, "deployments") {
 			glog.Infof("Starting deployment controller")
-			go deployment.NewDeploymentController(clientForUserAgentOrDie(*kubeconfig, "deployment-controller"), ResyncPeriod(s)).
+			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
 				Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
 		}
 	}
@@ -295,17 +286,17 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
 	}
 
-	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-binder"), s.PVClaimBinderSyncPeriod)
+	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod)
 	pvclaimBinder.Run()
 
-	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-recycler"), s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
+	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")), s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
 	if err != nil {
 		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
 	}
 	pvRecycler.Run()
 
 	if provisioner != nil {
-		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-provisioner")), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
+		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
 		if err != nil {
 			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
 		}
@@ -332,7 +323,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 			glog.Errorf("Error reading key for service account token controller: %v", err)
 		} else {
 			serviceaccountcontroller.NewTokensController(
-				clientForUserAgentOrDie(*kubeconfig, "tokens-controller"),
+				client.NewOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
 				serviceaccountcontroller.TokensControllerOptions{
 					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
 					RootCA:         rootCA,
@@ -342,7 +333,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	}
 
 	serviceaccountcontroller.NewServiceAccountsController(
-		clientForUserAgentOrDie(*kubeconfig, "service-account-controller"),
+		client.NewOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
 		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
 	).Run()
 

@@ -38,6 +38,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/capabilities"
 	"k8s.io/kubernetes/pkg/client/chaosclient"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
@@ -732,8 +733,11 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 	// used by kubelet. Since NewMainKubelet expects a client interface, we need to make sure we are not passing
 	// a nil pointer to it when what we really want is a nil interface.
 	var kubeClient client.Interface
+	var c clientset.Interface
 	if kc.KubeClient != nil {
 		kubeClient = kc.KubeClient
+		// TODO: remove this when we've refactored kubelet to only use clientset.
+		c = clientset.FromUnversionedClient(kc.KubeClient)
 	}
 
 	gcPolicy := kubecontainer.ContainerGCPolicy{
@@ -755,6 +759,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 		kc.NodeName,
 		kc.DockerClient,
 		kubeClient,
+		c,
 		kc.RootDirectory,
 		kc.PodInfraContainerImage,
 		kc.SyncFrequency,

@@ -29,6 +29,7 @@ import (
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 	"k8s.io/kubernetes/contrib/mesos/pkg/node"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
@@ -126,14 +127,14 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Fatal(server.ListenAndServe())
 	}()
 
-	endpoints := s.createEndpointController(clientForUserAgentOrDie(*kubeconfig, "endpoint-controller"))
+	endpoints := s.createEndpointController(client.NewOrDie(client.AddUserAgent(kubeconfig, "endpoint-controller")))
 	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
 
-	go replicationcontroller.NewReplicationManager(clientForUserAgentOrDie(*kubeconfig, "replication-controller"), s.resyncPeriod, replicationcontroller.BurstReplicas).
+	go replicationcontroller.NewReplicationManager(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas).
 		Run(s.ConcurrentRCSyncs, util.NeverStop)
 
 	if s.TerminatedPodGCThreshold > 0 {
-		go gc.New(clientForUserAgentOrDie(*kubeconfig, "garbage-collector"), s.resyncPeriod, s.TerminatedPodGCThreshold).
+		go gc.New(client.NewOrDie(client.AddUserAgent(kubeconfig, "garbage-collector")), s.resyncPeriod, s.TerminatedPodGCThreshold).
 			Run(util.NeverStop)
 	}
 
@@ -146,18 +147,18 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Fatalf("Cloud provider could not be initialized: %v", err)
 	}
 
-	nodeController := nodecontroller.NewNodeController(cloud, clientForUserAgentOrDie(*kubeconfig, "node-controller"),
+	nodeController := nodecontroller.NewNodeController(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "node-controller")),
 		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod)
 
-	nodeStatusUpdaterController := node.NewStatusUpdater(clientForUserAgentOrDie(*kubeconfig, "node-status-controller"), s.NodeMonitorPeriod, time.Now)
+	nodeStatusUpdaterController := node.NewStatusUpdater(client.NewOrDie(client.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod, time.Now)
 	if err := nodeStatusUpdaterController.Run(util.NeverStop); err != nil {
 		glog.Fatalf("Failed to start node status update controller: %v", err)
 	}
 
-	serviceController := servicecontroller.New(cloud, clientForUserAgentOrDie(*kubeconfig, "service-controller"), s.ClusterName)
+	serviceController := servicecontroller.New(cloud, client.NewOrDie(client.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
 	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
 		glog.Errorf("Failed to start service controller: %v", err)
 	}
@@ -167,12 +168,12 @@ func (s *CMServer) Run(_ []string) error {
 		if !ok {
 			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
 		}
-		routeController := routecontroller.New(routes, clientForUserAgentOrDie(*kubeconfig, "route-controller"), s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
+		routeController := routecontroller.New(routes, client.NewOrDie(client.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
 		routeController.Run(s.NodeSyncPeriod)
 	}
 
 	go resourcequotacontroller.NewResourceQuotaController(
-		clientForUserAgentOrDie(*kubeconfig, "resource-quota-controller"), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)
+		client.NewOrDie(client.AddUserAgent(kubeconfig, "resource-quota-controller")), controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)
 
 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
 	// important when we start apiserver and controller manager at the same time.
@@ -194,7 +195,7 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Fatalf("Failed to get supported resources from server: %v", err)
 	}
 
-	namespaceController := namespacecontroller.NewNamespaceController(clientForUserAgentOrDie(*kubeconfig, "namespace-controller"), &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
+	namespaceController := namespacecontroller.NewNamespaceController(client.NewOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
 	namespaceController.Run()
 
 	groupVersion := "extensions/v1beta1"
@@ -204,7 +205,7 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Infof("Starting %s apis", groupVersion)
 		if containsResource(resources, "horizontalpodautoscalers") {
 			glog.Infof("Starting horizontal pod controller.")
-			hpaClient := clientForUserAgentOrDie(*kubeconfig, "horizontal-pod-autoscaler")
+			hpaClient := client.NewOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
 			metricsClient := metrics.NewHeapsterMetricsClient(
 				hpaClient,
 				metrics.DefaultHeapsterNamespace,
@@ -218,19 +219,19 @@ func (s *CMServer) Run(_ []string) error {
 
 		if containsResource(resources, "daemonsets") {
 			glog.Infof("Starting daemon set controller")
-			go daemon.NewDaemonSetsController(clientForUserAgentOrDie(*kubeconfig, "daemon-set-controller"), s.resyncPeriod).
+			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod).
 				Run(s.ConcurrentDSCSyncs, util.NeverStop)
 		}
 
 		if containsResource(resources, "jobs") {
 			glog.Infof("Starting job controller")
-			go job.NewJobController(clientForUserAgentOrDie(*kubeconfig, "job-controller"), s.resyncPeriod).
+			go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
 				Run(s.ConcurrentJobSyncs, util.NeverStop)
 		}
 
 		if containsResource(resources, "deployments") {
 			glog.Infof("Starting deployment controller")
-			go deployment.NewDeploymentController(clientForUserAgentOrDie(*kubeconfig, "deployment-controller"), s.resyncPeriod).
+			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
 				Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
 		}
 	}
@@ -241,17 +242,17 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
 	}
 
-	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-binder"), s.PVClaimBinderSyncPeriod)
+	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod)
 	pvclaimBinder.Run()
 
-	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-recycler"), s.PVClaimBinderSyncPeriod, kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
+	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")), s.PVClaimBinderSyncPeriod, kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
 	if err != nil {
 		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
 	}
 	pvRecycler.Run()
 
 	if provisioner != nil {
-		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-controller")), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
+		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
 		if err != nil {
 			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
 		}
@@ -278,7 +279,7 @@ func (s *CMServer) Run(_ []string) error {
 			glog.Errorf("Error reading key for service account token controller: %v", err)
 		} else {
 			serviceaccountcontroller.NewTokensController(
-				clientForUserAgentOrDie(*kubeconfig, "tokens-controller"),
+				client.NewOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
 				serviceaccountcontroller.TokensControllerOptions{
 					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
 					RootCA:         rootCA,
@@ -288,23 +289,13 @@ func (s *CMServer) Run(_ []string) error {
 	}
 
 	serviceaccountcontroller.NewServiceAccountsController(
-		clientForUserAgentOrDie(*kubeconfig, "service-account-controller"),
+		client.NewOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
 		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
 	).Run()
 
 	select {}
 }
 
-func clientForUserAgentOrDie(config client.Config, userAgent string) *client.Client {
-	fullUserAgent := client.DefaultKubernetesUserAgent() + "/" + userAgent
-	config.UserAgent = fullUserAgent
-	kubeClient, err := client.New(&config)
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
-	}
-	return kubeClient
-}
-
 func (s *CMServer) createEndpointController(client *client.Client) kmendpoint.EndpointController {
 	if s.UseHostPortEndpoints {
 		glog.V(2).Infof("Creating hostIP:hostPort endpoint controller")

@@ -830,6 +830,8 @@ const (
 	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
 )
 
+// +genclient=true
+
 // ReplicaSet represents the configuration of a replica set.
 type ReplicaSet struct {
 	unversioned.TypeMeta `json:",inline"`

@@ -0,0 +1,42 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package release_1_1
+
+import (
+	extensions_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
+	legacy_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
+	"k8s.io/kubernetes/pkg/client/unversioned"
+)
+
+// FromUnversionedClient adapts a pkg/client/unversioned#Client to a Clientset.
+// This function is temporary. We will remove it when everyone has moved to using
+// Clientset. New code should NOT use this function.
+func FromUnversionedClient(c *unversioned.Client) *Clientset {
+	var clientset Clientset
+	if c != nil {
+		clientset.LegacyClient = legacy_unversioned.New(c.RESTClient)
+	} else {
+		clientset.LegacyClient = legacy_unversioned.New(nil)
+	}
+	if c != nil && c.ExtensionsClient != nil {
+		clientset.ExtensionsClient = extensions_unversioned.New(c.ExtensionsClient.RESTClient)
+	} else {
+		clientset.ExtensionsClient = extensions_unversioned.New(nil)
+	}
+
+	return &clientset
+}

@@ -57,7 +57,6 @@ type ObjectScheme interface {
 // ObjectRetriever interface to satisfy retrieval of lists or retrieval of single items.
 // TODO: add support for sub resources
 func ObjectReaction(o ObjectRetriever, mapper meta.RESTMapper) ReactionFunc {
-
 	return func(action Action) (bool, runtime.Object, error) {
 		kind, err := mapper.KindFor(unversioned.GroupVersionResource{Resource: action.GetResource()})
 		if err != nil {
@@ -153,12 +152,8 @@ func NewObjects(scheme ObjectScheme, decoder runtime.Decoder) ObjectRetriever {
 }
 
 func (o objects) Kind(kind unversioned.GroupVersionKind, name string) (runtime.Object, error) {
-	// TODO our test clients deal in internal versions.  We need to plumb that knowledge down here
-	// we might do this via an extra function to the scheme to allow getting internal group versions
-	// I'm punting for now
-	kind.Version = ""
-
-	empty, _ := o.scheme.New(kind)
+	kind.Version = runtime.APIVersionInternal
+	empty, err := o.scheme.New(kind)
 	nilValue := reflect.Zero(reflect.TypeOf(empty)).Interface().(runtime.Object)
 
 	arr, ok := o.types[kind.Kind]

@@ -18,7 +18,7 @@ package fake
 
 import (
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/testing/core"
 	extensions_unversioned "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
 	extensions_unversioned_fake "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned/fake"
@@ -45,14 +45,14 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
 	return &Clientset{fakePtr}
 }
 
-// Clientset implements release_1_1.Interface. Meant to be embedded into a
+// Clientset implements clientset.Interface. Meant to be embedded into a
 // struct to get a default implementation. This makes faking out just the method
 // you want to test easier.
 type Clientset struct {
 	core.Fake
 }
 
-var _ release_1_1.Interface = &Clientset{}
+var _ clientset.Interface = &Clientset{}
 
 func (c *Clientset) Legacy() legacy_unversioned.LegacyInterface {
 	return &legacy_unversioned_fake.FakeLegacy{&c.Fake}

@@ -0,0 +1,28 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import "k8s.io/kubernetes/pkg/apis/extensions"
+
+type DeploymentExpansion interface {
+	Rollback(*extensions.DeploymentRollback) error
+}
+
+// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace.
+func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error {
+	return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error()
+}

			||||||
@@ -28,6 +28,7 @@ type ExtensionsInterface interface {
 | 
				
			|||||||
	HorizontalPodAutoscalersGetter
 | 
						HorizontalPodAutoscalersGetter
 | 
				
			||||||
	IngressesGetter
 | 
						IngressesGetter
 | 
				
			||||||
	JobsGetter
 | 
						JobsGetter
 | 
				
			||||||
 | 
						ReplicaSetsGetter
 | 
				
			||||||
	ScalesGetter
 | 
						ScalesGetter
 | 
				
			||||||
	ThirdPartyResourcesGetter
 | 
						ThirdPartyResourcesGetter
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
@@ -57,6 +58,10 @@ func (c *ExtensionsClient) Jobs(namespace string) JobInterface {
 | 
				
			|||||||
	return newJobs(c, namespace)
 | 
						return newJobs(c, namespace)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface {
 | 
				
			||||||
 | 
						return newReplicaSets(c, namespace)
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func (c *ExtensionsClient) Scales(namespace string) ScaleInterface {
 | 
					func (c *ExtensionsClient) Scales(namespace string) ScaleInterface {
 | 
				
			||||||
	return newScales(c, namespace)
 | 
						return newScales(c, namespace)
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 
 | 
				
@@ -0,0 +1,33 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+)
+
+func (c *FakeDeployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error {
+	action := core.CreateActionImpl{}
+	action.Verb = "create"
+	action.Resource = "deployments"
+	action.Subresource = "rollback"
+	action.Object = deploymentRollback
+
+	_, err := c.Fake.Invokes(action, deploymentRollback)
+	return err
+}

@@ -45,6 +45,10 @@ func (c *FakeExtensions) Jobs(namespace string) unversioned.JobInterface {
 	return &FakeJobs{c, namespace}
 }
 
+func (c *FakeExtensions) ReplicaSets(namespace string) unversioned.ReplicaSetInterface {
+	return &FakeReplicaSets{c, namespace}
+}
+
 func (c *FakeExtensions) Scales(namespace string) unversioned.ScaleInterface {
 	return &FakeScales{c, namespace}
 }

			|||||||
@@ -0,0 +1,113 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+	api "k8s.io/kubernetes/pkg/api"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions"
+	core "k8s.io/kubernetes/pkg/client/testing/core"
+	labels "k8s.io/kubernetes/pkg/labels"
+	watch "k8s.io/kubernetes/pkg/watch"
+)
+
+// FakeReplicaSets implements ReplicaSetInterface
+type FakeReplicaSets struct {
+	Fake *FakeExtensions
+	ns   string
+}
+
+func (c *FakeReplicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+	obj, err := c.Fake.
+		Invokes(core.NewCreateAction("replicasets", c.ns, replicaSet), &extensions.ReplicaSet{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*extensions.ReplicaSet), err
+}
+
+func (c *FakeReplicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+	obj, err := c.Fake.
+		Invokes(core.NewUpdateAction("replicasets", c.ns, replicaSet), &extensions.ReplicaSet{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*extensions.ReplicaSet), err
+}
+
+func (c *FakeReplicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) {
+	obj, err := c.Fake.
+		Invokes(core.NewUpdateSubresourceAction("replicasets", "status", c.ns, replicaSet), &extensions.ReplicaSet{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*extensions.ReplicaSet), err
+}
+
+func (c *FakeReplicaSets) Delete(name string, options *api.DeleteOptions) error {
+	_, err := c.Fake.
+		Invokes(core.NewDeleteAction("replicasets", c.ns, name), &extensions.ReplicaSet{})
+
+	return err
+}
+
+func (c *FakeReplicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+	action := core.NewDeleteCollectionAction("events", c.ns, listOptions)
+
+	_, err := c.Fake.Invokes(action, &extensions.ReplicaSetList{})
+	return err
+}
+
+func (c *FakeReplicaSets) Get(name string) (result *extensions.ReplicaSet, err error) {
+	obj, err := c.Fake.
+		Invokes(core.NewGetAction("replicasets", c.ns, name), &extensions.ReplicaSet{})
+
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*extensions.ReplicaSet), err
+}
+
+func (c *FakeReplicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) {
+	obj, err := c.Fake.
+		Invokes(core.NewListAction("replicasets", c.ns, opts), &extensions.ReplicaSetList{})
+
+	if obj == nil {
+		return nil, err
+	}
+
+	label := opts.LabelSelector
+	if label == nil {
+		label = labels.Everything()
+	}
+	list := &extensions.ReplicaSetList{}
+	for _, item := range obj.(*extensions.ReplicaSetList).Items {
+		if label.Matches(labels.Set(item.Labels)) {
+			list.Items = append(list.Items, item)
+		}
+	}
+	return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *FakeReplicaSets) Watch(opts api.ListOptions) (watch.Interface, error) {
+	return c.Fake.
+		InvokesWatch(core.NewWatchAction("replicasets", c.ns, opts))
+
+}
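The fake above records every call as an action and serves it through the testing core package. A minimal sketch of driving it from a test, assuming the fake clientset's NewSimpleClientset constructor used by the deployment controller tests later in this PR, and that the fake clientset's Extensions() group returns the FakeExtensions type shown above; the test name, namespace, and ReplicaSet fields are illustrative.

	import (
		"testing"

		api "k8s.io/kubernetes/pkg/api"
		extensions "k8s.io/kubernetes/pkg/apis/extensions"
		"k8s.io/kubernetes/pkg/client/testing/fake"
	)

	func TestFakeReplicaSets(t *testing.T) {
		c := fake.NewSimpleClientset()
		rs := &extensions.ReplicaSet{ObjectMeta: api.ObjectMeta{Name: "frontend", Namespace: "default"}}
		// Served by FakeExtensions.ReplicaSets above; recorded as a "create" action on "replicasets".
		if _, err := c.Extensions().ReplicaSets("default").Create(rs); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// Recorded actions can then be asserted on, as the deployment controller fixture below does.
		for _, action := range c.Actions() {
			t.Logf("observed action: %v", action)
		}
	}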
@@ -18,8 +18,6 @@ package unversioned
 
 type DaemonSetExpansion interface{}
 
-type DeploymentExpansion interface{}
-
 type HorizontalPodAutoscalerExpansion interface{}
 
 type IngressExpansion interface{}
@@ -29,3 +27,5 @@ type JobExpansion interface{}
 type ScaleExpansion interface{}
 
 type ThirdPartyResourceExpansion interface{}
+
+type ReplicaSetExpansion interface{}

pkg/client/typed/generated/extensions/unversioned/replicaset.go (new file, 150 lines)
@@ -0,0 +1,150 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unversioned
+
+import (
+	api "k8s.io/kubernetes/pkg/api"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions"
+	watch "k8s.io/kubernetes/pkg/watch"
+)
+
+// ReplicaSetsGetter has a method to return a ReplicaSetInterface.
+// A group's client should implement this interface.
+type ReplicaSetsGetter interface {
+	ReplicaSets(namespace string) ReplicaSetInterface
+}
+
+// ReplicaSetInterface has methods to work with ReplicaSet resources.
+type ReplicaSetInterface interface {
+	Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error)
+	Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error)
+	UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error)
+	Delete(name string, options *api.DeleteOptions) error
+	DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error
+	Get(name string) (*extensions.ReplicaSet, error)
+	List(opts api.ListOptions) (*extensions.ReplicaSetList, error)
+	Watch(opts api.ListOptions) (watch.Interface, error)
+	ReplicaSetExpansion
+}
+
+// replicaSets implements ReplicaSetInterface
+type replicaSets struct {
+	client *ExtensionsClient
+	ns     string
+}
+
+// newReplicaSets returns a ReplicaSets
+func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets {
+	return &replicaSets{
+		client: c,
+		ns:     namespace,
+	}
+}
+
+// Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+	result = &extensions.ReplicaSet{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+	result = &extensions.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) {
+	result = &extensions.ReplicaSet{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(replicaSet.Name).
+		SubResource("status").
+		Body(replicaSet).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&listOptions, api.ParameterCodec).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) {
+	result = &extensions.ReplicaSet{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		Name(name).
+		Do().
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) {
+	result = &extensions.ReplicaSetList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, api.ParameterCodec).
+		Do().
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) {
+	return c.client.Get().
+		Prefix("watch").
+		Namespace(c.ns).
+		Resource("replicasets").
+		VersionedParams(&opts, api.ParameterCodec).
+		Watch()
+}
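A minimal sketch of what a caller sees once this typed client is wired into the generated clientset, assuming the release_1_1 clientset exposes ReplicaSets through its Extensions() group the same way the fakes above do; the host address and namespace are illustrative, and the imports match the client, clientset, api, and glog packages already used elsewhere in this PR.

	config := &client.Config{Host: "http://127.0.0.1:8080"} // illustrative address
	c := clientset.NewForConfigOrDie(config)
	rsList, err := c.Extensions().ReplicaSets(api.NamespaceDefault).List(api.ListOptions{})
	if err != nil {
		glog.Fatalf("listing replica sets: %v", err)
	}
	for _, rs := range rsList.Items {
		glog.Infof("replica set %s/%s wants %d replicas", rs.Namespace, rs.Name, rs.Spec.Replicas)
	}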
@@ -137,3 +137,20 @@ func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, i
 func GetInvolvedObjectNameFieldLabel(version string) string {
 	return "involvedObject.name"
 }
+
+// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset.
+type EventSinkImpl struct {
+	Interface EventInterface
+}
+
+func (e *EventSinkImpl) Create(event *api.Event) (*api.Event, error) {
+	return e.Interface.CreateWithEventNamespace(event)
+}
+
+func (e *EventSinkImpl) Update(event *api.Event) (*api.Event, error) {
+	return e.Interface.UpdateWithEventNamespace(event)
+}
+
+func (e *EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) {
+	return e.Interface.Patch(event, data)
+}
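EventSinkImpl lets a controller that already holds only a generated clientset keep feeding the record package. A minimal sketch of the wiring inside a controller constructor, mirroring the DaemonSets and Deployment controller changes below; the component name is illustrative.

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// Wrap the clientset's legacy Events client in the temporary sink defined above.
	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "example-controller"})
	_ = recorder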
@@ -620,3 +620,9 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
 	}
 	return nil, nil
 }
+
+func AddUserAgent(config *Config, userAgent string) *Config {
+	fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
+	config.UserAgent = fullUserAgent
+	return config
+}
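A minimal sketch of the intended use of AddUserAgent: stamp each controller's requests with its own user agent before building a clientset from a shared base config. The helper mutates the config it is given, so the copy below is deliberate; the host and controller name are illustrative.

	base := client.Config{Host: "http://127.0.0.1:8080"}
	cfg := base // copy so the shared base config keeps its default user agent
	c := clientset.NewForConfigOrDie(client.AddUserAgent(&cfg, "daemonset-controller"))
	_ = c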
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
@@ -53,6 +54,7 @@ type Response struct {
 
 type Client struct {
 	*client.Client
+	Clientset *clientset.Clientset
 	Request   Request
 	Response  Response
 	Error     bool
@@ -86,6 +88,8 @@ func (c *Client) Setup(t *testing.T) *Client {
 			Host:          c.server.URL,
 			ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
 		})
+
+		c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
 	}
 	c.QueryValidator = map[string]func(string, string) bool{}
 	return c
@@ -27,8 +27,8 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/validation"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
@@ -237,7 +237,7 @@ type PodControlInterface interface {
 
 // RealPodControl is the default implementation of PodControlInterface.
 type RealPodControl struct {
-	KubeClient client.Interface
+	KubeClient clientset.Interface
 	Recorder   record.EventRecorder
 }
 
@@ -321,7 +321,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
 	if labels.Set(pod.Labels).AsSelector().Empty() {
 		return fmt.Errorf("unable to create pods, no labels")
 	}
-	if newPod, err := r.KubeClient.Pods(namespace).Create(pod); err != nil {
+	if newPod, err := r.KubeClient.Legacy().Pods(namespace).Create(pod); err != nil {
 		r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
 		return fmt.Errorf("unable to create pods: %v", err)
 	} else {
@@ -336,7 +336,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 	if err != nil {
 		return fmt.Errorf("object does not have ObjectMeta, %v", err)
 	}
-	if err := r.KubeClient.Pods(namespace).Delete(podID, nil); err != nil {
+	if err := r.KubeClient.Legacy().Pods(namespace).Delete(podID, nil); err != nil {
 		r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err)
 		return fmt.Errorf("unable to delete pods: %v", err)
 	} else {
@@ -444,12 +444,12 @@ func FilterActivePods(pods []api.Pod) []*api.Pod {
 //
 // TODO: Extend this logic to load arbitrary local state for the controllers
 // instead of just pods.
-func SyncAllPodsWithStore(kubeClient client.Interface, store cache.Store) {
+func SyncAllPodsWithStore(kubeClient clientset.Interface, store cache.Store) {
 	var allPods *api.PodList
 	var err error
 	listOptions := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
 	for {
-		if allPods, err = kubeClient.Pods(api.NamespaceAll).List(listOptions); err != nil {
+		if allPods, err = kubeClient.Legacy().Pods(api.NamespaceAll).List(listOptions); err != nil {
 			glog.Warningf("Retrying pod list: %v", err)
 			continue
 		}
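RealPodControl and SyncAllPodsWithStore now take the generated clientset and reach pods through its Legacy() group. A minimal sketch of constructing them, following the updated TestCreatePods below; the server URL is illustrative and the cache store uses the standard namespace key function.

	c := clientset.NewForConfigOrDie(&client.Config{Host: "http://127.0.0.1:8080", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	podControl := RealPodControl{
		KubeClient: c,
		Recorder:   &record.FakeRecorder{},
	}
	_ = podControl
	// Prime an informer store with the current pod list before a controller starts syncing.
	SyncAllPodsWithStore(c, cache.NewStore(cache.MetaNamespaceKeyFunc))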
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -192,10 +193,10 @@ func TestCreatePods(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	// TODO: Uncomment when fix #19254
 	// defer testServer.Close()
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 
 	podControl := RealPodControl{
-		KubeClient: client,
+		KubeClient: clientset,
 		Recorder:   &record.FakeRecorder{},
 	}
 
@@ -27,8 +27,10 @@ import (
 	"k8s.io/kubernetes/pkg/api/validation"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_extensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/labels"
@@ -60,7 +62,7 @@ const (
 // DaemonSetsController is responsible for synchronizing DaemonSet objects stored
 // in the system with actual running pods.
 type DaemonSetsController struct {
-	kubeClient client.Interface
+	kubeClient clientset.Interface
 	podControl controller.PodControlInterface
 
 	// An dsc is temporarily suspended after creating/deleting these many replicas.
@@ -91,10 +93,11 @@ type DaemonSetsController struct {
 	queue *workqueue.Type
 }
 
-func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc) *DaemonSetsController {
+func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *DaemonSetsController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+	// TODO: remove the wrapper when every clients have moved to use the clientset.
+	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
 
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
@@ -142,10 +145,10 @@ func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controlle
 	dsc.podStore.Store, dsc.podController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dsc.kubeClient.Pods(api.NamespaceAll).List(options)
+				return dsc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dsc.kubeClient.Pods(api.NamespaceAll).Watch(options)
+				return dsc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.Pod{},
@@ -160,10 +163,10 @@ func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controlle
 	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dsc.kubeClient.Nodes().List(options)
+				return dsc.kubeClient.Legacy().Nodes().List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dsc.kubeClient.Nodes().Watch(options)
+				return dsc.kubeClient.Legacy().Nodes().Watch(options)
 			},
 		},
 		&api.Node{},
@@ -463,7 +466,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
 	deleteWait.Wait()
 }
 
-func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
+func storeDaemonSetStatus(dsClient unversioned_extensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
 	if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled {
 		return nil
 	}
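Taken together, the DaemonSets controller is now built from a clientset and reads pods and nodes through its Legacy() group. A minimal sketch of how a caller such as kube-controller-manager could construct and start it after this change, assuming Run(workers, stopCh) keeps its existing signature; the config, user agent, and worker count are illustrative.

	cfg := &client.Config{Host: "http://127.0.0.1:8080"}
	dsc := NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(cfg, "daemonset-controller")), controller.NoResyncPeriodFunc)
	go dsc.Run(2, util.NeverStop)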
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -134,8 +135,8 @@ func addPods(podStore cache.Store, nodeName string, label map[string]string, num
 }
 
 func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewDaemonSetsController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady
 	podControl := &controller.FakePodControl{}
 	manager.podControl = podControl
@@ -480,8 +481,8 @@ func TestDSManagerInit(t *testing.T) {
 	// TODO: Uncomment when fix #19254
 	// defer testServer.Close()
 
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewDaemonSetsController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
 	manager.dsStore.Add(ds)
 	manager.nodeStore.Add(newNode(nodeName, nil))
 	manager.podStoreSynced = alwaysReady
@@ -29,8 +29,9 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -56,8 +57,7 @@ const (
 // DeploymentController is responsible for synchronizing Deployment objects stored
 // in the system with actual running rcs and pods.
 type DeploymentController struct {
-	client        client.Interface
-	expClient     client.ExtensionsInterface
+	client        clientset.Interface
 	eventRecorder record.EventRecorder
 
 	// To allow injection of syncDeployment for testing.
@@ -94,14 +94,14 @@ type DeploymentController struct {
 }
 
 // NewDeploymentController creates a new DeploymentController.
-func NewDeploymentController(client client.Interface, resyncPeriod controller.ResyncPeriodFunc) *DeploymentController {
+func NewDeploymentController(client clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *DeploymentController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(client.Events(""))
+	// TODO: remove the wrapper when every clients have moved to use the clientset.
+	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{client.Legacy().Events("")})
 
 	dc := &DeploymentController{
 		client:          client,
-		expClient:       client.Extensions(),
 		eventRecorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
 		queue:           workqueue.New(),
 		podExpectations: controller.NewControllerExpectations(),
@@ -111,10 +111,10 @@ func NewDeploymentController(client client.Interface, resyncPeriod controller.Re
 	dc.dStore.Store, dc.dController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dc.expClient.Deployments(api.NamespaceAll).List(options)
+				return dc.client.Extensions().Deployments(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dc.expClient.Deployments(api.NamespaceAll).Watch(options)
+				return dc.client.Extensions().Deployments(api.NamespaceAll).Watch(options)
 			},
 		},
 		&extensions.Deployment{},
@@ -143,10 +143,10 @@ func NewDeploymentController(client client.Interface, resyncPeriod controller.Re
 	dc.rcStore.Store, dc.rcController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dc.client.ReplicationControllers(api.NamespaceAll).List(options)
+				return dc.client.Legacy().ReplicationControllers(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dc.client.ReplicationControllers(api.NamespaceAll).Watch(options)
+				return dc.client.Legacy().ReplicationControllers(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.ReplicationController{},
@@ -161,10 +161,10 @@ func NewDeploymentController(client client.Interface, resyncPeriod controller.Re
 	dc.podStore.Store, dc.podController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dc.client.Pods(api.NamespaceAll).List(options)
+				return dc.client.Legacy().Pods(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dc.client.Pods(api.NamespaceAll).Watch(options)
+				return dc.client.Legacy().Pods(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.Pod{},
@@ -688,7 +688,7 @@ func (dc *DeploymentController) getNewRC(deployment extensions.Deployment, maxOl
 		if existingNewRC.Annotations[deploymentutil.RevisionAnnotation] != newRevision {
 			existingNewRC.Annotations[deploymentutil.RevisionAnnotation] = newRevision
 			glog.V(4).Infof("update existingNewRC %s revision to %s - %+v\n", existingNewRC.Name, newRevision)
-			return dc.client.ReplicationControllers(deployment.ObjectMeta.Namespace).Update(existingNewRC)
+			return dc.client.Legacy().ReplicationControllers(deployment.ObjectMeta.Namespace).Update(existingNewRC)
 		}
 		return existingNewRC, nil
 	}
@@ -728,7 +728,7 @@ func (dc *DeploymentController) getNewRC(deployment extensions.Deployment, maxOl
 			Template: &newRCTemplate,
 		},
 	}
-	createdRC, err := dc.client.ReplicationControllers(namespace).Create(&newRC)
+	createdRC, err := dc.client.Legacy().ReplicationControllers(namespace).Create(&newRC)
 	if err != nil {
 		dc.rcExpectations.DeleteExpectations(dKey)
 		return nil, fmt.Errorf("error creating replication controller: %v", err)
@@ -752,7 +752,7 @@ func (dc *DeploymentController) updateRCRevision(rc api.ReplicationController, r
 		rc.Annotations = make(map[string]string)
 	}
 	rc.Annotations[deploymentutil.RevisionAnnotation] = revision
-	_, err := dc.client.ReplicationControllers(rc.ObjectMeta.Namespace).Update(&rc)
+	_, err := dc.client.Legacy().ReplicationControllers(rc.ObjectMeta.Namespace).Update(&rc)
 	return err
 }
 
@@ -901,7 +901,7 @@ func (dc *DeploymentController) cleanupOldRcs(oldRCs []*api.ReplicationControlle
 		if controller.Spec.Replicas != 0 || controller.Generation > controller.Status.ObservedGeneration {
 			continue
 		}
-		if err := dc.client.ReplicationControllers(controller.Namespace).Delete(controller.Name); err != nil && !errors.IsNotFound(err) {
+		if err := dc.client.Legacy().ReplicationControllers(controller.Namespace).Delete(controller.Name, nil); err != nil && !errors.IsNotFound(err) {
 			glog.V(2).Infof("Failed deleting old rc %v for deployment %v: %v", controller.Name, deployment.Name, err)
 			errList = append(errList, err)
 		}
@@ -923,7 +923,7 @@ func (dc *DeploymentController) updateDeploymentStatus(allRCs []*api.Replication
 		AvailableReplicas:   availableReplicas,
 		UnavailableReplicas: unavailableReplicas,
 	}
-	_, err = dc.expClient.Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(&newDeployment)
+	_, err = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(&newDeployment)
 	return err
 }
 
@@ -958,12 +958,12 @@ func (dc *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationControl
 func (dc *DeploymentController) scaleRC(rc *api.ReplicationController, newScale int) (*api.ReplicationController, error) {
 	// TODO: Using client for now, update to use store when it is ready.
 	rc.Spec.Replicas = newScale
-	return dc.client.ReplicationControllers(rc.ObjectMeta.Namespace).Update(rc)
+	return dc.client.Legacy().ReplicationControllers(rc.ObjectMeta.Namespace).Update(rc)
 }
 
 func (dc *DeploymentController) updateDeployment(deployment *extensions.Deployment) (*extensions.Deployment, error) {
 	// TODO: Using client for now, update to use store when it is ready.
-	return dc.expClient.Deployments(deployment.ObjectMeta.Namespace).Update(deployment)
+	return dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).Update(deployment)
 }
 
 func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deployment, rc *api.ReplicationController) (d *extensions.Deployment, performedRollback bool, err error) {
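With expClient removed, every API call from the Deployment controller goes through the single clientset: Legacy() for pods and replication controllers, Extensions() for deployments. A minimal sketch of constructing and starting it after this change, assuming Run(workers, stopCh) keeps its existing signature; the config, user agent, and worker count are illustrative.

	cfg := &client.Config{Host: "http://127.0.0.1:8080"}
	dc := NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(cfg, "deployment-controller")), controller.NoResyncPeriodFunc)
	go dc.Run(5, util.NeverStop)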
@@ -25,6 +25,8 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	exp "k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/record"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -89,9 +91,9 @@ func TestDeploymentController_reconcileNewRC(t *testing.T) {
 		oldRc := rc("foo-v2", test.oldReplicas, nil)
 		allRcs := []*api.ReplicationController{newRc, oldRc}
 		deployment := deployment("foo", test.deploymentReplicas, test.maxSurge, intstr.FromInt(0))
-		fake := &testclient.Fake{}
+		fake := fake.Clientset{}
 		controller := &DeploymentController{
-			client:        fake,
+			client:        &fake,
 			eventRecorder: &record.FakeRecorder{},
 		}
 		scaled, err := controller.reconcileNewRC(allRcs, newRc, deployment)
@@ -166,10 +168,10 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
 		allRcs := []*api.ReplicationController{oldRc}
 		oldRcs := []*api.ReplicationController{oldRc}
 		deployment := deployment("foo", test.deploymentReplicas, intstr.FromInt(0), test.maxUnavailable)
-		fake := &testclient.Fake{}
-		fake.AddReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
+		fakeClientset := fake.Clientset{}
+		fakeClientset.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 			switch action.(type) {
-			case testclient.ListAction:
+			case core.ListAction:
 				podList := &api.PodList{}
 				for podIndex := 0; podIndex < test.readyPods; podIndex++ {
 					podList.Items = append(podList.Items, api.Pod{
@@ -191,7 +193,7 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
 			return false, nil, nil
 		})
 		controller := &DeploymentController{
-			client:        fake,
+			client:        &fakeClientset,
 			eventRecorder: &record.FakeRecorder{},
 		}
 		scaled, err := controller.reconcileOldRCs(allRcs, oldRcs, nil, deployment, false)
@@ -201,18 +203,18 @@ func TestDeploymentController_reconcileOldRCs(t *testing.T) {
 		}
 		if !test.scaleExpected {
 			if scaled {
-				t.Errorf("unexpected scaling: %v", fake.Actions())
+				t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
 			}
 			continue
 		}
 		if test.scaleExpected && !scaled {
-			t.Errorf("expected scaling to occur; actions: %v", fake.Actions())
+			t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions())
 			continue
 		}
 		// There are both list and update actions logged, so extract the update
 		// action for verification.
 		var updateAction testclient.UpdateAction
-		for _, action := range fake.Actions() {
+		for _, action := range fakeClientset.Actions() {
 			switch a := action.(type) {
 			case testclient.UpdateAction:
 				if updateAction != nil {
@@ -269,7 +271,7 @@ func TestDeploymentController_cleanupOldRCs(t *testing.T) {
 	}
 
 	for i, test := range tests {
-		fake := &testclient.Fake{}
+		fake := &fake.Clientset{}
 		controller := NewDeploymentController(fake, controller.NoResyncPeriodFunc)
 
 		controller.eventRecorder = &record.FakeRecorder{}
@@ -395,8 +397,7 @@ func newListOptions() api.ListOptions {
 type fixture struct {
 	t *testing.T
 
-	client *testclient.Fake
-
+	client *fake.Clientset
 	// Objects to put in the store.
 	dStore   []*exp.Deployment
 	rcStore  []*api.ReplicationController
@@ -404,22 +405,22 @@ type fixture struct {
 
 	// Actions expected to happen on the client. Objects from here are also
 	// preloaded into NewSimpleFake.
-	actions []testclient.Action
+	actions []core.Action
 	objects *api.List
 }
 
 func (f *fixture) expectUpdateDeploymentAction(d *exp.Deployment) {
-	f.actions = append(f.actions, testclient.NewUpdateAction("deployments", d.Namespace, d))
+	f.actions = append(f.actions, core.NewUpdateAction("deployments", d.Namespace, d))
 	f.objects.Items = append(f.objects.Items, d)
 }
 
 func (f *fixture) expectCreateRCAction(rc *api.ReplicationController) {
-	f.actions = append(f.actions, testclient.NewCreateAction("replicationcontrollers", rc.Namespace, rc))
+	f.actions = append(f.actions, core.NewCreateAction("replicationcontrollers", rc.Namespace, rc))
 	f.objects.Items = append(f.objects.Items, rc)
 }
 
 func (f *fixture) expectUpdateRCAction(rc *api.ReplicationController) {
-	f.actions = append(f.actions, testclient.NewUpdateAction("replicationcontrollers", rc.Namespace, rc))
+	f.actions = append(f.actions, core.NewUpdateAction("replicationcontrollers", rc.Namespace, rc))
 	f.objects.Items = append(f.objects.Items, rc)
 }
 
@@ -435,7 +436,7 @@ func newFixture(t *testing.T) *fixture {
 | 
				
			|||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func (f *fixture) run(deploymentName string) {
 | 
					func (f *fixture) run(deploymentName string) {
 | 
				
			||||||
	f.client = testclient.NewSimpleFake(f.objects)
 | 
						f.client = fake.NewSimpleClientset(f.objects)
 | 
				
			||||||
	c := NewDeploymentController(f.client, controller.NoResyncPeriodFunc)
 | 
						c := NewDeploymentController(f.client, controller.NoResyncPeriodFunc)
 | 
				
			||||||
	c.eventRecorder = &record.FakeRecorder{}
 | 
						c.eventRecorder = &record.FakeRecorder{}
 | 
				
			||||||
	c.rcStoreSynced = alwaysReady
 | 
						c.rcStoreSynced = alwaysReady
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -27,8 +27,9 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
@@ -39,7 +40,7 @@ import (
 )

 type JobController struct {
-	kubeClient client.Interface
+	kubeClient clientset.Interface
 	podControl controller.PodControlInterface

 	// To allow injection of updateJobStatus for testing.
@@ -68,10 +69,11 @@ type JobController struct {
 	recorder record.EventRecorder
 }

-func NewJobController(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc) *JobController {
+func NewJobController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *JobController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+	// TODO: remove the wrapper when every clients have moved to use the clientset.
+	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})

 	jm := &JobController{
 		kubeClient: kubeClient,
@@ -110,10 +112,10 @@ func NewJobController(kubeClient client.Interface, resyncPeriod controller.Resyn
 	jm.podStore.Store, jm.podController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return jm.kubeClient.Pods(api.NamespaceAll).List(options)
+				return jm.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return jm.kubeClient.Pods(api.NamespaceAll).Watch(options)
+				return jm.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.Pod{},

@@ -25,6 +25,9 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/controller"
@@ -204,8 +207,8 @@ func TestControllerSyncJob(t *testing.T) {

 	for name, tc := range testCases {
 		// job manager setup
-		client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-		manager := NewJobController(client, controller.NoResyncPeriodFunc)
+		clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+		manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 		fakePodControl := controller.FakePodControl{Err: tc.podControllerError}
 		manager.podControl = &fakePodControl
 		manager.podStoreSynced = alwaysReady
@@ -299,8 +302,8 @@ func TestSyncJobPastDeadline(t *testing.T) {

 	for name, tc := range testCases {
 		// job manager setup
-		client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-		manager := NewJobController(client, controller.NoResyncPeriodFunc)
+		clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+		manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 		fakePodControl := controller.FakePodControl{}
 		manager.podControl = &fakePodControl
 		manager.podStoreSynced = alwaysReady
@@ -369,8 +372,8 @@ func getCondition(job *extensions.Job, condition extensions.JobConditionType) bo
 }

 func TestSyncPastDeadlineJobFinished(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -403,8 +406,8 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
 }

 func TestSyncJobComplete(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -428,8 +431,8 @@ func TestSyncJobComplete(t *testing.T) {
 }

 func TestSyncJobDeleted(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -448,8 +451,8 @@ func TestSyncJobDeleted(t *testing.T) {
 }

 func TestSyncJobUpdateRequeue(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -469,8 +472,8 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 }

 func TestJobPodLookup(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady
 	testCases := []struct {
 		job *extensions.Job
@@ -559,8 +562,8 @@ func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
 // TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
 // and checking expectations.
 func TestSyncJobExpectations(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -594,10 +597,10 @@ type FakeWatcher struct {
 }

 func TestWatchJobs(t *testing.T) {
-	client := testclient.NewSimpleFake()
+	clientset := fake.NewSimpleClientset()
 	fakeWatch := watch.NewFake()
-	client.PrependWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady

 	var testJob extensions.Job
@@ -658,10 +661,10 @@ func TestIsJobFinished(t *testing.T) {
 }

 func TestWatchPods(t *testing.T) {
-	client := testclient.NewSimpleFake()
+	clientset := fake.NewSimpleClientset()
 	fakeWatch := watch.NewFake()
-	client.PrependWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewJobController(client, controller.NoResyncPeriodFunc)
+	clientset.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+	manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady

 	// Put one job and one pod into the store

@@ -24,7 +24,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/conversion"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -44,7 +44,7 @@ type PersistentVolumeClaimBinder struct {
 }

 // NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
-func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
+func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
 	volumeIndex := NewPersistentVolumeOrderedIndex()
 	binderClient := NewBinderClient(kubeClient)
 	binder := &PersistentVolumeClaimBinder{
@@ -55,10 +55,10 @@ func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time
 	_, volumeController := framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return kubeClient.PersistentVolumes().List(options)
+				return kubeClient.Legacy().PersistentVolumes().List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return kubeClient.PersistentVolumes().Watch(options)
+				return kubeClient.Legacy().PersistentVolumes().Watch(options)
 			},
 		},
 		&api.PersistentVolume{},
@@ -73,10 +73,10 @@ func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time
 	_, claimController := framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(options)
+				return kubeClient.Legacy().PersistentVolumeClaims(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(options)
+				return kubeClient.Legacy().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.PersistentVolumeClaim{},
@@ -453,38 +453,38 @@ type binderClient interface {
 	UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
 }

-func NewBinderClient(c client.Interface) binderClient {
+func NewBinderClient(c clientset.Interface) binderClient {
 	return &realBinderClient{c}
 }

 type realBinderClient struct {
-	client client.Interface
+	client clientset.Interface
 }

 func (c *realBinderClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Get(name)
+	return c.client.Legacy().PersistentVolumes().Get(name)
 }

 func (c *realBinderClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Update(volume)
+	return c.client.Legacy().PersistentVolumes().Update(volume)
 }

 func (c *realBinderClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
-	return c.client.PersistentVolumes().Delete(volume.Name)
+	return c.client.Legacy().PersistentVolumes().Delete(volume.Name, nil)
 }

 func (c *realBinderClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().UpdateStatus(volume)
+	return c.client.Legacy().PersistentVolumes().UpdateStatus(volume)
 }

 func (c *realBinderClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
-	return c.client.PersistentVolumeClaims(namespace).Get(name)
+	return c.client.Legacy().PersistentVolumeClaims(namespace).Get(name)
 }

 func (c *realBinderClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-	return c.client.PersistentVolumeClaims(claim.Namespace).Update(claim)
+	return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).Update(claim)
 }

 func (c *realBinderClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-	return c.client.PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
+	return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
 }

@@ -28,15 +28,16 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/host_path"
 )

 func TestRunStop(t *testing.T) {
-	client := &testclient.Fake{}
-	binder := NewPersistentVolumeClaimBinder(client, 1*time.Second)
+	clientset := fake.NewSimpleClientset()
+	binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)

 	if len(binder.stopChannels) != 0 {
 		t.Errorf("Non-running binder should not have any stopChannels.  Got %v", len(binder.stopChannels))
@@ -290,18 +291,18 @@ func TestExampleObjects(t *testing.T) {

 	for name, scenario := range scenarios {
 		codec := api.Codecs.UniversalDecoder()
-		o := testclient.NewObjects(api.Scheme, codec)
-		if err := testclient.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/"+name, o, codec); err != nil {
+		o := core.NewObjects(api.Scheme, codec)
+		if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/"+name, o, codec); err != nil {
 			t.Fatal(err)
 		}

-		client := &testclient.Fake{}
-		client.AddReactor("*", "*", testclient.ObjectReaction(o, api.RESTMapper))
+		clientset := &fake.Clientset{}
+		clientset.AddReactor("*", "*", core.ObjectReaction(o, api.RESTMapper))

 		if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolumeClaim{}) {
-			pvc, err := client.PersistentVolumeClaims("ns").Get("doesntmatter")
+			pvc, err := clientset.Legacy().PersistentVolumeClaims("ns").Get("doesntmatter")
 			if err != nil {
-				t.Errorf("Error retrieving object: %v", err)
+				t.Fatalf("Error retrieving object: %v", err)
 			}

 			expected := scenario.expected.(*api.PersistentVolumeClaim)
@@ -320,9 +321,9 @@ func TestExampleObjects(t *testing.T) {
 		}

 		if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolume{}) {
-			pv, err := client.PersistentVolumes().Get("doesntmatter")
+			pv, err := clientset.Legacy().PersistentVolumes().Get("doesntmatter")
 			if err != nil {
-				t.Errorf("Error retrieving object: %v", err)
+				t.Fatalf("Error retrieving object: %v", err)
 			}

 			expected := scenario.expected.(*api.PersistentVolume)
@@ -354,18 +355,21 @@ func TestBindingWithExamples(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	codec := api.Codecs.UniversalDecoder()
-	o := testclient.NewObjects(api.Scheme, codec)
-	if err := testclient.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil {
+	o := core.NewObjects(api.Scheme, codec)
+	if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, codec); err != nil {
 		t.Fatal(err)
 	}
-	if err := testclient.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil {
+	if err := core.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, codec); err != nil {
 		t.Fatal(err)
 	}

-	client := &testclient.Fake{}
-	client.AddReactor("*", "*", testclient.ObjectReaction(o, api.RESTMapper))
+	clientset := &fake.Clientset{}
+	clientset.AddReactor("*", "*", core.ObjectReaction(o, api.RESTMapper))

-	pv, err := client.PersistentVolumes().Get("any")
+	pv, err := clientset.Legacy().PersistentVolumes().Get("any")
+	if err != nil {
+		t.Errorf("Unexpected error getting PV from client: %v", err)
+	}
 	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle
 	if err != nil {
 		t.Errorf("Unexpected error getting PV from client: %v", err)
@@ -377,7 +381,7 @@ func TestBindingWithExamples(t *testing.T) {
 	// Test that !Pending gets correctly added
 	pv.Status.Phase = api.VolumeAvailable

-	claim, error := client.PersistentVolumeClaims("ns").Get("any")
+	claim, error := clientset.Legacy().PersistentVolumeClaims("ns").Get("any")
 	if error != nil {
 		t.Errorf("Unexpected error getting PVC from client: %v", err)
 	}
@@ -393,7 +397,7 @@ func TestBindingWithExamples(t *testing.T) {
 	plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost(tmpDir, nil, nil))

 	recycler := &PersistentVolumeRecycler{
-		kubeClient: client,
+		kubeClient: clientset,
 		client:     mockClient,
 		pluginMgr:  plugMgr,
 	}
@@ -463,8 +467,8 @@ func TestBindingWithExamples(t *testing.T) {
 }

 func TestCasting(t *testing.T) {
-	client := &testclient.Fake{}
-	binder := NewPersistentVolumeClaimBinder(client, 1*time.Second)
+	clientset := fake.NewSimpleClientset()
+	binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)

 	pv := &api.PersistentVolume{}
 	unk := cache.DeletedFinalStateUnknown{}

@@ -23,7 +23,7 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/conversion"
@@ -368,68 +368,68 @@ type controllerClient interface {
 	UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)

 	// provided to give VolumeHost and plugins access to the kube client
-	GetKubeClient() client.Interface
+	GetKubeClient() clientset.Interface
 }

-func NewControllerClient(c client.Interface) controllerClient {
+func NewControllerClient(c clientset.Interface) controllerClient {
 	return &realControllerClient{c}
 }

 var _ controllerClient = &realControllerClient{}

 type realControllerClient struct {
-	client client.Interface
+	client clientset.Interface
 }

 func (c *realControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Get(name)
+	return c.client.Legacy().PersistentVolumes().Get(name)
 }

 func (c *realControllerClient) ListPersistentVolumes(options api.ListOptions) (*api.PersistentVolumeList, error) {
-	return c.client.PersistentVolumes().List(options)
+	return c.client.Legacy().PersistentVolumes().List(options)
 }

 func (c *realControllerClient) WatchPersistentVolumes(options api.ListOptions) (watch.Interface, error) {
-	return c.client.PersistentVolumes().Watch(options)
+	return c.client.Legacy().PersistentVolumes().Watch(options)
 }

 func (c *realControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Create(pv)
+	return c.client.Legacy().PersistentVolumes().Create(pv)
 }

 func (c *realControllerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Update(volume)
+	return c.client.Legacy().PersistentVolumes().Update(volume)
 }

 func (c *realControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
-	return c.client.PersistentVolumes().Delete(volume.Name)
+	return c.client.Legacy().PersistentVolumes().Delete(volume.Name, nil)
 }

 func (c *realControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().UpdateStatus(volume)
+	return c.client.Legacy().PersistentVolumes().UpdateStatus(volume)
 }

 func (c *realControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
-	return c.client.PersistentVolumeClaims(namespace).Get(name)
+	return c.client.Legacy().PersistentVolumeClaims(namespace).Get(name)
 }

 func (c *realControllerClient) ListPersistentVolumeClaims(namespace string, options api.ListOptions) (*api.PersistentVolumeClaimList, error) {
-	return c.client.PersistentVolumeClaims(namespace).List(options)
+	return c.client.Legacy().PersistentVolumeClaims(namespace).List(options)
 }

 func (c *realControllerClient) WatchPersistentVolumeClaims(namespace string, options api.ListOptions) (watch.Interface, error) {
-	return c.client.PersistentVolumeClaims(namespace).Watch(options)
+	return c.client.Legacy().PersistentVolumeClaims(namespace).Watch(options)
 }

 func (c *realControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-	return c.client.PersistentVolumeClaims(claim.Namespace).Update(claim)
+	return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).Update(claim)
 }

 func (c *realControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
-	return c.client.PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
+	return c.client.Legacy().PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
 }

-func (c *realControllerClient) GetKubeClient() client.Interface {
+func (c *realControllerClient) GetKubeClient() clientset.Interface {
 	return c.client
 }

@@ -469,7 +469,7 @@ func (c *PersistentVolumeProvisionerController) GetPodPluginDir(podUID types.UID
 	return ""
 }

-func (c *PersistentVolumeProvisionerController) GetKubeClient() client.Interface {
+func (c *PersistentVolumeProvisionerController) GetKubeClient() clientset.Interface {
 	return c.client.GetKubeClient()
 }

@@ -25,7 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/volume"
@@ -248,6 +248,6 @@ func (c *mockControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.Pers
 	return claim, nil
 }

-func (c *mockControllerClient) GetKubeClient() client.Interface {
+func (c *mockControllerClient) GetKubeClient() clientset.Interface {
 	return nil
 }

@@ -23,7 +23,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -43,13 +43,13 @@ type PersistentVolumeRecycler struct {
 	volumeController *framework.Controller
 	stopChannel      chan struct{}
 	client           recyclerClient
-	kubeClient       client.Interface
+	kubeClient       clientset.Interface
 	pluginMgr        volume.VolumePluginMgr
 	cloud            cloudprovider.Interface
 }

 // PersistentVolumeRecycler creates a new PersistentVolumeRecycler
-func NewPersistentVolumeRecycler(kubeClient client.Interface, syncPeriod time.Duration, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
+func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time.Duration, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
 	recyclerClient := NewRecyclerClient(kubeClient)
 	recycler := &PersistentVolumeRecycler{
 		client:     recyclerClient,
@@ -64,10 +64,10 @@ func NewPersistentVolumeRecycler(kubeClient client.Interface, syncPeriod time.Du
 	_, volumeController := framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return kubeClient.PersistentVolumes().List(options)
+				return kubeClient.Legacy().PersistentVolumes().List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return kubeClient.PersistentVolumes().Watch(options)
+				return kubeClient.Legacy().PersistentVolumes().Watch(options)
 			},
 		},
 		&api.PersistentVolume{},
@@ -249,28 +249,28 @@ type recyclerClient interface {
 	UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)
 }

-func NewRecyclerClient(c client.Interface) recyclerClient {
+func NewRecyclerClient(c clientset.Interface) recyclerClient {
 	return &realRecyclerClient{c}
 }

 type realRecyclerClient struct {
-	client client.Interface
+	client clientset.Interface
 }

 func (c *realRecyclerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Get(name)
+	return c.client.Legacy().PersistentVolumes().Get(name)
 }

 func (c *realRecyclerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().Update(volume)
+	return c.client.Legacy().PersistentVolumes().Update(volume)
 }

 func (c *realRecyclerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
-	return c.client.PersistentVolumes().Delete(volume.Name)
+	return c.client.Legacy().PersistentVolumes().Delete(volume.Name, nil)
 }

 func (c *realRecyclerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
-	return c.client.PersistentVolumes().UpdateStatus(volume)
+	return c.client.Legacy().PersistentVolumes().UpdateStatus(volume)
 }

 // PersistentVolumeRecycler is host to the volume plugins, but does not actually mount any volumes.
@@ -287,7 +287,7 @@ func (f *PersistentVolumeRecycler) GetPodPluginDir(podUID types.UID, pluginName
 	return ""
 }

-func (f *PersistentVolumeRecycler) GetKubeClient() client.Interface {
+func (f *PersistentVolumeRecycler) GetKubeClient() clientset.Interface {
 	return f.kubeClient
 }

@@ -21,7 +21,7 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/volume"
 )

@@ -56,7 +56,7 @@ func TestFailedRecycling(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}

 	recycler := &PersistentVolumeRecycler{
-		kubeClient: &testclient.Fake{},
+		kubeClient: fake.NewSimpleClientset(),
 		client:     mockClient,
 		pluginMgr:  plugMgr,
 	}

@@ -28,8 +28,9 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/runtime"
@@ -59,7 +60,7 @@ const (
 // ReplicaSetController is responsible for synchronizing ReplicaSet objects stored
 // in the system with actual running pods.
 type ReplicaSetController struct {
-	kubeClient client.Interface
+	kubeClient clientset.Interface
 	podControl controller.PodControlInterface

 	// A ReplicaSet is temporarily suspended after creating/deleting these many replicas.
@@ -88,10 +89,10 @@ type ReplicaSetController struct {
 }

 // NewReplicaSetController creates a new ReplicaSetController.
-func NewReplicaSetController(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicaSetController {
+func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicaSetController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})

 	rsc := &ReplicaSetController{
 		kubeClient: kubeClient,
@@ -148,10 +149,10 @@ func NewReplicaSetController(kubeClient client.Interface, resyncPeriod controlle
 	rsc.podStore.Store, rsc.podController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return rsc.kubeClient.Pods(api.NamespaceAll).List(options)
+				return rsc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return rsc.kubeClient.Pods(api.NamespaceAll).Watch(options)
+				return rsc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.Pod{},

@@ -30,6 +30,9 @@ import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
	"k8s.io/kubernetes/pkg/controller"
@@ -133,7 +136,7 @@ type serverResponse struct {
}

func TestSyncReplicaSetDoesNothing(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
@@ -150,7 +153,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
}

func TestSyncReplicaSetDeletes(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
@@ -167,7 +170,7 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
}

func TestDeleteFinalStateUnknown(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
@@ -201,7 +204,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
}

func TestSyncReplicaSetCreates(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

@@ -224,7 +227,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

@@ -267,7 +270,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()

-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

@@ -306,7 +309,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
@@ -356,7 +359,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
}

func TestPodControllerLookup(t *testing.T) {
-	manager := NewReplicaSetController(client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRSs     []*extensions.ReplicaSet
@@ -417,13 +420,13 @@ func TestPodControllerLookup(t *testing.T) {

type FakeWatcher struct {
	w *watch.FakeWatcher
-	*testclient.Fake
+	*fake.Clientset
}

func TestWatchControllers(t *testing.T) {
	fakeWatch := watch.NewFake()
-	client := &testclient.Fake{}
-	client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
+	client := &fake.Clientset{}
+	client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

@@ -465,8 +468,8 @@ func TestWatchControllers(t *testing.T) {

func TestWatchPods(t *testing.T) {
	fakeWatch := watch.NewFake()
-	client := &testclient.Fake{}
-	client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
+	client := &fake.Clientset{}
+	client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

@@ -510,7 +513,7 @@ func TestWatchPods(t *testing.T) {
}

func TestUpdatePods(t *testing.T) {
-	manager := NewReplicaSetController(testclient.NewSimpleFake(), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicaSetController(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	received := make(chan string)
@@ -570,7 +573,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()

-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

@@ -606,12 +609,12 @@ func TestControllerUpdateRequeue(t *testing.T) {

func TestControllerUpdateStatusWithFailure(t *testing.T) {
	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
-	fakeClient := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
-	fakeClient.AddReactor("get", "replicasets", func(action testclient.Action) (bool, runtime.Object, error) { return true, rs, nil })
-	fakeClient.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
+	fakeClient := &fake.Clientset{}
+	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
+	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, &extensions.ReplicaSet{}, fmt.Errorf("Fake error")
	})
-	fakeRSClient := &testclient.FakeReplicaSets{fakeClient, "default"}
+	fakeRSClient := fakeClient.Extensions().ReplicaSets("default")
	numReplicas := 10
	updateReplicaCount(fakeRSClient, *rs, numReplicas)
	updates, gets := 0, 0
@@ -649,7 +652,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
}

func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, burstReplicas)
	manager.podStoreSynced = alwaysReady
@@ -771,7 +774,7 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
	manager.podStoreSynced = alwaysReady
@@ -797,7 +800,7 @@ func TestRSSyncExpectations(t *testing.T) {
}

func TestDeleteControllerAndExpectations(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 10)
	manager.podStoreSynced = alwaysReady

@@ -839,7 +842,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
}

func TestRSManagerNotReady(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, 2)
	manager.podControl = &fakePodControl
@@ -876,7 +879,7 @@ func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
}

func TestOverlappingRSs(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	client := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	labelMap := map[string]string{"foo": "bar"}

	for i := 0; i < 5; i++ {
@@ -27,8 +27,9 @@ import (
	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/labels"
@@ -61,7 +62,7 @@ const (
// TODO: this really should be called ReplicationController. The only reason why it's a Manager
// is to distinguish this type from API object "ReplicationController". We should fix this.
type ReplicationManager struct {
-	kubeClient client.Interface
+	kubeClient clientset.Interface
	podControl controller.PodControlInterface

	// An rc is temporarily suspended after creating/deleting these many replicas.
@@ -90,10 +91,10 @@ type ReplicationManager struct {
}

// NewReplicationManager creates a new ReplicationManager.
-func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
+func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})

	rm := &ReplicationManager{
		kubeClient: kubeClient,
@@ -109,10 +110,10 @@ func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.
	rm.rcStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
+				return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(options)
+				return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
@@ -150,10 +151,10 @@ func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.
	rm.podStore.Store, rm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return rm.kubeClient.Pods(api.NamespaceAll).List(options)
+				return rm.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return rm.kubeClient.Pods(api.NamespaceAll).Watch(options)
+				return rm.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
@@ -451,7 +452,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
	}

	// Always updates status as pods come up or die.
-	if err := updateReplicaCount(rm.kubeClient.ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
+	if err := updateReplicaCount(rm.kubeClient.Legacy().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
		// Multiple things could lead to this update failing. Requeuing the controller ensures
		// we retry with some fairness.
		glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rc.Namespace, rc.Name, err)
@@ -29,8 +29,10 @@ import (
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	"k8s.io/kubernetes/pkg/client/testing/core"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/securitycontext"
@@ -132,9 +134,9 @@ type serverResponse struct {
}

func TestSyncReplicationControllerDoesNothing(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// 2 running pods, a controller with 2 replicas, sync is a no-op
@@ -148,9 +150,9 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
}

func TestSyncReplicationControllerDeletes(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

@@ -164,9 +166,9 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
}

func TestDeleteFinalStateUnknown(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

@@ -197,8 +199,8 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
}

func TestSyncReplicationControllerCreates(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// A controller with 2 replicas and no pods in the store, 2 creates expected
@@ -220,8 +222,8 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Steady state for the replication controller, no Status.Replicas updates expected
@@ -262,9 +264,8 @@ func TestControllerUpdateReplicas(t *testing.T) {
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()
-
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Insufficient number of pods in the system, and Status.Replicas is wrong;
@@ -302,10 +303,9 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()
-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-
+	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

@@ -351,7 +351,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
}

func TestPodControllerLookup(t *testing.T) {
-	manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady
	testCases := []struct {
		inRCs     []*api.ReplicationController
@@ -410,16 +410,11 @@ func TestPodControllerLookup(t *testing.T) {
	}
}

-type FakeWatcher struct {
-	w *watch.FakeWatcher
-	*testclient.Fake
-}
-
func TestWatchControllers(t *testing.T) {
	fakeWatch := watch.NewFake()
-	client := &testclient.Fake{}
-	client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := &fake.Clientset{}
+	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	var testControllerSpec api.ReplicationController
@@ -460,9 +455,9 @@ func TestWatchControllers(t *testing.T) {

func TestWatchPods(t *testing.T) {
	fakeWatch := watch.NewFake()
-	client := &testclient.Fake{}
-	client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := &fake.Clientset{}
+	c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Put one rc and one pod into the controller's stores
@@ -504,7 +499,7 @@ func TestWatchPods(t *testing.T) {
}

func TestUpdatePods(t *testing.T) {
-	manager := NewReplicationManager(testclient.NewSimpleFake(), controller.NoResyncPeriodFunc, BurstReplicas)
+	manager := NewReplicationManager(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	received := make(chan string)
@@ -563,8 +558,8 @@ func TestControllerUpdateRequeue(t *testing.T) {
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()

-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
@@ -598,31 +593,31 @@ func TestControllerUpdateRequeue(t *testing.T) {

func TestControllerUpdateStatusWithFailure(t *testing.T) {
	rc := newReplicationController(1)
-	fakeClient := &testclient.Fake{}
-	fakeClient.AddReactor("get", "replicationcontrollers", func(action testclient.Action) (bool, runtime.Object, error) {
+	c := &fake.Clientset{}
+	c.AddReactor("get", "replicationcontrollers", func(action core.Action) (bool, runtime.Object, error) {
		return true, rc, nil
	})
-	fakeClient.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
+	c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, &api.ReplicationController{}, fmt.Errorf("Fake error")
	})
-	fakeRCClient := &testclient.FakeReplicationControllers{fakeClient, "default"}
+	fakeRCClient := c.Legacy().ReplicationControllers("default")
	numReplicas := 10
	updateReplicaCount(fakeRCClient, *rc, numReplicas)
	updates, gets := 0, 0
-	for _, a := range fakeClient.Actions() {
+	for _, a := range c.Actions() {
		if a.GetResource() != "replicationcontrollers" {
			t.Errorf("Unexpected action %+v", a)
			continue
		}

		switch action := a.(type) {
-		case testclient.GetAction:
+		case core.GetAction:
			gets++
			// Make sure the get is for the right rc even though the update failed.
			if action.GetName() != rc.Name {
				t.Errorf("Expected get for rc %v, got %+v instead", rc.Name, action.GetName())
			}
-		case testclient.UpdateAction:
+		case core.UpdateAction:
			updates++
			// Confirm that the update has the right status.Replicas even though the Get
			// returned an rc with replicas=1.
@@ -643,9 +638,9 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
}

func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, burstReplicas)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, burstReplicas)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

@@ -763,9 +758,9 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRCSyncExpectations(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

@@ -788,8 +783,8 @@ func TestRCSyncExpectations(t *testing.T) {
}

func TestDeleteControllerAndExpectations(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
	manager.podStoreSynced = alwaysReady

	rc := newReplicationController(1)
@@ -830,9 +825,9 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
}

func TestRCManagerNotReady(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	fakePodControl := controller.FakePodControl{}
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 2)
	manager.podControl = &fakePodControl
	manager.podStoreSynced = func() bool { return false }

@@ -867,10 +862,10 @@ func shuffle(controllers []*api.ReplicationController) []*api.ReplicationControl
}

func TestOverlappingRCs(t *testing.T) {
-	client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	c := clientset.NewForConfigOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	for i := 0; i < 5; i++ {
-		manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 10)
+		manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, 10)
		manager.podStoreSynced = alwaysReady

		// Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
@@ -910,8 +905,8 @@ func TestRCManagerInit(t *testing.T) {
	// TODO: Uncomment when fix #19254
	// defer testServer.Close()

-	client := client.NewOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
+	c := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.rcStore.Store.Add(rc)
	manager.podStoreSynced = alwaysReady
	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)
@@ -21,11 +21,11 @@ package replication
import (
	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
)

// updateReplicaCount attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
-func updateReplicaCount(rcClient client.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
+func updateReplicaCount(rcClient unversioned_legacy.ReplicationControllerInterface, controller api.ReplicationController, numReplicas int) (updateErr error) {
	// This is the steady state. It happens when the rc doesn't have any expectations, since
	// we do a periodic relist every 30s. If the generations differ but the replicas are
	// the same, a caller might've resized to the same replica count.
@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fieldpath"
 	"k8s.io/kubernetes/pkg/fields"
@@ -88,8 +89,8 @@ func describerMap(c *client.Client) map[unversioned.GroupKind]Describer {
 
 		extensions.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c},
 		extensions.Kind("DaemonSet"):               &DaemonSetDescriber{c},
+		extensions.Kind("Deployment"):              &DeploymentDescriber{clientset.FromUnversionedClient(c)},
 		extensions.Kind("Job"):                     &JobDescriber{c},
-		extensions.Kind("Deployment"):              &DeploymentDescriber{c},
 		extensions.Kind("Ingress"):                 &IngressDescriber{c},
 	}
 
@@ -1573,7 +1574,7 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
 
 // DeploymentDescriber generates information about a deployment.
 type DeploymentDescriber struct {
-	client.Interface
+	clientset.Interface
 }
 
 func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error) {
@@ -1605,7 +1606,7 @@ func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error)
 			}
 			fmt.Fprintf(out, "NewReplicationController:\t%s\n", printReplicationControllersByLabels(newRCs))
 		}
-		events, err := dd.Events(namespace).Search(d)
+		events, err := dd.Legacy().Events(namespace).Search(d)
 		if err == nil && events != nil {
 			DescribeEvents(events, out)
 		}

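A hedged sketch of the wiring these describe.go hunks introduce: the Deployment describer now embeds the generated clientset rather than the unversioned client, so it is built with clientset.FromUnversionedClient and reaches core-group resources such as Events through Legacy(). The helper name below is made up for illustration; it assumes the same package as DeploymentDescriber.

package kubectl

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// newDeploymentDescriber (hypothetical helper) mirrors the describerMap entry above:
// the unversioned *client.Client is adapted into a clientset before being embedded.
func newDeploymentDescriber(c *client.Client) *DeploymentDescriber {
	return &DeploymentDescriber{clientset.FromUnversionedClient(c)}
}
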
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 )
@@ -502,7 +503,7 @@ func TestPersistentVolumeDescriber(t *testing.T) {
 }
 
 func TestDescribeDeployment(t *testing.T) {
-	fake := testclient.NewSimpleFake(&extensions.Deployment{
+	fake := fake.NewSimpleClientset(&extensions.Deployment{
 		ObjectMeta: api.ObjectMeta{
 			Name:      "bar",
 			Namespace: "foo",
@@ -511,8 +512,7 @@ func TestDescribeDeployment(t *testing.T) {
 			Template: api.PodTemplateSpec{},
 		},
 	})
-	c := &describeClient{T: t, Namespace: "foo", Interface: fake}
-	d := DeploymentDescriber{c}
+	d := DeploymentDescriber{fake}
 	out, err := d.Describe("foo", "bar")
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)

@@ -40,6 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/validation"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/cloudprovider"
@@ -147,6 +148,7 @@ func NewMainKubelet(
 	nodeName string,
 	dockerClient dockertools.DockerInterface,
 	kubeClient client.Interface,
+	clientset clientset.Interface,
 	rootDirectory string,
 	podInfraContainerImage string,
 	resyncInterval time.Duration,
@@ -272,6 +274,7 @@ func NewMainKubelet(
 		nodeName:                       nodeName,
 		dockerClient:                   dockerClient,
 		kubeClient:                     kubeClient,
+		clientset:                      clientset,
 		rootDirectory:                  rootDirectory,
 		resyncInterval:                 resyncInterval,
 		containerRefManager:            containerRefManager,
@@ -466,6 +469,7 @@ type Kubelet struct {
 	dockerClient  dockertools.DockerInterface
 	runtimeCache  kubecontainer.RuntimeCache
 	kubeClient    client.Interface
+	clientset     clientset.Interface
 	rootDirectory string
 	podWorkers    PodWorkers
 

@@ -23,7 +23,7 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/types"
@@ -54,8 +54,8 @@ func (vh *volumeHost) GetPodPluginDir(podUID types.UID, pluginName string) strin
 	return vh.kubelet.getPodPluginDir(podUID, pluginName)
 }
 
-func (vh *volumeHost) GetKubeClient() client.Interface {
-	return vh.kubelet.kubeClient
+func (vh *volumeHost) GetKubeClient() clientset.Interface {
+	return vh.kubelet.clientset
 }
 
 func (vh *volumeHost) NewWrapperBuilder(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {

@@ -22,7 +22,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/labels"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 	podutil "k8s.io/kubernetes/pkg/util/pod"
@@ -35,20 +35,20 @@ const (
 
 // GetOldRCs returns the old RCs targeted by the given Deployment; get PodList and RCList from client interface.
 // Note that the first set of old RCs doesn't include the ones with no pods, and the second set of old RCs include all old RCs.
-func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, []*api.ReplicationController, error) {
+func GetOldRCs(deployment extensions.Deployment, c clientset.Interface) ([]*api.ReplicationController, []*api.ReplicationController, error) {
 	return GetOldRCsFromLists(deployment, c,
 		func(namespace string, options api.ListOptions) (*api.PodList, error) {
-			return c.Pods(namespace).List(options)
+			return c.Legacy().Pods(namespace).List(options)
 		},
 		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
-			rcList, err := c.ReplicationControllers(namespace).List(options)
+			rcList, err := c.Legacy().ReplicationControllers(namespace).List(options)
 			return rcList.Items, err
 		})
 }
 
 // GetOldRCsFromLists returns two sets of old RCs targeted by the given Deployment; get PodList and RCList with input functions.
 // Note that the first set of old RCs doesn't include the ones with no pods, and the second set of old RCs include all old RCs.
-func GetOldRCsFromLists(deployment extensions.Deployment, c client.Interface, getPodList func(string, api.ListOptions) (*api.PodList, error), getRcList func(string, api.ListOptions) ([]api.ReplicationController, error)) ([]*api.ReplicationController, []*api.ReplicationController, error) {
+func GetOldRCsFromLists(deployment extensions.Deployment, c clientset.Interface, getPodList func(string, api.ListOptions) (*api.PodList, error), getRcList func(string, api.ListOptions) ([]api.ReplicationController, error)) ([]*api.ReplicationController, []*api.ReplicationController, error) {
 	namespace := deployment.ObjectMeta.Namespace
 	// 1. Find all pods whose labels match deployment.Spec.Selector
 	selector := labels.SelectorFromSet(deployment.Spec.Selector)
@@ -95,17 +95,17 @@ func GetOldRCsFromLists(deployment extensions.Deployment, c client.Interface, ge
 
 // GetNewRC returns an RC that matches the intent of the given deployment; get RCList from client interface.
 // Returns nil if the new RC doesnt exist yet.
-func GetNewRC(deployment extensions.Deployment, c client.Interface) (*api.ReplicationController, error) {
+func GetNewRC(deployment extensions.Deployment, c clientset.Interface) (*api.ReplicationController, error) {
 	return GetNewRCFromList(deployment, c,
 		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
-			rcList, err := c.ReplicationControllers(namespace).List(options)
+			rcList, err := c.Legacy().ReplicationControllers(namespace).List(options)
 			return rcList.Items, err
 		})
 }
 
 // GetNewRCFromList returns an RC that matches the intent of the given deployment; get RCList with the input function.
 // Returns nil if the new RC doesnt exist yet.
-func GetNewRCFromList(deployment extensions.Deployment, c client.Interface, getRcList func(string, api.ListOptions) ([]api.ReplicationController, error)) (*api.ReplicationController, error) {
+func GetNewRCFromList(deployment extensions.Deployment, c clientset.Interface, getRcList func(string, api.ListOptions) ([]api.ReplicationController, error)) (*api.ReplicationController, error) {
 	namespace := deployment.ObjectMeta.Namespace
 	rcList, err := getRcList(namespace, api.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector)})
 	if err != nil {
@@ -157,7 +157,7 @@ func GetReplicaCountForRCs(replicationControllers []*api.ReplicationController)
 }
 
 // Returns the number of available pods corresponding to the given RCs.
-func GetAvailablePodsForRCs(c client.Interface, rcs []*api.ReplicationController, minReadySeconds int) (int, error) {
+func GetAvailablePodsForRCs(c clientset.Interface, rcs []*api.ReplicationController, minReadySeconds int) (int, error) {
 	allPods, err := getPodsForRCs(c, rcs)
 	if err != nil {
 		return 0, err
@@ -189,12 +189,12 @@ func getReadyPodsCount(pods []api.Pod, minReadySeconds int) int {
 	return readyPodCount
 }
 
-func getPodsForRCs(c client.Interface, replicationControllers []*api.ReplicationController) ([]api.Pod, error) {
+func getPodsForRCs(c clientset.Interface, replicationControllers []*api.ReplicationController) ([]api.Pod, error) {
 	allPods := []api.Pod{}
 	for _, rc := range replicationControllers {
 		selector := labels.SelectorFromSet(rc.Spec.Selector)
 		options := api.ListOptions{LabelSelector: selector}
-		podList, err := c.Pods(rc.ObjectMeta.Namespace).List(options)
+		podList, err := c.Legacy().Pods(rc.ObjectMeta.Namespace).List(options)
 		if err != nil {
 			return allPods, fmt.Errorf("error listing pods: %v", err)
 		}

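To make the call-site change concrete, a small sketch (assumed helper name, same package as the utilities) of how a consumer now passes the generated clientset into these functions; the utilities themselves fan out to c.Legacy().Pods(...) and c.Legacy().ReplicationControllers(...) as shown above.

package deployment

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// countOldRCs is a hypothetical consumer of the utilities changed above.
func countOldRCs(c clientset.Interface, d extensions.Deployment) (int, error) {
	oldRCs, _, err := GetOldRCs(d, c)
	if err != nil {
		return 0, err
	}
	return len(oldRCs), nil
}
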
@@ -25,7 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient/simple"
 	"k8s.io/kubernetes/pkg/runtime"
 )
@@ -231,7 +231,7 @@ func TestGetNewRC(t *testing.T) {
 				Body:       &test.rcList,
 			},
 		}
-		rc, err := GetNewRC(newDeployment, c.Setup(t))
+		rc, err := GetNewRC(newDeployment, c.Setup(t).Clientset)
 		if err != nil {
 			t.Errorf("In test case %s, got unexpected error %v", test.test, err)
 		}
@@ -314,7 +314,7 @@ func TestGetOldRCs(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		rcs, _, err := GetOldRCs(newDeployment, testclient.NewSimpleFake(test.objs...))
+		rcs, _, err := GetOldRCs(newDeployment, fake.NewSimpleClientset(test.objs...))
 		if err != nil {
 			t.Errorf("In test case %s, got unexpected error %v", test.test, err)
 		}

@@ -24,7 +24,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -258,7 +258,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	clientset := fake.NewSimpleClientset(pv, claim)
 
 	tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
 	if err != nil {
@@ -266,7 +266,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, clientset, nil))
 	plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
 
 	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes

@@ -73,7 +73,7 @@ func (plugin *cephfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
 
-		secretName, err := kubeClient.Secrets(pod.Namespace).Get(cephvs.SecretRef.Name)
+		secretName, err := kubeClient.Legacy().Secrets(pod.Namespace).Get(cephvs.SecretRef.Name)
 		if err != nil {
 			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, cephvs.SecretRef, err)
 			return nil, err

@@ -24,8 +24,8 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 	"k8s.io/kubernetes/pkg/volume"
@@ -40,12 +40,12 @@ func formatMap(m map[string]string) (fmtstr string) {
 	return
 }
 
-func newTestHost(t *testing.T, client client.Interface) (string, volume.VolumeHost) {
+func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
 	tempDir, err := utiltesting.MkTmpdir("downwardApi_volume_test.")
 	if err != nil {
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}
-	return tempDir, volume.NewFakeVolumeHost(tempDir, client, empty_dir.ProbeVolumePlugins())
+	return tempDir, volume.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
 }
 
 func TestCanSupport(t *testing.T) {
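The same test-side pattern repeats through the rest of this diff, so here is one hedged, self-contained sketch of it: seed the generated fake clientset with the objects the plugin will read, then hand it to the fake volume host. The helper name, pod metadata, and package placement are illustrative only.

package downwardapi

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/testing/fake"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/empty_dir"
)

// newFakeHostForTest is a hypothetical helper showing the migrated pattern:
// fake.NewSimpleClientset replaces testclient.NewSimpleFake as the seeded client.
func newFakeHostForTest(t *testing.T) (string, volume.VolumeHost) {
	clientset := fake.NewSimpleClientset(&api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "test-pod", Namespace: "test-ns"},
	})
	tmpDir, err := utiltesting.MkTmpdir("downwardApi_volume_test.")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	return tmpDir, volume.NewFakeVolumeHost(tmpDir, clientset, empty_dir.ProbeVolumePlugins())
}
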
@@ -94,7 +94,7 @@ func TestLabels(t *testing.T) {
 		"key1": "value1",
 		"key2": "value2"}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -103,7 +103,7 @@ func TestLabels(t *testing.T) {
 	})
 
 	pluginMgr := volume.VolumePluginMgr{}
-	rootDir, host := newTestHost(t, fake)
+	rootDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(rootDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -181,7 +181,7 @@ func TestAnnotations(t *testing.T) {
 		},
 	}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:        testName,
 			Namespace:   testNamespace,
@@ -190,7 +190,7 @@ func TestAnnotations(t *testing.T) {
 	})
 
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -244,7 +244,7 @@ func TestName(t *testing.T) {
 		},
 	}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -252,7 +252,7 @@ func TestName(t *testing.T) {
 	})
 
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -307,7 +307,7 @@ func TestNamespace(t *testing.T) {
 		},
 	}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -315,7 +315,7 @@ func TestNamespace(t *testing.T) {
 	})
 
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -363,7 +363,7 @@ func TestWriteTwiceNoUpdate(t *testing.T) {
 		"key1": "value1",
 		"key2": "value2"}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -371,7 +371,7 @@ func TestWriteTwiceNoUpdate(t *testing.T) {
 		},
 	})
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -449,7 +449,7 @@ func TestWriteTwiceWithUpdate(t *testing.T) {
 		"key1": "value1",
 		"key2": "value2"}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -457,7 +457,7 @@ func TestWriteTwiceWithUpdate(t *testing.T) {
 		},
 	})
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -554,7 +554,7 @@ func TestWriteWithUnixPath(t *testing.T) {
 		"a1": "value1",
 		"a2": "value2"}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -563,7 +563,7 @@ func TestWriteWithUnixPath(t *testing.T) {
 	})
 
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)
@@ -630,7 +630,7 @@ func TestWriteWithUnixPathBadPath(t *testing.T) {
 		"key2": "value2",
 	}
 
-	fake := testclient.NewSimpleFake(&api.Pod{
+	clientset := fake.NewSimpleClientset(&api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name:      testName,
 			Namespace: testNamespace,
@@ -639,7 +639,7 @@ func TestWriteWithUnixPathBadPath(t *testing.T) {
 	})
 
 	pluginMgr := volume.VolumePluginMgr{}
-	tmpDir, host := newTestHost(t, fake)
+	tmpDir, host := newTestHost(t, clientset)
 	defer os.RemoveAll(tmpDir)
 	pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
 	plugin, err := pluginMgr.FindPluginByName(downwardAPIPluginName)

@@ -22,7 +22,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -270,7 +270,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))

@@ -110,7 +110,7 @@ func (plugin *flexVolumePlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ vo
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
 
-		secretName, err := kubeClient.Secrets(pod.Namespace).Get(fv.SecretRef.Name)
+		secretName, err := kubeClient.Legacy().Secrets(pod.Namespace).Get(fv.SecretRef.Name)
 		if err != nil {
 			err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, fv.SecretRef, err)
 			return nil, err

@@ -24,7 +24,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -273,7 +273,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
 	if err != nil {

@@ -85,7 +85,7 @@ func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ vol
 	source, _ := plugin.getGlusterVolumeSource(spec)
 	ep_name := source.EndpointsName
 	ns := pod.Namespace
-	ep, err := plugin.host.GetKubeClient().Endpoints(ns).Get(ep_name)
+	ep, err := plugin.host.GetKubeClient().Legacy().Endpoints(ns).Get(ep_name)
 	if err != nil {
 		glog.Errorf("glusterfs: failed to get endpoints %s[%v]", ep_name, err)
 		return nil, err

@@ -22,7 +22,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -219,7 +219,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		}},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim, ep)
+	client := fake.NewSimpleClientset(pv, claim, ep)
 
 	plugMgr := volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))

@@ -26,7 +26,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/volume"
@@ -256,7 +256,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil))

@@ -22,7 +22,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -270,7 +270,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))

@@ -22,7 +22,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -265,7 +265,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost(tmpDir, client, nil))

@@ -54,7 +54,7 @@ func (plugin *persistentClaimPlugin) CanSupport(spec *volume.Spec) bool {
 }
 
 func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
-	claim, err := plugin.host.GetKubeClient().PersistentVolumeClaims(pod.Namespace).Get(spec.Volume.PersistentVolumeClaim.ClaimName)
+	claim, err := plugin.host.GetKubeClient().Legacy().PersistentVolumeClaims(pod.Namespace).Get(spec.Volume.PersistentVolumeClaim.ClaimName)
 	if err != nil {
 		glog.Errorf("Error finding claim: %+v\n", spec.Volume.PersistentVolumeClaim.ClaimName)
 		return nil, err
@@ -64,7 +64,7 @@ func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod,
 		return nil, fmt.Errorf("The claim %+v is not yet bound to a volume", claim.Name)
 	}
 
-	pv, err := plugin.host.GetKubeClient().PersistentVolumes().Get(claim.Spec.VolumeName)
+	pv, err := plugin.host.GetKubeClient().Legacy().PersistentVolumes().Get(claim.Spec.VolumeName)
 	if err != nil {
 		glog.Errorf("Error finding persistent volume for claim: %+v\n", claim.Name)
 		return nil, err

@@ -23,8 +23,8 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	utilstrings "k8s.io/kubernetes/pkg/util/strings"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -35,12 +35,12 @@ import (
 
 // newTestHost returns the temp directory and the VolumeHost created.
 // Please be sure to cleanup the temp directory once done!
-func newTestHost(t *testing.T, fakeKubeClient client.Interface) (string, volume.VolumeHost) {
+func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
 	tempDir, err := utiltesting.MkTmpdir("persistent_volume_test.")
 	if err != nil {
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}
-	return tempDir, volume.NewFakeVolumeHost(tempDir, fakeKubeClient, testProbeVolumePlugins())
+	return tempDir, volume.NewFakeVolumeHost(tempDir, clientset, testProbeVolumePlugins())
 }
 
 func TestCanSupport(t *testing.T) {
@@ -237,7 +237,7 @@ func TestNewBuilder(t *testing.T) {
 	}
 
 	for _, item := range tests {
-		client := testclient.NewSimpleFake(item.pv, item.claim)
+		client := fake.NewSimpleClientset(item.pv, item.claim)
 
 		plugMgr := volume.VolumePluginMgr{}
 		tempDir, vh := newTestHost(t, client)
@@ -290,7 +290,7 @@ func TestNewBuilderClaimNotBound(t *testing.T) {
 			ClaimName: "claimC",
 		},
 	}
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
 	tempDir, vh := newTestHost(t, client)

@@ -24,7 +24,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/types"
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -138,7 +138,7 @@ type VolumeHost interface {
 	GetPodPluginDir(podUID types.UID, pluginName string) string
 
 	// GetKubeClient returns a client interface
-	GetKubeClient() client.Interface
+	GetKubeClient() clientset.Interface
 
 	// NewWrapperBuilder finds an appropriate plugin with which to handle
 	// the provided spec.  This is used to implement volume plugins which

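A hedged sketch of the plugin-side consequence of this interface change: GetKubeClient() now hands back the generated clientset, so core resources such as Secrets are fetched through the Legacy() group, as the cephfs, flexvolume, rbd and secret hunks in this diff do. The helper below is illustrative only and assumes it sits in the same package as VolumeHost.

package volume

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// getSecret is a hypothetical helper a plugin could use against the updated VolumeHost.
func getSecret(host VolumeHost, namespace, name string) (*api.Secret, error) {
	kubeClient := host.GetKubeClient()
	if kubeClient == nil {
		return nil, fmt.Errorf("Cannot get kube client")
	}
	// Core-group access goes through Legacy() on the generated clientset.
	secret, err := kubeClient.Legacy().Secrets(namespace).Get(name)
	if err != nil {
		return nil, fmt.Errorf("Couldn't get secret %v/%v err: %v", namespace, name, err)
	}
	return secret, nil
}
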
@@ -84,7 +84,7 @@ func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.Vo
 			return nil, fmt.Errorf("Cannot get kube client")
 		}
 
-		secretName, err := kubeClient.Secrets(pod.Namespace).Get(source.SecretRef.Name)
+		secretName, err := kubeClient.Legacy().Secrets(pod.Namespace).Get(source.SecretRef.Name)
 		if err != nil {
 			glog.Errorf("Couldn't get secret %v/%v", pod.Namespace, source.SecretRef)
 			return nil, err

@@ -22,7 +22,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -222,7 +222,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 		},
 	}
 
-	client := testclient.NewSimpleFake(pv, claim)
+	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
 	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))

@@ -165,7 +165,7 @@ func (b *secretVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
 		return fmt.Errorf("Cannot setup secret volume %v because kube client is not configured", b.volName)
 	}
 
-	secret, err := kubeClient.Secrets(b.pod.Namespace).Get(b.secretName)
+	secret, err := kubeClient.Legacy().Secrets(b.pod.Namespace).Get(b.secretName)
 	if err != nil {
 		glog.Errorf("Couldn't get secret %v/%v", b.pod.Namespace, b.secretName)
 		return err

@@ -25,8 +25,8 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
+	"k8s.io/kubernetes/pkg/client/testing/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
@@ -34,13 +34,13 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util"
 )
 
-func newTestHost(t *testing.T, client client.Interface) (string, volume.VolumeHost) {
+func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
 	tempDir, err := ioutil.TempDir("/tmp", "secret_volume_test.")
 	if err != nil {
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}
 
-	return tempDir, volume.NewFakeVolumeHost(tempDir, client, empty_dir.ProbeVolumePlugins())
+	return tempDir, volume.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
 }
 
 func TestCanSupport(t *testing.T) {
@@ -72,7 +72,7 @@ func TestPlugin(t *testing.T) {
 
 		volumeSpec    = volumeSpec(testVolumeName, testName)
 		secret        = secret(testNamespace, testName)
-		client        = testclient.NewSimpleFake(&secret)
+		client        = fake.NewSimpleClientset(&secret)
 		pluginMgr     = volume.VolumePluginMgr{}
 		rootDir, host = newTestHost(t, client)
 	)
@@ -135,7 +135,7 @@ func TestPluginIdempotent(t *testing.T) {
 
 		volumeSpec    = volumeSpec(testVolumeName, testName)
 		secret        = secret(testNamespace, testName)
-		client        = testclient.NewSimpleFake(&secret)
+		client        = fake.NewSimpleClientset(&secret)
 		pluginMgr     = volume.VolumePluginMgr{}
 		rootDir, host = newTestHost(t, client)
 	)
@@ -196,7 +196,7 @@ func TestPluginReboot(t *testing.T) {
 
 		volumeSpec    = volumeSpec(testVolumeName, testName)
 		secret        = secret(testNamespace, testName)
-		client        = testclient.NewSimpleFake(&secret)
+		client        = fake.NewSimpleClientset(&secret)
 		pluginMgr     = volume.VolumePluginMgr{}
 		rootDir, host = newTestHost(t, client)
 	)

@@ -26,7 +26,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
@@ -38,14 +38,14 @@ import (
 // fakeVolumeHost is useful for testing volume plugins.
 type fakeVolumeHost struct {
 	rootDir    string
-	kubeClient client.Interface
+	kubeClient clientset.Interface
 	pluginMgr  VolumePluginMgr
 	cloud      cloudprovider.Interface
 	mounter    mount.Interface
 	writer     io.Writer
 }
 
-func NewFakeVolumeHost(rootDir string, kubeClient client.Interface, plugins []VolumePlugin) *fakeVolumeHost {
+func NewFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeVolumeHost {
 	host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: nil}
 	host.mounter = &mount.FakeMounter{}
 	host.writer = &io.StdWriter{}
@@ -65,7 +65,7 @@ func (f *fakeVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) st
 	return path.Join(f.rootDir, "pods", string(podUID), "plugins", pluginName)
 }
 
-func (f *fakeVolumeHost) GetKubeClient() client.Interface {
+func (f *fakeVolumeHost) GetKubeClient() clientset.Interface {
 	return f.kubeClient
 }
 

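The hunks above and below all apply the same mechanical migration: callers stop using the flat unversioned client (where accessors such as Pods() or Secrets() hang directly off the client) and instead go through a generated clientset that groups resources by API group, reaching core resources via Legacy() and Deployments via Extensions(); tests likewise swap testclient.NewSimpleFake for fake.NewSimpleClientset. The sketch below is not part of this commit and does not use the real Kubernetes packages; it is a minimal stand-in with hypothetical names (groupedClient, legacyGroup, fakeClientset) that only illustrates why every call site changes shape from c.Pods(ns) to c.Legacy().Pods(ns).

    package main

    import "fmt"

    // podGetter stands in for a typed per-resource client.
    type podGetter interface {
        Get(name string) (string, error)
    }

    // groupedClient mimics a generated clientset: resources are reached
    // through an API-group accessor instead of directly off the client.
    type groupedClient interface {
        Legacy() legacyGroup
    }

    type legacyGroup interface {
        Pods(namespace string) podGetter
    }

    // fakeClientset is a stand-in for a test double in the spirit of
    // fake.NewSimpleClientset; it is not the real fake clientset.
    type fakeClientset struct{}

    func (fakeClientset) Legacy() legacyGroup { return fakeLegacy{} }

    type fakeLegacy struct{}

    func (fakeLegacy) Pods(namespace string) podGetter { return fakePods{namespace} }

    type fakePods struct{ namespace string }

    func (f fakePods) Get(name string) (string, error) {
        // A real fake would serve objects seeded at construction time.
        return f.namespace + "/" + name, nil
    }

    func main() {
        var c groupedClient = fakeClientset{}
        // Old call shape: c.Pods("default").Get("recycler-pod")
        // New call shape: the extra Legacy() hop selects the core API group.
        pod, _ := c.Legacy().Pods("default").Get("recycler-pod")
        fmt.Println(pod)
    }

The extra group accessor is the whole point of the refactor: once resources are addressed per API group, a single clientset can serve both the core ("legacy") API and newer groups such as extensions without flattening them into one interface.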
@@ -22,7 +22,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/watch"
@@ -36,7 +36,7 @@ import (
 // An attempt to delete a recycler pod is always attempted before returning.
 // 	pod - the pod designed by a volume plugin to recycle the volume
 //	client - kube client for API operations.
-func RecycleVolumeByWatchingPodUntilCompletion(pod *api.Pod, kubeClient client.Interface) error {
+func RecycleVolumeByWatchingPodUntilCompletion(pod *api.Pod, kubeClient clientset.Interface) error {
 	return internalRecycleVolumeByWatchingPodUntilCompletion(pod, newRecyclerClient(kubeClient))
 }
 
@@ -80,24 +80,24 @@ type recyclerClient interface {
 	WatchPod(name, namespace, resourceVersion string, stopChannel chan struct{}) func() *api.Pod
 }
 
-func newRecyclerClient(client client.Interface) recyclerClient {
+func newRecyclerClient(client clientset.Interface) recyclerClient {
 	return &realRecyclerClient{client}
 }
 
 type realRecyclerClient struct {
-	client client.Interface
+	client clientset.Interface
 }
 
 func (c *realRecyclerClient) CreatePod(pod *api.Pod) (*api.Pod, error) {
-	return c.client.Pods(pod.Namespace).Create(pod)
+	return c.client.Legacy().Pods(pod.Namespace).Create(pod)
 }
 
 func (c *realRecyclerClient) GetPod(name, namespace string) (*api.Pod, error) {
-	return c.client.Pods(namespace).Get(name)
+	return c.client.Legacy().Pods(namespace).Get(name)
 }
 
 func (c *realRecyclerClient) DeletePod(name, namespace string) error {
-	return c.client.Pods(namespace).Delete(name, nil)
+	return c.client.Legacy().Pods(namespace).Delete(name, nil)
 }
 
 // WatchPod returns a ListWatch for watching a pod.  The stopChannel is used
@@ -109,11 +109,11 @@ func (c *realRecyclerClient) WatchPod(name, namespace, resourceVersion string, s
 	podLW := &cache.ListWatch{
 		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
 			options.FieldSelector = fieldSelector
-			return c.client.Pods(namespace).List(options)
+			return c.client.Legacy().Pods(namespace).List(options)
 		},
 		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
 			options.FieldSelector = fieldSelector
-			return c.client.Pods(namespace).Watch(options)
+			return c.client.Legacy().Pods(namespace).Watch(options)
 		},
 	}
 	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

@@ -18,11 +18,11 @@ package volume
 
 import (
 	"io/ioutil"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
-
 	"os"
 	"path"
 
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 )
 
 // Volume represents a directory used by pods or hosts on a node.

@@ -19,13 +19,15 @@ limitations under the License.
 package volume
 
 import (
-	"k8s.io/kubernetes/pkg/util/chmod"
-	"k8s.io/kubernetes/pkg/util/chown"
 	"path/filepath"
 	"syscall"
 
-	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/util/chmod"
+	"k8s.io/kubernetes/pkg/util/chown"
+
 	"os"
+
+	"github.com/golang/glog"
 )
 
 const (

@@ -22,7 +22,7 @@ import (
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	"k8s.io/kubernetes/pkg/api"
 | 
						"k8s.io/kubernetes/pkg/api"
 | 
				
			||||||
	"k8s.io/kubernetes/pkg/apis/extensions"
 | 
						"k8s.io/kubernetes/pkg/apis/extensions"
 | 
				
			||||||
	client "k8s.io/kubernetes/pkg/client/unversioned"
 | 
						clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 | 
				
			||||||
	"k8s.io/kubernetes/pkg/labels"
 | 
						"k8s.io/kubernetes/pkg/labels"
 | 
				
			||||||
	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 | 
						deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 | 
				
			||||||
	"k8s.io/kubernetes/pkg/util/intstr"
 | 
						"k8s.io/kubernetes/pkg/util/intstr"
 | 
				
			||||||
@@ -127,8 +127,8 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
 | 
				
			|||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
// checkDeploymentRevision checks if the input deployment's and its new RC's revision and images are as expected.
 | 
					// checkDeploymentRevision checks if the input deployment's and its new RC's revision and images are as expected.
 | 
				
			||||||
func checkDeploymentRevision(c *client.Client, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *api.ReplicationController) {
 | 
					func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *api.ReplicationController) {
 | 
				
			||||||
	deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	// Check revision of the new RC of this deployment
 | 
						// Check revision of the new RC of this deployment
 | 
				
			||||||
	newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
						newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
@@ -151,35 +151,39 @@ func checkDeploymentRevision(c *client.Client, ns, deploymentName, revision, ima
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
func testNewDeployment(f *Framework) {
 | 
					func testNewDeployment(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						// TODO: remove unversionedClient when the refactoring is done. Currently some
 | 
				
			||||||
 | 
						// functions like verifyPod still expects a unversioned#Client.
 | 
				
			||||||
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(f.Client)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	deploymentName := "nginx-deployment"
 | 
						deploymentName := "nginx-deployment"
 | 
				
			||||||
	podLabels := map[string]string{"name": "nginx"}
 | 
						podLabels := map[string]string{"name": "nginx"}
 | 
				
			||||||
	replicas := 1
 | 
						replicas := 1
 | 
				
			||||||
	Logf("Creating simple deployment %s", deploymentName)
 | 
						Logf("Creating simple deployment %s", deploymentName)
 | 
				
			||||||
	_, err := c.Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil))
 | 
						_, err := c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
		// TODO: remove this once we can delete rcs with deployment
 | 
							// TODO: remove this once we can delete rcs with deployment
 | 
				
			||||||
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
							newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Check that deployment is created fine.
 | 
						// Check that deployment is created fine.
 | 
				
			||||||
	deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Verify that the required pods have come up.
 | 
						// Verify that the required pods have come up.
 | 
				
			||||||
	err = verifyPods(c, ns, "nginx", false, replicas)
 | 
						err = verifyPods(unversionedClient, ns, "nginx", false, replicas)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in waiting for pods to come up: %s", err)
 | 
							Logf("error in waiting for pods to come up: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	// DeploymentStatus should be appropriately updated.
 | 
						// DeploymentStatus should be appropriately updated.
 | 
				
			||||||
	deployment, err = c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	Expect(deployment.Status.Replicas).Should(Equal(replicas))
 | 
						Expect(deployment.Status.Replicas).Should(Equal(replicas))
 | 
				
			||||||
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))
 | 
						Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))
 | 
				
			||||||
@@ -190,7 +194,10 @@ func testNewDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
func testRollingUpdateDeployment(f *Framework) {
 | 
					func testRollingUpdateDeployment(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						// TODO: remove unversionedClient when the refactoring is done. Currently some
 | 
				
			||||||
 | 
						// functions like verifyPod still expects a unversioned#Client.
 | 
				
			||||||
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	// Create nginx pods.
 | 
						// Create nginx pods.
 | 
				
			||||||
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
 | 
						deploymentPodLabels := map[string]string{"name": "sample-pod"}
 | 
				
			||||||
	rcPodLabels := map[string]string{
 | 
						rcPodLabels := map[string]string{
 | 
				
			||||||
@@ -200,14 +207,14 @@ func testRollingUpdateDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	rcName := "nginx-controller"
 | 
						rcName := "nginx-controller"
 | 
				
			||||||
	replicas := 3
 | 
						replicas := 3
 | 
				
			||||||
	_, err := c.ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
 | 
						_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		Logf("deleting replication controller %s", rcName)
 | 
							Logf("deleting replication controller %s", rcName)
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(rcName)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Verify that the required pods have come up.
 | 
						// Verify that the required pods have come up.
 | 
				
			||||||
	err = verifyPods(c, ns, "sample-pod", false, 3)
 | 
						err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in waiting for pods to come up: %s", err)
 | 
							Logf("error in waiting for pods to come up: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -216,17 +223,17 @@ func testRollingUpdateDeployment(f *Framework) {
 | 
				
			|||||||
	// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
						// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
				
			||||||
	deploymentName := "redis-deployment"
 | 
						deploymentName := "redis-deployment"
 | 
				
			||||||
	Logf("Creating deployment %s", deploymentName)
 | 
						Logf("Creating deployment %s", deploymentName)
 | 
				
			||||||
	_, err = c.Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
 | 
						_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
		// TODO: remove this once we can delete rcs with deployment
 | 
							// TODO: remove this once we can delete rcs with deployment
 | 
				
			||||||
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
							newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
 | 
						err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
 | 
				
			||||||
@@ -238,7 +245,10 @@ func testRollingUpdateDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
func testRollingUpdateDeploymentEvents(f *Framework) {
 | 
					func testRollingUpdateDeploymentEvents(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						// TODO: remove unversionedClient when the refactoring is done. Currently some
 | 
				
			||||||
 | 
						// functions like verifyPod still expects a unversioned#Client.
 | 
				
			||||||
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	// Create nginx pods.
 | 
						// Create nginx pods.
 | 
				
			||||||
	deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
 | 
						deploymentPodLabels := map[string]string{"name": "sample-pod-2"}
 | 
				
			||||||
	rcPodLabels := map[string]string{
 | 
						rcPodLabels := map[string]string{
 | 
				
			||||||
@@ -254,14 +264,14 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
 | 
				
			|||||||
	rc := newRC(rcName, replicas, rcPodLabels, "nginx", "nginx")
 | 
						rc := newRC(rcName, replicas, rcPodLabels, "nginx", "nginx")
 | 
				
			||||||
	rc.Annotations = annotations
 | 
						rc.Annotations = annotations
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	_, err := c.ReplicationControllers(ns).Create(rc)
 | 
						_, err := c.Legacy().ReplicationControllers(ns).Create(rc)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		Logf("deleting replication controller %s", rcName)
 | 
							Logf("deleting replication controller %s", rcName)
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(rcName)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Verify that the required pods have come up.
 | 
						// Verify that the required pods have come up.
 | 
				
			||||||
	err = verifyPods(c, ns, "sample-pod-2", false, 1)
 | 
						err = verifyPods(unversionedClient, ns, "sample-pod-2", false, 1)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in waiting for pods to come up: %s", err)
 | 
							Logf("error in waiting for pods to come up: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -270,26 +280,26 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
 | 
				
			|||||||
	// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
						// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
				
			||||||
	deploymentName := "redis-deployment-2"
 | 
						deploymentName := "redis-deployment-2"
 | 
				
			||||||
	Logf("Creating deployment %s", deploymentName)
 | 
						Logf("Creating deployment %s", deploymentName)
 | 
				
			||||||
	_, err = c.Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
 | 
						_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
		// TODO: remove this once we can delete rcs with deployment
 | 
							// TODO: remove this once we can delete rcs with deployment
 | 
				
			||||||
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
							newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
 | 
						err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	// Verify that the pods were scaled up and down as expected. We use events to verify that.
 | 
						// Verify that the pods were scaled up and down as expected. We use events to verify that.
 | 
				
			||||||
	deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	waitForEvents(c, ns, deployment, 2)
 | 
						waitForEvents(unversionedClient, ns, deployment, 2)
 | 
				
			||||||
	events, err := c.Events(ns).Search(deployment)
 | 
						events, err := c.Legacy().Events(ns).Search(deployment)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in listing events: %s", err)
 | 
							Logf("error in listing events: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -308,7 +318,10 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
func testRecreateDeployment(f *Framework) {
 | 
					func testRecreateDeployment(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						// TODO: remove unversionedClient when the refactoring is done. Currently some
 | 
				
			||||||
 | 
						// functions like verifyPod still expects a unversioned#Client.
 | 
				
			||||||
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	// Create nginx pods.
 | 
						// Create nginx pods.
 | 
				
			||||||
	deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
 | 
						deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
 | 
				
			||||||
	rcPodLabels := map[string]string{
 | 
						rcPodLabels := map[string]string{
 | 
				
			||||||
@@ -318,14 +331,14 @@ func testRecreateDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	rcName := "nginx-controller"
 | 
						rcName := "nginx-controller"
 | 
				
			||||||
	replicas := 3
 | 
						replicas := 3
 | 
				
			||||||
	_, err := c.ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
 | 
						_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		Logf("deleting replication controller %s", rcName)
 | 
							Logf("deleting replication controller %s", rcName)
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(rcName)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Verify that the required pods have come up.
 | 
						// Verify that the required pods have come up.
 | 
				
			||||||
	err = verifyPods(c, ns, "sample-pod-3", false, 3)
 | 
						err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in waiting for pods to come up: %s", err)
 | 
							Logf("error in waiting for pods to come up: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -334,31 +347,31 @@ func testRecreateDeployment(f *Framework) {
 | 
				
			|||||||
	// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
						// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
				
			||||||
	deploymentName := "redis-deployment-3"
 | 
						deploymentName := "redis-deployment-3"
 | 
				
			||||||
	Logf("Creating deployment %s", deploymentName)
 | 
						Logf("Creating deployment %s", deploymentName)
 | 
				
			||||||
	_, err = c.Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
 | 
						_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
		// TODO: remove this once we can delete rcs with deployment
 | 
							// TODO: remove this once we can delete rcs with deployment
 | 
				
			||||||
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
							newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
 | 
						err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		deployment, _ := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, _ := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Logf("deployment = %+v", deployment)
 | 
							Logf("deployment = %+v", deployment)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Verify that the pods were scaled up and down as expected. We use events to verify that.
 | 
						// Verify that the pods were scaled up and down as expected. We use events to verify that.
 | 
				
			||||||
	deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	waitForEvents(c, ns, deployment, 2)
 | 
						waitForEvents(unversionedClient, ns, deployment, 2)
 | 
				
			||||||
	events, err := c.Events(ns).Search(deployment)
 | 
						events, err := c.Legacy().Events(ns).Search(deployment)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in listing events: %s", err)
 | 
							Logf("error in listing events: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -378,7 +391,8 @@ func testRecreateDeployment(f *Framework) {
 | 
				
			|||||||
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
 | 
					// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
 | 
				
			||||||
func testDeploymentCleanUpPolicy(f *Framework) {
 | 
					func testDeploymentCleanUpPolicy(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	// Create nginx pods.
 | 
						// Create nginx pods.
 | 
				
			||||||
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
 | 
						deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
 | 
				
			||||||
	rcPodLabels := map[string]string{
 | 
						rcPodLabels := map[string]string{
 | 
				
			||||||
@@ -389,11 +403,11 @@ func testDeploymentCleanUpPolicy(f *Framework) {
 | 
				
			|||||||
	replicas := 1
 | 
						replicas := 1
 | 
				
			||||||
	revisionHistoryLimit := new(int)
 | 
						revisionHistoryLimit := new(int)
 | 
				
			||||||
	*revisionHistoryLimit = 0
 | 
						*revisionHistoryLimit = 0
 | 
				
			||||||
	_, err := c.ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
 | 
						_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Verify that the required pods have come up.
 | 
						// Verify that the required pods have come up.
 | 
				
			||||||
	err = verifyPods(c, ns, "cleanup-pod", false, 1)
 | 
						err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in waiting for pods to come up: %s", err)
 | 
							Logf("error in waiting for pods to come up: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -402,17 +416,17 @@ func testDeploymentCleanUpPolicy(f *Framework) {
 | 
				
			|||||||
	// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
						// Create a deployment to delete nginx pods and instead bring up redis pods.
 | 
				
			||||||
	deploymentName := "redis-deployment"
 | 
						deploymentName := "redis-deployment"
 | 
				
			||||||
	Logf("Creating deployment %s", deploymentName)
 | 
						Logf("Creating deployment %s", deploymentName)
 | 
				
			||||||
	_, err = c.Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
 | 
						_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
		// TODO: remove this once we can delete rcs with deployment
 | 
							// TODO: remove this once we can delete rcs with deployment
 | 
				
			||||||
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
							newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
 | 
						err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
 | 
				
			||||||
@@ -423,7 +437,10 @@ func testDeploymentCleanUpPolicy(f *Framework) {
 | 
				
			|||||||
// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
 | 
					// i.e. we can change desired state and kick off rolling update, then change desired state again before it finishes.
 | 
				
			||||||
func testRolloverDeployment(f *Framework) {
 | 
					func testRolloverDeployment(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						// TODO: remove unversionedClient when the refactoring is done. Currently some
 | 
				
			||||||
 | 
						// functions like verifyPod still expects a unversioned#Client.
 | 
				
			||||||
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	podName := "rollover-pod"
 | 
						podName := "rollover-pod"
 | 
				
			||||||
	deploymentPodLabels := map[string]string{"name": podName}
 | 
						deploymentPodLabels := map[string]string{"name": podName}
 | 
				
			||||||
	rcPodLabels := map[string]string{
 | 
						rcPodLabels := map[string]string{
 | 
				
			||||||
@@ -433,14 +450,14 @@ func testRolloverDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	rcName := "nginx-controller"
 | 
						rcName := "nginx-controller"
 | 
				
			||||||
	rcReplicas := 4
 | 
						rcReplicas := 4
 | 
				
			||||||
	_, err := c.ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
 | 
						_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		Logf("deleting replication controller %s", rcName)
 | 
							Logf("deleting replication controller %s", rcName)
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(rcName)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Verify that the required pods have come up.
 | 
						// Verify that the required pods have come up.
 | 
				
			||||||
	err = verifyPods(c, ns, podName, false, rcReplicas)
 | 
						err = verifyPods(unversionedClient, ns, podName, false, rcReplicas)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		Logf("error in waiting for pods to come up: %s", err)
 | 
							Logf("error in waiting for pods to come up: %s", err)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
@@ -459,23 +476,23 @@ func testRolloverDeployment(f *Framework) {
 | 
				
			|||||||
		MaxSurge:        intstr.FromInt(1),
 | 
							MaxSurge:        intstr.FromInt(1),
 | 
				
			||||||
		MinReadySeconds: deploymentMinReadySeconds,
 | 
							MinReadySeconds: deploymentMinReadySeconds,
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	_, err = c.Deployments(ns).Create(newDeployment)
 | 
						_, err = c.Extensions().Deployments(ns).Create(newDeployment)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
							deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
		// TODO: remove this once we can delete rcs with deployment
 | 
							// TODO: remove this once we can delete rcs with deployment
 | 
				
			||||||
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
							newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
							Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Verify that the pods were scaled up and down as expected. We use events to verify that.
 | 
						// Verify that the pods were scaled up and down as expected. We use events to verify that.
 | 
				
			||||||
	deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	// Make sure the deployment starts to scale up and down RCs
 | 
						// Make sure the deployment starts to scale up and down RCs
 | 
				
			||||||
	waitForPartialEvents(c, ns, deployment, 2)
 | 
						waitForPartialEvents(unversionedClient, ns, deployment, 2)
 | 
				
			||||||
	// Check if it's updated to revision 1 correctly
 | 
						// Check if it's updated to revision 1 correctly
 | 
				
			||||||
	_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
 | 
						_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -486,7 +503,7 @@ func testRolloverDeployment(f *Framework) {
 | 
				
			|||||||
	newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
 | 
						newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
 | 
				
			||||||
	newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
 | 
						newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
 | 
				
			||||||
	Logf("updating deployment %s", deploymentName)
 | 
						Logf("updating deployment %s", deploymentName)
 | 
				
			||||||
	_, err = c.Deployments(ns).Update(newDeployment)
 | 
						_, err = c.Extensions().Deployments(ns).Update(newDeployment)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds)
 | 
						err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds)
 | 
				
			||||||
@@ -498,22 +515,25 @@ func testRolloverDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
func testPausedDeployment(f *Framework) {
 | 
					func testPausedDeployment(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						// TODO: remove unversionedClient when the refactoring is done. Currently some
 | 
				
			||||||
 | 
						// functions like verifyPod still expects a unversioned#Client.
 | 
				
			||||||
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	deploymentName := "nginx"
 | 
						deploymentName := "nginx"
 | 
				
			||||||
	podLabels := map[string]string{"name": "nginx"}
 | 
						podLabels := map[string]string{"name": "nginx"}
 | 
				
			||||||
	d := newDeployment(deploymentName, 1, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
 | 
						d := newDeployment(deploymentName, 1, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
 | 
				
			||||||
	d.Spec.Paused = true
 | 
						d.Spec.Paused = true
 | 
				
			||||||
	Logf("Creating paused deployment %s", deploymentName)
 | 
						Logf("Creating paused deployment %s", deploymentName)
 | 
				
			||||||
	_, err := c.Deployments(ns).Create(d)
 | 
						_, err := c.Extensions().Deployments(ns).Create(d)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	defer func() {
 | 
						defer func() {
 | 
				
			||||||
		_, err := c.Deployments(ns).Get(deploymentName)
 | 
							_, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
		Expect(err).NotTo(HaveOccurred())
 | 
							Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
		Logf("deleting deployment %s", deploymentName)
 | 
							Logf("deleting deployment %s", deploymentName)
 | 
				
			||||||
		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
							Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
	}()
 | 
						}()
 | 
				
			||||||
	// Check that deployment is created fine.
 | 
						// Check that deployment is created fine.
 | 
				
			||||||
	deployment, err := c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Verify that there is no latest state realized for the new deployment.
 | 
						// Verify that there is no latest state realized for the new deployment.
 | 
				
			||||||
@@ -526,11 +546,11 @@ func testPausedDeployment(f *Framework) {
 | 
				
			|||||||
 | 
					
 | 
				
			||||||
	// Update the deployment to run
 | 
						// Update the deployment to run
 | 
				
			||||||
	deployment.Spec.Paused = false
 | 
						deployment.Spec.Paused = false
 | 
				
			||||||
	deployment, err = c.Deployments(ns).Update(deployment)
 | 
						deployment, err = c.Extensions().Deployments(ns).Update(deployment)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	opts := api.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}
 | 
						opts := api.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}
 | 
				
			||||||
	w, err := c.ReplicationControllers(ns).Watch(opts)
 | 
						w, err := c.Legacy().ReplicationControllers(ns).Watch(opts)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	select {
 | 
						select {
 | 
				
			||||||
@@ -545,14 +565,14 @@ func testPausedDeployment(f *Framework) {
 | 
				
			|||||||
	// The paused deployment shouldn't recreate a new one.
 | 
						// The paused deployment shouldn't recreate a new one.
 | 
				
			||||||
	deployment.Spec.Paused = true
 | 
						deployment.Spec.Paused = true
 | 
				
			||||||
	deployment.ResourceVersion = ""
 | 
						deployment.ResourceVersion = ""
 | 
				
			||||||
	deployment, err = c.Deployments(ns).Update(deployment)
 | 
						deployment, err = c.Extensions().Deployments(ns).Update(deployment)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
						newRC, err := deploymentutil.GetNewRC(*deployment, c)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
	Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
 | 
						Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	deployment, err = c.Deployments(ns).Get(deploymentName)
 | 
						deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 | 
				
			||||||
	Expect(err).NotTo(HaveOccurred())
 | 
						Expect(err).NotTo(HaveOccurred())
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if !deployment.Spec.Paused {
 | 
						if !deployment.Spec.Paused {
 | 
				
			||||||
@@ -572,7 +592,8 @@ func testPausedDeployment(f *Framework) {
 | 
				
			|||||||
// and then rollback to last revision.
 | 
					// and then rollback to last revision.
 | 
				
			||||||
func testRollbackDeployment(f *Framework) {
 | 
					func testRollbackDeployment(f *Framework) {
 | 
				
			||||||
	ns := f.Namespace.Name
 | 
						ns := f.Namespace.Name
 | 
				
			||||||
	c := f.Client
 | 
						unversionedClient := f.Client
 | 
				
			||||||
 | 
						c := clientset.FromUnversionedClient(unversionedClient)
 | 
				
			||||||
	podName := "nginx"
 | 
						podName := "nginx"
 | 
				
			||||||
	deploymentPodLabels := map[string]string{"name": podName}
 | 
						deploymentPodLabels := map[string]string{"name": podName}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -583,34 +604,34 @@ func testRollbackDeployment(f *Framework) {
 	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
 	Logf("Creating deployment %s", deploymentName)
 	d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
-	_, err := c.Deployments(ns).Create(d)
+	_, err := c.Extensions().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())
 	defer func() {
-		deployment, err := c.Deployments(ns).Get(deploymentName)
+		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 		Expect(err).NotTo(HaveOccurred())
 		Logf("deleting deployment %s", deploymentName)
-		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
+		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 		// TODO: remove this once we can delete rcs with deployment
 		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 		Expect(err).NotTo(HaveOccurred())
-		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
+		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 		oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
 		Expect(err).NotTo(HaveOccurred())
 		for _, oldRC := range oldRCs {
-			Expect(c.ReplicationControllers(ns).Delete(oldRC.Name)).NotTo(HaveOccurred())
+			Expect(c.Legacy().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
 		}
 	}()
 	// Check that deployment is created fine.
-	deployment, err := c.Deployments(ns).Get(deploymentName)
+	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Verify that the required pods have come up.
-	err = verifyPods(c, ns, "nginx", false, deploymentReplicas)
+	err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
 	if err != nil {
 		Logf("error in waiting for pods to come up: %s", err)
 		Expect(err).NotTo(HaveOccurred())
 	}
-	deployment, err = c.Deployments(ns).Get(deploymentName)
+	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 	// DeploymentStatus should be appropriately updated.
 	Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
@@ -625,7 +646,7 @@ func testRollbackDeployment(f *Framework) {
 	d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
 	d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
 	Logf("updating deployment %s", deploymentName)
-	_, err = c.Deployments(ns).Update(d)
+	_, err = c.Extensions().Deployments(ns).Update(d)
 	Expect(err).NotTo(HaveOccurred())
 
 	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
@@ -638,7 +659,7 @@ func testRollbackDeployment(f *Framework) {
 	revision := int64(1)
 	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback := newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Deployments(ns).Rollback(rollback)
+	err = c.Extensions().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())
 
 	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
@@ -651,7 +672,7 @@ func testRollbackDeployment(f *Framework) {
 	revision = 0
 	Logf("rolling back deployment %s to last revision", deploymentName)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Deployments(ns).Rollback(rollback)
+	err = c.Extensions().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())
 
 	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
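The rollback steps in these hunks all have one shape: build a rollback request for a target revision (0 means "the previous revision") and post it through the extensions group of the clientset. The sketch below is not part of the commit; the test builds the object through its newDeploymentRollback helper, and the DeploymentRollback field names here are an assumption about the extensions API of this era.

package e2e

import (
	"k8s.io/kubernetes/pkg/apis/extensions"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// rollbackTo posts a rollback request for the given revision through the
// generated clientset. Revision 0 asks for the last revision, matching the
// "rollback to last revision" steps above. Field names are assumed.
func rollbackTo(c clientset.Interface, ns, name string, revision int64) error {
	rollback := &extensions.DeploymentRollback{
		Name:       name,
		RollbackTo: extensions.RollbackConfig{Revision: revision},
	}
	return c.Extensions().Deployments(ns).Rollback(rollback)
}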
@@ -669,7 +690,8 @@ func testRollbackDeployment(f *Framework) {
 // Finally, rollback the deployment (v3) to v3 should be no-op and emit related event.
 func testRollbackDeploymentRCNoRevision(f *Framework) {
 	ns := f.Namespace.Name
-	c := f.Client
+	unversionedClient := f.Client
+	c := clientset.FromUnversionedClient(f.Client)
 	podName := "nginx"
 	deploymentPodLabels := map[string]string{"name": podName}
 	rcPodLabels := map[string]string{
@@ -682,11 +704,11 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	rc := newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx")
 	rc.Annotations = make(map[string]string)
 	rc.Annotations["make"] = "difference"
-	_, err := c.ReplicationControllers(ns).Create(rc)
+	_, err := c.Legacy().ReplicationControllers(ns).Create(rc)
 	Expect(err).NotTo(HaveOccurred())
 	defer func() {
 		Logf("deleting replication controller %s", rcName)
-		Expect(c.ReplicationControllers(ns).Delete(rcName)).NotTo(HaveOccurred())
+		Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
 	}()
 
 	// Create a deployment to create nginx pods, which have different template than the rc created above.
@@ -696,34 +718,34 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
 	Logf("Creating deployment %s", deploymentName)
 	d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
-	_, err = c.Deployments(ns).Create(d)
+	_, err = c.Extensions().Deployments(ns).Create(d)
 	Expect(err).NotTo(HaveOccurred())
 	defer func() {
-		deployment, err := c.Deployments(ns).Get(deploymentName)
+		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 		Expect(err).NotTo(HaveOccurred())
 		Logf("deleting deployment %s", deploymentName)
-		Expect(c.Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
+		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
 		// TODO: remove this once we can delete rcs with deployment
 		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 		Expect(err).NotTo(HaveOccurred())
-		Expect(c.ReplicationControllers(ns).Delete(newRC.Name)).NotTo(HaveOccurred())
+		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
 		oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
 		Expect(err).NotTo(HaveOccurred())
 		for _, oldRC := range oldRCs {
-			Expect(c.ReplicationControllers(ns).Delete(oldRC.Name)).NotTo(HaveOccurred())
+			Expect(c.Legacy().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
 		}
 	}()
 	// Check that deployment is created fine.
-	deployment, err := c.Deployments(ns).Get(deploymentName)
+	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Verify that the required pods have come up.
-	err = verifyPods(c, ns, "nginx", false, deploymentReplicas)
+	err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
 	if err != nil {
 		Logf("error in waiting for pods to come up: %s", err)
 		Expect(err).NotTo(HaveOccurred())
 	}
-	deployment, err = c.Deployments(ns).Get(deploymentName)
+	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
 	Expect(err).NotTo(HaveOccurred())
 	// DeploymentStatus should be appropriately updated.
 	Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
@@ -733,7 +755,7 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
 
 	// Check that the rc we created still doesn't contain revision information
-	rc, err = c.ReplicationControllers(ns).Get(rcName)
+	rc, err = c.Legacy().ReplicationControllers(ns).Get(rcName)
 	Expect(rc.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))
 
 	// Update the deploymentRollback to rollback to last revision
@@ -741,11 +763,11 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	revision := int64(0)
 	Logf("rolling back deployment %s to last revision", deploymentName)
 	rollback := newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Deployments(ns).Rollback(rollback)
+	err = c.Extensions().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be revision not found event since there's no last revision
-	waitForEvents(c, ns, deployment, 2)
+	waitForEvents(unversionedClient, ns, deployment, 2)
 	events, err := c.Events(ns).Search(deployment)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(events.Items[1].Reason).Should(Equal("DeploymentRollbackRevisionNotFound"))
@@ -759,7 +781,7 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
 	d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
 	Logf("updating deployment %s", deploymentName)
-	_, err = c.Deployments(ns).Update(d)
+	_, err = c.Extensions().Deployments(ns).Update(d)
 	Expect(err).NotTo(HaveOccurred())
 
 	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
@@ -772,14 +794,14 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	revision = 1
 	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Deployments(ns).Rollback(rollback)
+	err = c.Extensions().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())
 
 	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be rollback event after we rollback to revision 1
-	waitForEvents(c, ns, deployment, 5)
+	waitForEvents(unversionedClient, ns, deployment, 5)
 	events, err = c.Events(ns).Search(deployment)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(events.Items[4].Reason).Should(Equal("DeploymentRollback"))
@@ -792,11 +814,11 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	revision = 10
 	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Deployments(ns).Rollback(rollback)
+	err = c.Extensions().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be revision not found event since there's no revision 10
-	waitForEvents(c, ns, deployment, 7)
+	waitForEvents(unversionedClient, ns, deployment, 7)
 	events, err = c.Events(ns).Search(deployment)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(events.Items[6].Reason).Should(Equal("DeploymentRollbackRevisionNotFound"))
@@ -809,11 +831,11 @@ func testRollbackDeploymentRCNoRevision(f *Framework) {
 	revision = 3
 	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
 	rollback = newDeploymentRollback(deploymentName, nil, revision)
-	err = c.Deployments(ns).Rollback(rollback)
+	err = c.Extensions().Deployments(ns).Rollback(rollback)
 	Expect(err).NotTo(HaveOccurred())
 
 	// There should be revision template unchanged event since it's already revision 3
-	waitForEvents(c, ns, deployment, 8)
+	waitForEvents(unversionedClient, ns, deployment, 8)
 	events, err = c.Events(ns).Search(deployment)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(events.Items[7].Reason).Should(Equal("DeploymentRollbackTemplateUnchanged"))
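Taken together, the call-site changes in this test file reduce to two mechanical rules: extensions resources are now reached through Extensions(), core resources through Legacy(), and Delete takes explicit options, with nil keeping the previous behaviour. A minimal sketch of the resulting call shape, not part of the commit (the helper name is hypothetical):

package e2e

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// deleteDeploymentAndRC shows the migrated call shape: group accessors on the
// generated clientset and explicit (nil) delete options.
func deleteDeploymentAndRC(c clientset.Interface, ns, deploymentName, rcName string) error {
	if err := c.Extensions().Deployments(ns).Delete(deploymentName, nil); err != nil {
		return err
	}
	return c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)
}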
@@ -40,6 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
@@ -1965,10 +1966,10 @@ func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error {
 
 // Waits for the deployment to reach desired state.
 // Returns an error if minAvailable or maxCreated is broken at any times.
-func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error {
+func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error {
 	return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
 
-		deployment, err := c.Deployments(ns).Get(deploymentName)
+		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 		if err != nil {
 			return false, err
 		}
@@ -2017,10 +2018,10 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
 }
 
 // Waits for the deployment to clean up old rcs.
-func waitForDeploymentOldRCsNum(c *client.Client, ns, deploymentName string, desiredRCNum int) error {
+func waitForDeploymentOldRCsNum(c *clientset.Clientset, ns, deploymentName string, desiredRCNum int) error {
 	return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
 
-		deployment, err := c.Deployments(ns).Get(deploymentName)
+		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
 		if err != nil {
 			return false, err
 		}
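Both helpers above now poll the deployment through the clientset's extensions group instead of the unversioned client. A stripped-down sketch of that polling loop, assumed rather than taken from the commit (the wait import path and the int-typed Replicas comparison are assumptions):

package e2e

import (
	"time"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForReplicas polls until the deployment reports the desired number of
// replicas, in the style of waitForDeploymentStatus after this change.
func waitForReplicas(c clientset.Interface, ns, name string, desired int, interval time.Duration) error {
	return wait.Poll(interval, 5*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(name)
		if err != nil {
			return false, err
		}
		return deployment.Status.Replicas == desired, nil
	})
}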
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/apiserver"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
@@ -100,9 +101,11 @@ func NewMasterComponents(c *Config) *MasterComponents {
 	if c.DeleteEtcdKeys {
 		DeleteAllEtcdKeys()
 	}
+	// TODO: caesarxuchao: remove this client when the refactoring of client libraray is done.
 	restClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
+	clientset := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
 	rcStopCh := make(chan struct{})
-	controllerManager := replicationcontroller.NewReplicationManager(restClient, controller.NoResyncPeriodFunc, c.Burst)
+	controllerManager := replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, c.Burst)
 
 	// TODO: Support events once we can cleanly shutdown an event recorder.
 	controllerManager.SetEventRecorder(&record.FakeRecorder{})
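NewMasterComponents now builds the generated clientset next to the unversioned REST client from the same server URL and QPS/Burst settings, so the replication manager can take the new client type. A hedged sketch of that construction, not part of the commit (the wrapper name, package name, and the float32 QPS type are assumptions):

package framework

import (
	"k8s.io/kubernetes/pkg/api/testapi"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// testClientsFor builds both client flavours from one server URL, mirroring
// the restClient/clientset pair created in NewMasterComponents above.
func testClientsFor(url string, qps float32, burst int) (*client.Client, *clientset.Clientset) {
	cfg := &client.Config{
		Host:          url,
		ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		QPS:           qps,
		Burst:         burst,
	}
	return client.NewOrDie(cfg), clientset.NewForConfigOrDie(cfg)
}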
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 	persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/persistentvolume"
@@ -45,9 +46,9 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 	defer s.Close()
 
 	deleteAllEtcdKeys()
-	binderClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	testClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	binderClient := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	recyclerClient := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
+	testClient := clientset.NewForConfigOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
 	host := volume.NewFakeVolumeHost("/tmp/fake", nil, nil)
 
 	plugins := []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}}}
@@ -90,7 +91,7 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 	waitForPersistentVolumePhase(w, api.VolumeBound)
 
 	// deleting a claim releases the volume, after which it can be recycled
-	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
+	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil {
 		t.Errorf("error deleting claim %s", pvc.Name)
 	}
 
@@ -115,7 +116,7 @@ func TestPersistentVolumeRecycler(t *testing.T) {
 	waitForPersistentVolumePhase(w, api.VolumeBound)
 
 	// deleting a claim releases the volume, after which it can be recycled
-	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
+	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil {
 		t.Errorf("error deleting claim %s", pvc.Name)
 	}
 
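With the generated clientset in place, deleting a claim in TestPersistentVolumeRecycler passes explicit delete options; nil preserves the old default behaviour. A small sketch of that call, a hypothetical helper rather than code from the commit (the package name is an assumption):

package integration

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// deleteClaim deletes a PersistentVolumeClaim through the generated clientset,
// passing nil DeleteOptions to keep the previous default behaviour.
func deleteClaim(c *clientset.Clientset, ns, name string) error {
	return c.PersistentVolumeClaims(ns).Delete(name, nil)
}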