Merge pull request #15885 from ashcrow/fix-import-package-names-15319

Auto commit by PR queue bot
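Every hunk below makes the same mechanical change: multi-word import aliases written in mixedCaps or with underscores (offerMetrics, fake_cloud, math_rand) become the single-word, all-lowercase form that Go's package-naming convention calls for, and every use site is updated to match. A minimal runnable sketch of the convention, using only the standard library and mirroring the math_rand → mathrand rename in the e2e hunks below:

	package main

	import (
		"fmt"
		mathrand "math/rand" // all-lowercase alias per Go convention; not math_rand or mathRand
	)

	func main() {
		fmt.Println(mathrand.Intn(10)) // call sites use the renamed alias
	}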
@@ -33,7 +33,7 @@ import (
 	"k8s.io/kubernetes/contrib/mesos/pkg/executor/messages"
 	"k8s.io/kubernetes/contrib/mesos/pkg/node"
 	"k8s.io/kubernetes/contrib/mesos/pkg/offers"
-	offerMetrics "k8s.io/kubernetes/contrib/mesos/pkg/offers/metrics"
+	offermetrics "k8s.io/kubernetes/contrib/mesos/pkg/offers/metrics"
 	"k8s.io/kubernetes/contrib/mesos/pkg/proc"
 	"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
 	schedcfg "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/config"
@@ -351,7 +351,7 @@ func (k *KubernetesScheduler) OfferRescinded(driver bindings.SchedulerDriver, of
 	log.Infof("Offer rescinded %v\n", offerId)
 
 	oid := offerId.GetValue()
-	k.offers.Delete(oid, offerMetrics.OfferRescinded)
+	k.offers.Delete(oid, offermetrics.OfferRescinded)
 }
 
 // StatusUpdate is called when a status update message is sent to the scheduler.

@@ -30,7 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/validation"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	expValidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
+	expvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
 	"k8s.io/kubernetes/pkg/capabilities"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/yaml"
@@ -105,17 +105,17 @@ func validateObject(obj runtime.Object) (errors []error) {
 		if t.Namespace == "" {
 			t.Namespace = api.NamespaceDefault
 		}
-		errors = expValidation.ValidateDeployment(t)
+		errors = expvalidation.ValidateDeployment(t)
 	case *extensions.Job:
 		if t.Namespace == "" {
 			t.Namespace = api.NamespaceDefault
 		}
-		errors = expValidation.ValidateJob(t)
+		errors = expvalidation.ValidateJob(t)
 	case *extensions.DaemonSet:
 		if t.Namespace == "" {
 			t.Namespace = api.NamespaceDefault
 		}
-		errors = expValidation.ValidateDaemonSet(t)
+		errors = expvalidation.ValidateDaemonSet(t)
 	default:
 		return []error{fmt.Errorf("no validation defined for %#v", obj)}
 	}

@@ -25,7 +25,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api/testapi"
-	unversioned_api "k8s.io/kubernetes/pkg/api/unversioned"
+	unversionedapi "k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/fake"
 )
@@ -90,10 +90,10 @@ func TestNegotiateVersion(t *testing.T) {
 			Codec: codec,
 			Resp: &http.Response{
 				StatusCode: 200,
-				Body:       objBody(&unversioned_api.APIVersions{Versions: test.serverVersions}),
+				Body:       objBody(&unversionedapi.APIVersions{Versions: test.serverVersions}),
 			},
 			Client: fake.HTTPClientFunc(func(req *http.Request) (*http.Response, error) {
-				return &http.Response{StatusCode: 200, Body: objBody(&unversioned_api.APIVersions{Versions: test.serverVersions})}, nil
+				return &http.Response{StatusCode: 200, Body: objBody(&unversionedapi.APIVersions{Versions: test.serverVersions})}, nil
 			}),
 		}
 		c := unversioned.NewOrDie(test.config)

@@ -18,7 +18,7 @@ package testclient
 
 import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	kClientLib "k8s.io/kubernetes/pkg/client/unversioned"
+	kclientlib "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/watch"
@@ -32,7 +32,7 @@ type FakeDaemonSets struct {
 }
 
 // Ensure statically that FakeDaemonSets implements DaemonInterface.
-var _ kClientLib.DaemonSetInterface = &FakeDaemonSets{}
+var _ kclientlib.DaemonSetInterface = &FakeDaemonSets{}
 
 func (c *FakeDaemonSets) Get(name string) (*extensions.DaemonSet, error) {
 	obj, err := c.Fake.Invokes(NewGetAction("daemonsets", c.Namespace, name), &extensions.DaemonSet{})

@@ -25,7 +25,7 @@ import (
 	"time"
 
 	"github.com/rackspace/gophercloud"
-	os_servers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
+	osservers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
 	"github.com/rackspace/gophercloud/pagination"
 	"github.com/rackspace/gophercloud/rackspace"
 	"github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
@@ -161,7 +161,7 @@ func (os *Rackspace) Instances() (cloudprovider.Instances, bool) {
 func (i *Instances) List(name_filter string) ([]string, error) {
 	glog.V(2).Infof("rackspace List(%v) called", name_filter)
 
-	opts := os_servers.ListOpts{
+	opts := osservers.ListOpts{
 		Name:   name_filter,
 		Status: "ACTIVE",
 	}
@@ -187,7 +187,7 @@ func (i *Instances) List(name_filter string) ([]string, error) {
 	return ret, nil
 }
 
-func serverHasAddress(srv os_servers.Server, ip string) bool {
+func serverHasAddress(srv osservers.Server, ip string) bool {
 	if ip == firstAddr(srv.Addresses["private"]) {
 		return true
 	}
@@ -203,10 +203,10 @@ func serverHasAddress(srv os_servers.Server, ip string) bool {
 	return false
 }
 
-func getServerByAddress(client *gophercloud.ServiceClient, name string) (*os_servers.Server, error) {
+func getServerByAddress(client *gophercloud.ServiceClient, name string) (*osservers.Server, error) {
 	pager := servers.List(client, nil)
 
-	serverList := make([]os_servers.Server, 0, 1)
+	serverList := make([]osservers.Server, 0, 1)
 
 	err := pager.EachPage(func(page pagination.Page) (bool, error) {
 		s, err := servers.ExtractServers(page)
@@ -236,19 +236,19 @@ func getServerByAddress(client *gophercloud.ServiceClient, name string) (*os_ser
 	return &serverList[0], nil
 }
 
-func getServerByName(client *gophercloud.ServiceClient, name string) (*os_servers.Server, error) {
+func getServerByName(client *gophercloud.ServiceClient, name string) (*osservers.Server, error) {
 	if net.ParseIP(name) != nil {
 		// we're an IP, so we'll have to walk the full list of servers to
 		// figure out which one we are.
 		return getServerByAddress(client, name)
 	}
-	opts := os_servers.ListOpts{
+	opts := osservers.ListOpts{
 		Name:   fmt.Sprintf("^%s$", regexp.QuoteMeta(name)),
 		Status: "ACTIVE",
 	}
 	pager := servers.List(client, opts)
 
-	serverList := make([]os_servers.Server, 0, 1)
+	serverList := make([]osservers.Server, 0, 1)
 
 	err := pager.EachPage(func(page pagination.Page) (bool, error) {
 		s, err := servers.ExtractServers(page)

@@ -29,7 +29,7 @@ import (
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util"
-	deploymentUtil "k8s.io/kubernetes/pkg/util/deployment"
+	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 )
 
 type DeploymentController struct {
@@ -125,23 +125,23 @@ func (d *DeploymentController) reconcileRollingUpdateDeployment(deployment exten
 }
 
 func (d *DeploymentController) getOldRCs(deployment extensions.Deployment) ([]*api.ReplicationController, error) {
-	return deploymentUtil.GetOldRCs(deployment, d.client)
+	return deploymentutil.GetOldRCs(deployment, d.client)
 }
 
 // Returns an RC that matches the intent of the given deployment.
 // It creates a new RC if required.
 func (d *DeploymentController) getNewRC(deployment extensions.Deployment) (*api.ReplicationController, error) {
-	existingNewRC, err := deploymentUtil.GetNewRC(deployment, d.client)
+	existingNewRC, err := deploymentutil.GetNewRC(deployment, d.client)
 	if err != nil || existingNewRC != nil {
 		return existingNewRC, err
 	}
 	// new RC does not exist, create one.
 	namespace := deployment.ObjectMeta.Namespace
-	podTemplateSpecHash := deploymentUtil.GetPodTemplateSpecHash(deployment.Spec.Template)
+	podTemplateSpecHash := deploymentutil.GetPodTemplateSpecHash(deployment.Spec.Template)
 	rcName := fmt.Sprintf("deploymentrc-%d", podTemplateSpecHash)
-	newRCTemplate := deploymentUtil.GetNewRCTemplate(deployment)
+	newRCTemplate := deploymentutil.GetNewRCTemplate(deployment)
 	// Add podTemplateHash label to selector.
-	newRCSelector := deploymentUtil.CloneAndAddLabel(deployment.Spec.Selector, deployment.Spec.UniqueLabelKey, podTemplateSpecHash)
+	newRCSelector := deploymentutil.CloneAndAddLabel(deployment.Spec.Selector, deployment.Spec.UniqueLabelKey, podTemplateSpecHash)
 
 	newRC := api.ReplicationController{
 		ObjectMeta: api.ObjectMeta{
@@ -180,7 +180,7 @@ func (d *DeploymentController) reconcileNewRC(allRCs []*api.ReplicationControlle
 		maxSurge = util.GetValueFromPercent(maxSurge, deployment.Spec.Replicas)
 	}
 	// Find the total number of pods
-	currentPodCount := deploymentUtil.GetReplicaCountForRCs(allRCs)
+	currentPodCount := deploymentutil.GetReplicaCountForRCs(allRCs)
 	maxTotalPods := deployment.Spec.Replicas + maxSurge
 	if currentPodCount >= maxTotalPods {
 		// Cannot scale up.
@@ -196,7 +196,7 @@ func (d *DeploymentController) reconcileNewRC(allRCs []*api.ReplicationControlle
 }
 
 func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) (bool, error) {
-	oldPodsCount := deploymentUtil.GetReplicaCountForRCs(oldRCs)
+	oldPodsCount := deploymentutil.GetReplicaCountForRCs(oldRCs)
 	if oldPodsCount == 0 {
 		// Cant scale down further
 		return false, nil
@@ -211,7 +211,7 @@ func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationControll
 	// Check if we can scale down.
 	minAvailable := deployment.Spec.Replicas - maxUnavailable
 	// Find the number of ready pods.
-	readyPodCount, err := deploymentUtil.GetAvailablePodsForRCs(d.client, allRCs)
+	readyPodCount, err := deploymentutil.GetAvailablePodsForRCs(d.client, allRCs)
 	if err != nil {
 		return false, fmt.Errorf("could not find available pods: %v", err)
 	}
@@ -243,8 +243,8 @@ func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationControll
 }
 
 func (d *DeploymentController) updateDeploymentStatus(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) error {
-	totalReplicas := deploymentUtil.GetReplicaCountForRCs(allRCs)
-	updatedReplicas := deploymentUtil.GetReplicaCountForRCs([]*api.ReplicationController{newRC})
+	totalReplicas := deploymentutil.GetReplicaCountForRCs(allRCs)
+	updatedReplicas := deploymentutil.GetReplicaCountForRCs([]*api.ReplicationController{newRC})
 	newDeployment := deployment
 	// TODO: Reconcile this with API definition. API definition talks about ready pods, while this just computes created pods.
 	newDeployment.Status = extensions.DeploymentStatus{

@@ -23,7 +23,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
+	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 )
 
 func TestIsResponsibleForRoute(t *testing.T) {
@@ -147,16 +147,16 @@ func TestReconcile(t *testing.T) {
 		},
 	}
 	for i, testCase := range testCases {
-		cloud := &fake_cloud.FakeCloud{RouteMap: make(map[string]*fake_cloud.FakeRoute)}
+		cloud := &fakecloud.FakeCloud{RouteMap: make(map[string]*fakecloud.FakeRoute)}
 		for _, route := range testCase.initialRoutes {
-			fakeRoute := &fake_cloud.FakeRoute{}
+			fakeRoute := &fakecloud.FakeRoute{}
 			fakeRoute.ClusterName = cluster
 			fakeRoute.Route = *route
 			cloud.RouteMap[route.Name] = fakeRoute
 		}
 		routes, ok := cloud.Routes()
 		if !ok {
-			t.Error("Error in test: fake_cloud doesn't support Routes()")
+			t.Error("Error in test: fakecloud doesn't support Routes()")
 		}
 		_, cidr, _ := net.ParseCIDR("10.120.0.0/16")
 		rc := New(routes, nil, cluster, cidr)

@@ -22,7 +22,7 @@ import (
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
-	fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
+	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
 	"k8s.io/kubernetes/pkg/types"
 )
 
@@ -88,7 +88,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
 	}
 
 	for _, item := range table {
-		cloud := &fake_cloud.FakeCloud{}
+		cloud := &fakecloud.FakeCloud{}
 		cloud.Region = region
 		client := &testclient.Fake{}
 		controller := New(cloud, client, "test-cluster")
@@ -110,7 +110,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
 				t.Errorf("unexpected client actions: %v", actions)
 			}
 		} else {
-			var balancer *fake_cloud.FakeBalancer
+			var balancer *fakecloud.FakeBalancer
 			for k := range cloud.Balancers {
 				if balancer == nil {
 					b := cloud.Balancers[k]
@@ -145,7 +145,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
 	hosts := []string{"node0", "node1", "node73"}
 	table := []struct {
 		services            []*api.Service
-		expectedUpdateCalls []fake_cloud.FakeUpdateBalancerCall
+		expectedUpdateCalls []fakecloud.FakeUpdateBalancerCall
 	}{
 		{
 			// No services present: no calls should be made.
@@ -165,7 +165,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
 			services: []*api.Service{
 				newService("s0", "333", api.ServiceTypeLoadBalancer),
 			},
-			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
 				{Name: "a333", Region: region, Hosts: []string{"node0", "node1", "node73"}},
 			},
 		},
@@ -176,7 +176,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
 				newService("s1", "555", api.ServiceTypeLoadBalancer),
 				newService("s2", "666", api.ServiceTypeLoadBalancer),
 			},
-			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
 				{Name: "a444", Region: region, Hosts: []string{"node0", "node1", "node73"}},
 				{Name: "a555", Region: region, Hosts: []string{"node0", "node1", "node73"}},
 				{Name: "a666", Region: region, Hosts: []string{"node0", "node1", "node73"}},
@@ -190,7 +190,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
 				newService("s3", "999", api.ServiceTypeLoadBalancer),
 				newService("s4", "123", api.ServiceTypeClusterIP),
 			},
-			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
 				{Name: "a888", Region: region, Hosts: []string{"node0", "node1", "node73"}},
 				{Name: "a999", Region: region, Hosts: []string{"node0", "node1", "node73"}},
 			},
@@ -201,13 +201,13 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
 				newService("s0", "234", api.ServiceTypeLoadBalancer),
 				nil,
 			},
-			expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
+			expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
 				{Name: "a234", Region: region, Hosts: []string{"node0", "node1", "node73"}},
 			},
 		},
 	}
 	for _, item := range table {
-		cloud := &fake_cloud.FakeCloud{}
+		cloud := &fakecloud.FakeCloud{}
 
 		cloud.Region = region
 		client := &testclient.Fake{}

@@ -35,7 +35,7 @@ import (
 	qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/types"
-	deploymentUtil "k8s.io/kubernetes/pkg/util/deployment"
+	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
 
@@ -1428,11 +1428,11 @@ func (dd *DeploymentDescriber) Describe(namespace, name string) (string, error)
 			ru := d.Spec.Strategy.RollingUpdate
 			fmt.Fprintf(out, "RollingUpdateStrategy:\t%s max unavailable, %s max surge, %d min ready seconds\n", ru.MaxUnavailable.String(), ru.MaxSurge.String(), ru.MinReadySeconds)
 		}
-		oldRCs, err := deploymentUtil.GetOldRCs(*d, dd)
+		oldRCs, err := deploymentutil.GetOldRCs(*d, dd)
 		if err == nil {
 			fmt.Fprintf(out, "OldReplicationControllers:\t%s\n", printReplicationControllersByLabels(oldRCs))
 		}
-		newRC, err := deploymentUtil.GetNewRC(*d, dd)
+		newRC, err := deploymentutil.GetNewRC(*d, dd)
 		if err == nil {
 			var newRCs []*api.ReplicationController
 			if newRC != nil {

@@ -27,8 +27,8 @@ import (
 	"github.com/golang/glog"
 	"github.com/google/cadvisor/cache/memory"
 	"github.com/google/cadvisor/events"
-	cadvisorFs "github.com/google/cadvisor/fs"
-	cadvisorHttp "github.com/google/cadvisor/http"
+	cadvisorfs "github.com/google/cadvisor/fs"
+	cadvisorhttp "github.com/google/cadvisor/http"
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/google/cadvisor/manager"
@@ -80,14 +80,14 @@ func (cc *cadvisorClient) exportHTTP(port uint) error {
 	// Register the handlers regardless as this registers the prometheus
 	// collector properly.
 	mux := http.NewServeMux()
-	err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "")
+	err := cadvisorhttp.RegisterHandlers(mux, cc, "", "", "", "")
 	if err != nil {
 		return err
 	}
 
 	re := regexp.MustCompile(`^k8s_(?P<kubernetes_container_name>[^_\.]+)[^_]+_(?P<kubernetes_pod_name>[^_]+)_(?P<kubernetes_namespace>[^_]+)`)
 	reCaptureNames := re.SubexpNames()
-	cadvisorHttp.RegisterPrometheusHandler(mux, cc, "/metrics", func(name string) map[string]string {
+	cadvisorhttp.RegisterPrometheusHandler(mux, cc, "/metrics", func(name string) map[string]string {
 		extraLabels := map[string]string{}
 		matches := re.FindStringSubmatch(name)
 		for i, match := range matches {
@@ -149,11 +149,11 @@ func (cc *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
 }
 
 func (cc *cadvisorClient) DockerImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
-	return cc.getFsInfo(cadvisorFs.LabelDockerImages)
+	return cc.getFsInfo(cadvisorfs.LabelDockerImages)
 }
 
 func (cc *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
-	return cc.getFsInfo(cadvisorFs.LabelSystemRoot)
+	return cc.getFsInfo(cadvisorfs.LabelSystemRoot)
 }
 
 func (cc *cadvisorClient) getFsInfo(label string) (cadvisorapiv2.FsInfo, error) {

@@ -22,7 +22,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	cadvisorApi "github.com/google/cadvisor/info/v2"
+	cadvisorapi "github.com/google/cadvisor/info/v2"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 )
 
@@ -63,7 +63,7 @@ type realDiskSpaceManager struct {
 	frozen     bool              // space checks always return ok when frozen is set. True on creation.
 }
 
-func (dm *realDiskSpaceManager) getFsInfo(fsType string, f func() (cadvisorApi.FsInfo, error)) (fsInfo, error) {
+func (dm *realDiskSpaceManager) getFsInfo(fsType string, f func() (cadvisorapi.FsInfo, error)) (fsInfo, error) {
 	dm.lock.Lock()
 	defer dm.lock.Unlock()
 	fsi := fsInfo{}
@@ -95,7 +95,7 @@ func (dm *realDiskSpaceManager) IsRootDiskSpaceAvailable() (bool, error) {
 	return dm.isSpaceAvailable("root", dm.policy.RootFreeDiskMB, dm.cadvisor.RootFsInfo)
}
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"testing"
 
-	cadvisorApi "github.com/google/cadvisor/info/v2"
+	cadvisorapi "github.com/google/cadvisor/info/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
@@ -61,12 +61,12 @@ func TestSpaceAvailable(t *testing.T) {
 	dm, err := newDiskSpaceManager(mockCadvisor, policy)
 	assert.NoError(err)
 
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     400 * mb,
 		Capacity:  1000 * mb,
 		Available: 600 * mb,
 	}, nil)
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:    9 * mb,
 		Capacity: 10 * mb,
 	}, nil)
@@ -90,7 +90,7 @@ func TestIsDockerDiskSpaceAvailableWithSpace(t *testing.T) {
 	require.NoError(t, err)
 
 	// 500MB available
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     9500 * mb,
 		Capacity:  10000 * mb,
 		Available: 500 * mb,
@@ -108,7 +108,7 @@ func TestIsDockerDiskSpaceAvailableWithSpace(t *testing.T) {
 func TestIsDockerDiskSpaceAvailableWithoutSpace(t *testing.T) {
 	// 1MB available
 	assert, policy, mockCadvisor := setUp(t)
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     999 * mb,
 		Capacity:  1000 * mb,
 		Available: 1 * mb,
@@ -133,7 +133,7 @@ func TestIsRootDiskSpaceAvailableWithSpace(t *testing.T) {
 	assert.NoError(err)
 
 	// 999MB available
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     1 * mb,
 		Capacity:  1000 * mb,
 		Available: 999 * mb,
@@ -155,7 +155,7 @@ func TestIsRootDiskSpaceAvailableWithoutSpace(t *testing.T) {
 	assert.NoError(err)
 
 	// 9MB available
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     990 * mb,
 		Capacity:  1000 * mb,
 		Available: 9 * mb,
@@ -176,12 +176,12 @@ func TestCache(t *testing.T) {
 
 	dm.Unfreeze()
 
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     400 * mb,
 		Capacity:  1000 * mb,
 		Available: 300 * mb,
 	}, nil).Once()
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     500 * mb,
 		Capacity:  1000 * mb,
 		Available: 500 * mb,
@@ -221,8 +221,8 @@ func TestFsInfoError(t *testing.T) {
 	assert.NoError(err)
 
 	dm.Unfreeze()
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApi.FsInfo{}, fmt.Errorf("can't find fs"))
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{}, fmt.Errorf("EBUSY"))
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("can't find fs"))
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{}, fmt.Errorf("EBUSY"))
 	ok, err := dm.IsDockerDiskSpaceAvailable()
 	assert.Error(err)
 	assert.True(ok)
@@ -236,7 +236,7 @@ func Test_getFsInfo(t *testing.T) {
 	assert, policy, mockCadvisor := setUp(t)
 
 	// Sunny day case
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     10 * mb,
 		Capacity:  100 * mb,
 		Available: 90 * mb,
@@ -255,7 +255,7 @@ func Test_getFsInfo(t *testing.T) {
 
 	// Threshold case
 	mockCadvisor = new(cadvisor.Mock)
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     9 * mb,
 		Capacity:  100 * mb,
 		Available: 9 * mb,
@@ -272,7 +272,7 @@ func Test_getFsInfo(t *testing.T) {
 	assert.NoError(err)
 
 	// Frozen case
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     9 * mb,
 		Capacity:  10 * mb,
 		Available: 500 * mb,
@@ -290,7 +290,7 @@ func Test_getFsInfo(t *testing.T) {
 
 	// Capacity error case
 	mockCadvisor = new(cadvisor.Mock)
-	mockCadvisor.On("RootFsInfo").Return(cadvisorApi.FsInfo{
+	mockCadvisor.On("RootFsInfo").Return(cadvisorapi.FsInfo{
 		Usage:     9 * mb,
 		Capacity:  0,
 		Available: 500 * mb,

@@ -21,7 +21,7 @@ import (
 	"testing"
 	"time"
 
-	cadvisorApiV2 "github.com/google/cadvisor/info/v2"
+	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/kubernetes/pkg/client/record"
@@ -343,7 +343,7 @@ func TestGarbageCollectBelowLowThreshold(t *testing.T) {
 	manager, _, mockCadvisor := newRealImageManager(policy)
 
 	// Expect 40% usage.
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiV2.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
 		Usage:    400,
 		Capacity: 1000,
 	}, nil)
@@ -358,7 +358,7 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) {
 	}
 	manager, _, mockCadvisor := newRealImageManager(policy)
 
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiV2.FsInfo{}, fmt.Errorf("error"))
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, fmt.Errorf("error"))
 	assert.NotNil(t, manager.GarbageCollect())
 }
 
@@ -370,7 +370,7 @@ func TestGarbageCollectBelowSuccess(t *testing.T) {
 	manager, fakeRuntime, mockCadvisor := newRealImageManager(policy)
 
 	// Expect 95% usage and most of it gets freed.
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiV2.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
 		Usage:    950,
 		Capacity: 1000,
 	}, nil)
@@ -389,7 +389,7 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
 	manager, fakeRuntime, mockCadvisor := newRealImageManager(policy)
 
 	// Expect 95% usage and little of it gets freed.
-	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiV2.FsInfo{
+	mockCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{
 		Usage:    950,
 		Capacity: 1000,
 	}, nil)

@@ -23,7 +23,7 @@ import (
 	"strings"
 
 	"github.com/appc/cni/libcni"
-	cniTypes "github.com/appc/cni/pkg/types"
+	cnitypes "github.com/appc/cni/pkg/types"
 	"github.com/golang/glog"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/dockertools"
@@ -152,7 +152,7 @@ func (plugin *cniNetworkPlugin) Status(namespace string, name string, id kubetyp
 	return &network.PodNetworkStatus{IP: ip}, nil
 }
 
-func (network *cniNetwork) addToNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*cniTypes.Result, error) {
+func (network *cniNetwork) addToNetwork(podName string, podNamespace string, podInfraContainerID kubecontainer.ContainerID, podNetnsPath string) (*cnitypes.Result, error) {
 	rt, err := buildCNIRuntimeConf(podName, podNamespace, podInfraContainerID, podNetnsPath)
 	if err != nil {
 		glog.Errorf("Error adding network: %v", err)

@@ -80,7 +80,7 @@ import (
 	"k8s.io/kubernetes/pkg/ui"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/sets"
-	utilSets "k8s.io/kubernetes/pkg/util/sets"
+	utilsets "k8s.io/kubernetes/pkg/util/sets"
 
 	daemonetcd "k8s.io/kubernetes/pkg/registry/daemonset/etcd"
 	horizontalpodautoscaleretcd "k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler/etcd"
@@ -1033,7 +1033,7 @@ func (m *Master) thirdpartyapi(group, kind, version string) *apiserver.APIGroupV
 // experimental returns the resources and codec for the experimental api
 func (m *Master) experimental(c *Config) *apiserver.APIGroupVersion {
 	// All resources except these are disabled by default.
-	enabledResources := utilSets.NewString("jobs", "horizontalpodautoscalers", "ingress")
+	enabledResources := utilsets.NewString("jobs", "horizontalpodautoscalers", "ingress")
 	resourceOverrides := m.apiGroupVersionOverrides["extensions/v1beta1"].ResourceOverrides
 	isEnabled := func(resource string) bool {
 		// Check if the resource has been overriden.

@@ -25,7 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/registry/registrytest"
 	"k8s.io/kubernetes/pkg/registry/service/allocator"
-	allocator_etcd "k8s.io/kubernetes/pkg/registry/service/allocator/etcd"
+	allocatoretcd "k8s.io/kubernetes/pkg/registry/service/allocator/etcd"
 	"k8s.io/kubernetes/pkg/registry/service/ipallocator"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/tools"
@@ -43,7 +43,7 @@ func newStorage(t *testing.T) (*tools.FakeEtcdClient, ipallocator.Interface, all
 	storage := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
 		mem := allocator.NewAllocationMap(max, rangeSpec)
 		backing = mem
-		etcd := allocator_etcd.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage)
+		etcd := allocatoretcd.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage)
 		return etcd
 	})
 

@@ -26,7 +26,7 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
-	aws_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/exec"
@@ -154,9 +154,9 @@ func detachDiskLogError(ebs *awsElasticBlockStore) {
 }
 
 // getVolumeProvider returns the AWS Volumes interface
-func (ebs *awsElasticBlockStore) getVolumeProvider() (aws_cloud.Volumes, error) {
+func (ebs *awsElasticBlockStore) getVolumeProvider() (awscloud.Volumes, error) {
 	cloud := ebs.plugin.host.GetCloudProvider()
-	volumes, ok := cloud.(aws_cloud.Volumes)
+	volumes, ok := cloud.(awscloud.Volumes)
 	if !ok {
 		return nil, fmt.Errorf("Cloud provider does not support volumes")
 	}

@@ -22,7 +22,7 @@ import (
 	"strconv"
 	"time"
 
-	flockerClient "github.com/ClusterHQ/flocker-go"
+	flockerclient "github.com/ClusterHQ/flocker-go"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
@@ -107,7 +107,7 @@ func (p *flockerPlugin) NewCleaner(datasetName string, podUID types.UID) (volume
 
 type flockerBuilder struct {
 	*flocker
-	client   flockerClient.Clientable
+	client   flockerclient.Clientable
 	exe      exec.Interface
 	opts     volume.VolumeOptions
 	readOnly bool
@@ -123,7 +123,7 @@ func (b flockerBuilder) SetUp() error {
 
 // newFlockerClient uses environment variables and pod attributes to return a
 // flocker client capable of talking with the Flocker control service.
-func (b flockerBuilder) newFlockerClient() (*flockerClient.Client, error) {
+func (b flockerBuilder) newFlockerClient() (*flockerclient.Client, error) {
 	host := getenvOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost)
 	portConfig := getenvOrFallback("FLOCKER_CONTROL_SERVICE_PORT", strconv.Itoa(defaultPort))
 	port, err := strconv.Atoi(portConfig)
@@ -134,7 +134,7 @@ func (b flockerBuilder) newFlockerClient() (*flockerClient.Client, error) {
 	keyPath := getenvOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile)
 	certPath := getenvOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile)
 
-	c, err := flockerClient.NewClient(host, port, b.flocker.pod.Status.HostIP, caCertPath, keyPath, certPath)
+	c, err := flockerclient.NewClient(host, port, b.flocker.pod.Status.HostIP, caCertPath, keyPath, certPath)
 	return c, err
 }
 

@@ -21,7 +21,7 @@ import (
 	"os"
 	"testing"
 
-	flockerClient "github.com/ClusterHQ/flocker-go"
+	flockerclient "github.com/ClusterHQ/flocker-go"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/types"
@@ -161,7 +161,7 @@ func TestGetPath(t *testing.T) {
 
 type mockFlockerClient struct {
 	datasetID, primaryUUID, path string
-	datasetState                 *flockerClient.DatasetState
+	datasetState                 *flockerclient.DatasetState
 }
 
 func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient {
@@ -169,7 +169,7 @@ func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mock
 		datasetID:   mockDatasetID,
 		primaryUUID: mockPrimaryUUID,
 		path:        mockPath,
-		datasetState: &flockerClient.DatasetState{
+		datasetState: &flockerclient.DatasetState{
 			Path:      mockPath,
 			DatasetID: mockDatasetID,
 			Primary:   mockPrimaryUUID,
@@ -177,10 +177,10 @@ func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mock
 	}
 }
 
-func (m mockFlockerClient) CreateDataset(metaName string) (*flockerClient.DatasetState, error) {
+func (m mockFlockerClient) CreateDataset(metaName string) (*flockerclient.DatasetState, error) {
 	return m.datasetState, nil
 }
-func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerClient.DatasetState, error) {
+func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerclient.DatasetState, error) {
 	return m.datasetState, nil
 }
 func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
@@ -189,7 +189,7 @@ func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
 func (m mockFlockerClient) GetPrimaryUUID() (string, error) {
 	return m.primaryUUID, nil
 }
-func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerClient.DatasetState, error) {
+func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerclient.DatasetState, error) {
 	return m.datasetState, nil
 }
 

@@ -26,7 +26,7 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	gce_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/operationmanager"
@@ -123,7 +123,7 @@ func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error {
 // Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
 func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
 	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
-	var gceCloud *gce_cloud.GCECloud
+	var gceCloud *gcecloud.GCECloud
 	for numRetries := 0; numRetries < maxRetries; numRetries++ {
 		// Block execution until any pending detach goroutines for this pd have completed
 		detachCleanupManager.Send(b.pdName, true)
@@ -219,7 +219,7 @@ func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
 	}()
 
 	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
-	var gceCloud *gce_cloud.GCECloud
+	var gceCloud *gcecloud.GCECloud
 	for numRetries := 0; numRetries < maxRetries; numRetries++ {
 		var err error
 		if gceCloud == nil {
@@ -310,14 +310,14 @@ func pathExists(path string) (bool, error) {
 }
 
 // Return cloud provider
-func getCloudProvider() (*gce_cloud.GCECloud, error) {
+func getCloudProvider() (*gcecloud.GCECloud, error) {
 	gceCloudProvider, err := cloudprovider.GetCloudProvider("gce", nil)
 	if err != nil || gceCloudProvider == nil {
 		return nil, err
 	}
 
 	// The conversion must be safe otherwise bug in GetCloudProvider()
-	return gceCloudProvider.(*gce_cloud.GCECloud), nil
+	return gceCloudProvider.(*gcecloud.GCECloud), nil
 }
 
 // Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in after set).

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/apis/extensions"
-	deploymentUtil "k8s.io/kubernetes/pkg/util/deployment"
+	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -263,7 +263,7 @@ func testRollingUpdateDeploymentEvents(f *Framework) {
 	}
 	// There should be 2 events, one to scale up the new RC and then to scale down the old RC.
 	Expect(len(events.Items)).Should(Equal(2))
-	newRC, err := deploymentUtil.GetNewRC(*deployment, c)
+	newRC, err := deploymentutil.GetNewRC(*deployment, c)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(newRC).NotTo(Equal(nil))
 	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 1", newRC.Name)))

@@ -18,7 +18,7 @@ package e2e
 
 import (
 	"fmt"
-	math_rand "math/rand"
+	mathrand "math/rand"
 	"os/exec"
 	"strings"
 	"time"
@@ -30,7 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	aws_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util"
@@ -62,7 +62,7 @@ var _ = Describe("Pod Disks", func() {
 		host0Name = nodes.Items[0].ObjectMeta.Name
 		host1Name = nodes.Items[1].ObjectMeta.Name
 
-		math_rand.Seed(time.Now().UTC().UnixNano())
+		mathrand.Seed(time.Now().UTC().UnixNano())
 	})
 
 	It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host", func() {
@@ -94,7 +94,7 @@ var _ = Describe("Pod Disks", func() {
 		expectNoError(framework.WaitForPodRunning(host0Pod.Name))
 
 		testFile := "/testpd1/tracker"
-		testFileContents := fmt.Sprintf("%v", math_rand.Int())
+		testFileContents := fmt.Sprintf("%v", mathrand.Int())
 
 		expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
 		Logf("Wrote value: %v", testFileContents)
@@ -207,19 +207,19 @@ var _ = Describe("Pod Disks", func() {
 			expectNoError(framework.WaitForPodRunning(host0Pod.Name))
 
 			// randomly select a container and read/verify pd contents from it
-			containerName := fmt.Sprintf("mycontainer%v", math_rand.Intn(numContainers)+1)
+			containerName := fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
 			verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify)
 
 			// Randomly select a container to write a file to PD from
-			containerName = fmt.Sprintf("mycontainer%v", math_rand.Intn(numContainers)+1)
+			containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
 			testFile := fmt.Sprintf("/testpd1/tracker%v", i)
-			testFileContents := fmt.Sprintf("%v", math_rand.Int())
+			testFileContents := fmt.Sprintf("%v", mathrand.Int())
 			fileAndContentToVerify[testFile] = testFileContents
 			expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
 			Logf("Wrote value: \"%v\" to PD %q from pod %q container %q", testFileContents, diskName, host0Pod.Name, containerName)
 
 			// Randomly select a container and read/verify pd contents from it
-			containerName = fmt.Sprintf("mycontainer%v", math_rand.Intn(numContainers)+1)
+			containerName = fmt.Sprintf("mycontainer%v", mathrand.Intn(numContainers)+1)
 			verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify)
 
 			By("deleting host0Pod")
@@ -271,8 +271,8 @@ var _ = Describe("Pod Disks", func() {
 			// Write a file to both PDs from container
 			testFilePD1 := fmt.Sprintf("/testpd1/tracker%v", i)
 			testFilePD2 := fmt.Sprintf("/testpd2/tracker%v", i)
-			testFilePD1Contents := fmt.Sprintf("%v", math_rand.Int())
-			testFilePD2Contents := fmt.Sprintf("%v", math_rand.Int())
+			testFilePD1Contents := fmt.Sprintf("%v", mathrand.Int())
+			testFilePD2Contents := fmt.Sprintf("%v", mathrand.Int())
 			fileAndContentToVerify[testFilePD1] = testFilePD1Contents
 			fileAndContentToVerify[testFilePD2] = testFilePD2Contents
 			expectNoError(framework.WriteFileViaContainer(host0Pod.Name, containerName, testFilePD1, testFilePD1Contents))
@@ -333,11 +333,11 @@ func createPD() (string, error) {
 		}
 		return pdName, nil
 	} else {
-		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
+		volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
 		if !ok {
 			return "", fmt.Errorf("Provider does not support volumes")
 		}
-		volumeOptions := &aws_cloud.VolumeOptions{}
+		volumeOptions := &awscloud.VolumeOptions{}
 		volumeOptions.CapacityMB = 10 * 1024
 		return volumes.CreateVolume(volumeOptions)
 	}
@@ -361,7 +361,7 @@ func deletePD(pdName string) error {
 		}
 		return err
 	} else {
-		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
+		volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
 		if !ok {
 			return fmt.Errorf("Provider does not support volumes")
 		}
@@ -378,7 +378,7 @@ func detachPD(hostName, pdName string) error {
 		// TODO: make this hit the compute API directly.
 		return exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "detach-disk", "--zone="+zone, "--disk="+pdName, instanceName).Run()
 	} else {
-		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
+		volumes, ok := testContext.CloudConfig.Provider.(awscloud.Volumes)
 		if !ok {
 			return fmt.Errorf("Provider does not support volumes")
 		}

@@ -34,7 +34,7 @@ import (
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	aws_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 )
 
 const (
@@ -56,7 +56,7 @@ func resizeGroup(size int) error {
 		return err
 	} else {
 		// Supported by aws
-		instanceGroups, ok := testContext.CloudConfig.Provider.(aws_cloud.InstanceGroups)
+		instanceGroups, ok := testContext.CloudConfig.Provider.(awscloud.InstanceGroups)
 		if !ok {
 			return fmt.Errorf("Provider does not support InstanceGroups")
 		}
@@ -78,7 +78,7 @@ func groupSize() (int, error) {
 		return len(re.FindAllString(string(output), -1)), nil
 	} else {
 		// Supported by aws
-		instanceGroups, ok := testContext.CloudConfig.Provider.(aws_cloud.InstanceGroups)
+		instanceGroups, ok := testContext.CloudConfig.Provider.(awscloud.InstanceGroups)
 		if !ok {
 			return -1, fmt.Errorf("provider does not support InstanceGroups")
 		}

@@ -44,7 +44,7 @@ import (
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util"
-	deploymentUtil "k8s.io/kubernetes/pkg/util/deployment"
+	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/watch"
@@ -1632,11 +1632,11 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
 		if err != nil {
 			return false, err
 		}
-		oldRCs, err := deploymentUtil.GetOldRCs(*deployment, c)
+		oldRCs, err := deploymentutil.GetOldRCs(*deployment, c)
 		if err != nil {
 			return false, err
 		}
-		newRC, err := deploymentUtil.GetNewRC(*deployment, c)
+		newRC, err := deploymentutil.GetNewRC(*deployment, c)
 		if err != nil {
 			return false, err
 		}
@@ -1645,8 +1645,8 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
 			return false, nil
 		}
 		allRCs := append(oldRCs, newRC)
-		totalCreated := deploymentUtil.GetReplicaCountForRCs(allRCs)
-		totalAvailable, err := deploymentUtil.GetAvailablePodsForRCs(c, allRCs)
+		totalCreated := deploymentutil.GetReplicaCountForRCs(allRCs)
+		totalAvailable, err := deploymentutil.GetAvailablePodsForRCs(c, allRCs)
 		if err != nil {
 			return false, err
 		}
@@ -1660,10 +1660,10 @@ func waitForDeploymentStatus(c *client.Client, ns, deploymentName string, desire
 		if deployment.Status.Replicas == desiredUpdatedReplicas &&
 			deployment.Status.UpdatedReplicas == desiredUpdatedReplicas {
 			// Verify RCs.
-			if deploymentUtil.GetReplicaCountForRCs(oldRCs) != 0 {
+			if deploymentutil.GetReplicaCountForRCs(oldRCs) != 0 {
 				return false, fmt.Errorf("old RCs are not fully scaled down")
 			}
-			if deploymentUtil.GetReplicaCountForRCs([]*api.ReplicationController{newRC}) != desiredUpdatedReplicas {
+			if deploymentutil.GetReplicaCountForRCs([]*api.ReplicationController{newRC}) != desiredUpdatedReplicas {
 				return false, fmt.Errorf("new RCs is not fully scaled up")
 			}
 			return true, nil