Merge pull request #41708 from bsalamat/statefulset_spreading2

Automatic merge from submit-queue (batch tested with PRs 42200, 39535, 41708, 41487, 41335)

Add support for StatefulSet spreading to the scheduler

**What this PR does / why we need it**: The scheduler's SelectorSpread priority function did not spread pods belonging to StatefulSets. This PR adds StatefulSets to the list of controllers that SelectorSpread supports.

**Which issue this PR fixes**: fixes #41513

**Release note**:

```release-note
Add support to the scheduler for spreading pods of StatefulSets.
```
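For context, the sketch below shows roughly how the extended priority is built and invoked once this change is in place. It is illustrative only, not code from this PR: it reuses the constructor and fake listers visible in the diff below, the fixture values (`app: db`, `machine1`, `machine2`, the `demoStatefulSetSpreading` function itself) are hypothetical, and the import paths follow the v1.6-era package layout shown in this diff.

```go
package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
	schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"
)

// demoStatefulSetSpreading is a hypothetical, test-style sketch of the new
// behavior: pods owned by a StatefulSet now contribute label selectors to
// SelectorSpread, so nodes already running a sibling pod score lower.
func demoStatefulSetSpreading() {
	ss := &apps.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
		Spec: apps.StatefulSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "db"}},
		},
	}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Labels: map[string]string{"app": "db"}}}
	// One pod of the same StatefulSet already runs on machine1.
	existing := []*v1.Pod{{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Labels: map[string]string{"app": "db"}},
		Spec:       v1.PodSpec{NodeName: "machine1"},
	}}

	priority := priorities.NewSelectorSpreadPriority(
		schedulertesting.FakeServiceLister([]*v1.Service{}),
		schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
		schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
		// New fourth argument introduced by this PR.
		schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{ss}),
	)

	nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(existing, nil)
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "machine2"}},
	}
	list, err := priority(pod, nodeNameToInfo, nodes)
	// machine2 should score higher than machine1, which already hosts a sibling pod.
	fmt.Println(list, err)
}
```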
@@ -19,6 +19,7 @@ go_library(
        "//pkg/api:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library",
        "//pkg/client/leaderelection:go_default_library",

@@ -21,6 +21,7 @@ import (
	"io/ioutil"
	"os"

	appsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1"
	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1"
	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
@@ -79,6 +80,7 @@ func createScheduler(
	pvcInformer coreinformers.PersistentVolumeClaimInformer,
	replicationControllerInformer coreinformers.ReplicationControllerInformer,
	replicaSetInformer extensionsinformers.ReplicaSetInformer,
	statefulSetInformer appsinformers.StatefulSetInformer,
	serviceInformer coreinformers.ServiceInformer,
	recorder record.EventRecorder,
) (*scheduler.Scheduler, error) {
@@ -90,6 +92,7 @@ func createScheduler(
		pvcInformer,
		replicationControllerInformer,
		replicaSetInformer,
		statefulSetInformer,
		serviceInformer,
		s.HardPodAffinitySymmetricWeight,
	)

@@ -81,6 +81,7 @@ func Run(s *options.SchedulerServer) error {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		recorder,
	)

@@ -18,6 +18,7 @@ go_library(
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/apps/v1beta1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//plugin/pkg/scheduler/api:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",

@@ -60,6 +60,7 @@ go_test(
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/apps/v1beta1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//plugin/pkg/scheduler/api:go_default_library",

@@ -39,25 +39,28 @@ const maxPriority float32 = 10
const zoneWeighting = 2.0 / 3.0

type SelectorSpread struct {
	serviceLister    algorithm.ServiceLister
	controllerLister algorithm.ControllerLister
	replicaSetLister algorithm.ReplicaSetLister
	serviceLister     algorithm.ServiceLister
	controllerLister  algorithm.ControllerLister
	replicaSetLister  algorithm.ReplicaSetLister
	statefulSetLister algorithm.StatefulSetLister
}

func NewSelectorSpreadPriority(
	serviceLister algorithm.ServiceLister,
	controllerLister algorithm.ControllerLister,
	replicaSetLister algorithm.ReplicaSetLister) algorithm.PriorityFunction {
	replicaSetLister algorithm.ReplicaSetLister,
	statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityFunction {
	selectorSpread := &SelectorSpread{
		serviceLister:    serviceLister,
		controllerLister: controllerLister,
		replicaSetLister: replicaSetLister,
		serviceLister:     serviceLister,
		controllerLister:  controllerLister,
		replicaSetLister:  replicaSetLister,
		statefulSetLister: statefulSetLister,
	}
	return selectorSpread.CalculateSpreadPriority
}

// Returns selectors of services, RCs and RSs matching the given pod.
func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector {
func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister, ssl algorithm.StatefulSetLister) []labels.Selector {
	selectors := make([]labels.Selector, 0, 3)
	if services, err := sl.GetPodServices(pod); err == nil {
		for _, service := range services {
@@ -76,11 +79,18 @@ func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.Controll
			}
		}
	}
	if sss, err := ssl.GetPodStatefulSets(pod); err == nil {
		for _, ss := range sss {
			if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil {
				selectors = append(selectors, selector)
			}
		}
	}
	return selectors
}

func (s *SelectorSpread) getSelectors(pod *v1.Pod) []labels.Selector {
	return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister)
	return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister)
}

// CalculateSpreadPriority spreads pods across hosts and zones, considering pods belonging to the same service or replication controller.

@@ -23,6 +23,7 @@ import (

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -60,6 +61,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
		rcs          []*v1.ReplicationController
		rss          []*extensions.ReplicaSet
		services     []*v1.Service
		sss          []*apps.StatefulSet
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
@@ -202,6 +204,19 @@ func TestSelectorSpreadPriority(t *testing.T) {
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
			test:         "service with partial pod label matches with service and replica set",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
			test:         "service with partial pod label matches with service and replica set",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
@@ -230,6 +245,19 @@ func TestSelectorSpreadPriority(t *testing.T) {
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
			test:         "disjoined service and replica set should be treated equally",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
			test:         "disjoined service and replica set should be treated equally",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
@@ -256,6 +284,19 @@ func TestSelectorSpreadPriority(t *testing.T) {
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
			test:         "Replica set with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
			test:         "StatefulSet with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
@@ -281,14 +322,28 @@ func TestSelectorSpreadPriority(t *testing.T) {
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
			test:         "Another replication set with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
			test:         "Another stateful set with partial pod label matches",
		},
	}

	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nil)
		selectorSpread := SelectorSpread{
			serviceLister:    schedulertesting.FakeServiceLister(test.services),
			controllerLister: schedulertesting.FakeControllerLister(test.rcs),
			replicaSetLister: schedulertesting.FakeReplicaSetLister(test.rss),
			serviceLister:     schedulertesting.FakeServiceLister(test.services),
			controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
			replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
			statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
		}
		list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, makeNodeList(test.nodes))
		if err != nil {
@@ -346,6 +401,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
		rcs          []*v1.ReplicationController
		rss          []*extensions.ReplicaSet
		services     []*v1.Service
		sss          []*apps.StatefulSet
		expectedList schedulerapi.HostPriorityList
		test         string
	}{
@@ -493,9 +549,10 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
	for _, test := range tests {
		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nil)
		selectorSpread := SelectorSpread{
			serviceLister:    schedulertesting.FakeServiceLister(test.services),
			controllerLister: schedulertesting.FakeControllerLister(test.rcs),
			replicaSetLister: schedulertesting.FakeReplicaSetLister(test.rss),
			serviceLister:     schedulertesting.FakeServiceLister(test.services),
			controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
			replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
			statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
		}
		list, err := selectorSpread.CalculateSpreadPriority(test.pod, nodeNameToInfo, makeLabeledNodeList(labeledNodes))
		if err != nil {

@@ -19,6 +19,7 @@ package algorithm
import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -127,3 +128,19 @@ type EmptyReplicaSetLister struct{}
func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
	return nil, nil
}

// StatefulSetLister interface represents anything that can produce a list of StatefulSet; the list is consumed by a scheduler.
type StatefulSetLister interface {
	// Gets the StatefulSet for the given pod.
	GetPodStatefulSets(*v1.Pod) ([]*apps.StatefulSet, error)
}

var _ StatefulSetLister = &EmptyStatefulSetLister{}

// EmptyStatefulSetLister implements StatefulSetLister on []apps.StatefulSet returning empty data.
type EmptyStatefulSetLister struct{}

// GetPodStatefulSets of EmptyStatefulSetLister returns nil.
func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) {
	return nil, nil
}

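To make the contract of the new `StatefulSetLister` interface concrete, here is a minimal, hypothetical slice-backed implementation (the type name `sliceStatefulSetLister` is not from this PR). It matches a pod by namespace and label selector, which is essentially what the `FakeStatefulSetLister` added to the scheduler's testing package later in this diff does; in the factory wiring further down, the generated apps/v1beta1 StatefulSet lister plays this role.

```go
// Hypothetical sketch: any type with a matching GetPodStatefulSets method
// satisfies the new algorithm.StatefulSetLister interface.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
)

// sliceStatefulSetLister serves StatefulSets from a fixed slice.
type sliceStatefulSetLister []*apps.StatefulSet

// GetPodStatefulSets returns the StatefulSets in the pod's namespace whose
// selector matches the pod's labels.
func (l sliceStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) {
	var matching []*apps.StatefulSet
	for _, ss := range l {
		if ss.Namespace != pod.Namespace {
			continue
		}
		selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
		if err != nil {
			continue // skip StatefulSets with malformed selectors
		}
		if selector.Matches(labels.Set(pod.Labels)) {
			matching = append(matching, ss)
		}
	}
	return matching, nil
}
```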
@@ -356,6 +356,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
			informerFactory.Core().V1().PersistentVolumeClaims(),
			informerFactory.Core().V1().ReplicationControllers(),
			informerFactory.Extensions().V1beta1().ReplicaSets(),
			informerFactory.Apps().V1beta1().StatefulSets(),
			informerFactory.Core().V1().Services(),
			v1.DefaultHardPodAffinitySymmetricWeight,
		).CreateFromConfig(policy); err != nil {

@@ -94,7 +94,7 @@ func init() {
		"ServiceSpreadingPriority",
		factory.PriorityConfigFactory{
			Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
				return priorities.NewSelectorSpreadPriority(args.ServiceLister, algorithm.EmptyControllerLister{}, algorithm.EmptyReplicaSetLister{})
				return priorities.NewSelectorSpreadPriority(args.ServiceLister, algorithm.EmptyControllerLister{}, algorithm.EmptyReplicaSetLister{}, algorithm.EmptyStatefulSetLister{})
			},
			Weight: 1,
		},
@@ -181,7 +181,7 @@ func defaultPriorities() sets.String {
			"SelectorSpreadPriority",
			factory.PriorityConfigFactory{
				Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
					return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister)
					return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister)
				},
				Weight: 1,
			},

@@ -18,6 +18,7 @@ go_test(
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/apps/v1beta1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",

@@ -30,6 +30,7 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	algorithmpredicates "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
@@ -518,7 +519,8 @@ func TestZeroRequest(t *testing.T) {
				Function: algorithmpriorities.NewSelectorSpreadPriority(
					schedulertesting.FakeServiceLister([]*v1.Service{}),
					schedulertesting.FakeControllerLister([]*v1.ReplicationController{}),
					schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{})),
					schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}),
					schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{})),
				Weight: 1,
			},
		}

@@ -18,8 +18,10 @@ go_library(
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library",
        "//pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library",
        "//pkg/client/listers/apps/v1beta1:go_default_library",
        "//pkg/client/listers/core/v1:go_default_library",
        "//pkg/client/listers/extensions/v1beta1:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",

@@ -33,8 +33,10 @@ import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	appsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1"
	coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1"
	extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1"
	appslisters "k8s.io/kubernetes/pkg/client/listers/apps/v1beta1"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
	extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
@@ -74,6 +76,8 @@ type ConfigFactory struct {
	controllerLister corelisters.ReplicationControllerLister
	// a means to list all replicasets
	replicaSetLister extensionslisters.ReplicaSetLister
	// a means to list all statefulsets
	statefulSetLister appslisters.StatefulSetLister

	// Close this to stop all reflectors
	StopEverything chan struct{}
@@ -105,6 +109,7 @@ func NewConfigFactory(
	pvcInformer coreinformers.PersistentVolumeClaimInformer,
	replicationControllerInformer coreinformers.ReplicationControllerInformer,
	replicaSetInformer extensionsinformers.ReplicaSetInformer,
	statefulSetInformer appsinformers.StatefulSetInformer,
	serviceInformer coreinformers.ServiceInformer,
	hardPodAffinitySymmetricWeight int,
) scheduler.Configurator {
@@ -120,6 +125,7 @@ func NewConfigFactory(
		serviceLister:                  serviceInformer.Lister(),
		controllerLister:               replicationControllerInformer.Lister(),
		replicaSetLister:               replicaSetInformer.Lister(),
		statefulSetLister:              statefulSetInformer.Lister(),
		schedulerCache:                 schedulerCache,
		StopEverything:                 stopEverything,
		schedulerName:                  schedulerName,
@@ -426,10 +432,11 @@ func (f *ConfigFactory) GetPredicates(predicateKeys sets.String) (map[string]alg

func (f *ConfigFactory) getPluginArgs() (*PluginFactoryArgs, error) {
	return &PluginFactoryArgs{
		PodLister:        f.podLister,
		ServiceLister:    f.serviceLister,
		ControllerLister: f.controllerLister,
		ReplicaSetLister: f.replicaSetLister,
		PodLister:         f.podLister,
		ServiceLister:     f.serviceLister,
		ControllerLister:  f.controllerLister,
		ReplicaSetLister:  f.replicaSetLister,
		StatefulSetLister: f.statefulSetLister,
		// All fit predicates only need to consider schedulable nodes.
		NodeLister: &nodePredicateLister{f.nodeLister},
		NodeInfo:   &predicates.CachedNodeInfo{NodeLister: f.nodeLister},

@@ -59,6 +59,7 @@ func TestCreate(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		v1.DefaultHardPodAffinitySymmetricWeight,
	)
@@ -88,6 +89,7 @@ func TestCreateFromConfig(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		v1.DefaultHardPodAffinitySymmetricWeight,
	)
@@ -140,6 +142,7 @@ func TestCreateFromEmptyConfig(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		v1.DefaultHardPodAffinitySymmetricWeight,
	)
@@ -194,6 +197,7 @@ func TestDefaultErrorFunc(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		v1.DefaultHardPodAffinitySymmetricWeight,
	)
@@ -302,6 +306,7 @@ func TestResponsibleForPod(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		v1.DefaultHardPodAffinitySymmetricWeight,
	)
@@ -314,6 +319,7 @@ func TestResponsibleForPod(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		v1.DefaultHardPodAffinitySymmetricWeight,
	)
@@ -381,6 +387,7 @@ func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().ReplicationControllers(),
		informerFactory.Extensions().V1beta1().ReplicaSets(),
		informerFactory.Apps().V1beta1().StatefulSets(),
		informerFactory.Core().V1().Services(),
		-1,
	)
@@ -424,6 +431,7 @@ func TestInvalidFactoryArgs(t *testing.T) {
			informerFactory.Core().V1().PersistentVolumeClaims(),
			informerFactory.Core().V1().ReplicationControllers(),
			informerFactory.Extensions().V1beta1().ReplicaSets(),
			informerFactory.Apps().V1beta1().StatefulSets(),
			informerFactory.Core().V1().Services(),
			test.hardPodAffinitySymmetricWeight,
		)

@@ -38,6 +38,7 @@ type PluginFactoryArgs struct {
	ServiceLister                  algorithm.ServiceLister
	ControllerLister               algorithm.ControllerLister
	ReplicaSetLister               algorithm.ReplicaSetLister
	StatefulSetLister              algorithm.StatefulSetLister
	NodeLister                     algorithm.NodeLister
	NodeInfo                       predicates.NodeInfo
	PVInfo                         predicates.PersistentVolumeInfo

@@ -17,6 +17,7 @@ go_library(
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/apis/apps/v1beta1:go_default_library",
        "//pkg/apis/extensions/v1beta1:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",

@@ -22,6 +22,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	. "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)
@@ -138,3 +139,30 @@ func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.

	return
}

var _ StatefulSetLister = &FakeStatefulSetLister{}

// FakeStatefulSetLister implements ControllerLister on []apps.StatefulSet for testing purposes.
type FakeStatefulSetLister []*apps.StatefulSet

// GetPodStatefulSets gets the StatefulSets that have the selector that match the labels on the given pod.
func (f FakeStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) {
	var selector labels.Selector

	for _, ss := range f {
		if ss.Namespace != pod.Namespace {
			continue
		}
		selector, err = metav1.LabelSelectorAsSelector(ss.Spec.Selector)
		if err != nil {
			return
		}
		if selector.Matches(labels.Set(pod.Labels)) {
			sss = append(sss, ss)
		}
	}
	if len(sss) == 0 {
		err = fmt.Errorf("Could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}
	return
}
