refactor scheduler extender related API

- move extender related API from pkg/scheduler/api to pkg/scheduler/apis/extender/v1
- alias extenderv1 to pkg/scheduler/apis/extender/v1
- use NodeScore and NodeScoreList in non-extender logic
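For orientation: non-extender code paths move from schedulerapi.HostPriority / HostPriorityList (node identified by Host) to the framework package's NodeScore / NodeScoreList (node identified by Name, score as int64), which is the shape the hunks below rely on. A minimal sketch of those framework types, inferred from the diff rather than copied from the framework package:

// Sketch of the scoring types used by non-extender code after this change.
// Field shapes are inferred from the hunks below; the real definitions live in
// k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1.
package framework

// NodeScore pairs a node name with the score a priority function gave it.
type NodeScore struct {
	Name  string
	Score int64
}

// NodeScoreList holds one NodeScore per candidate node.
type NodeScoreList []NodeScore

Extender-facing code keeps its wire types but, per the commit message, now imports them from the new location under an alias, e.g. extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1".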
@@ -27,6 +27,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -215,7 +216,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		pod          *v1.Pod
 		pods         []*v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
@@ -232,7 +233,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "nothing scheduled, nothing requested",
 		},
 		{
@@ -249,7 +250,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
@@ -266,7 +267,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "no resources requested, pods scheduled",
 			pods: []*v1.Pod{
 				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -289,7 +290,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 6}},
 			name:         "no resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -312,7 +313,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 9}},
 			name:         "resources requested, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
@@ -333,7 +334,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 6}},
 			name:         "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
@@ -354,7 +355,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &v1.Pod{Spec: cpuOnly},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "requested resources exceed node capacity",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
@@ -364,7 +365,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "zero node resources, pods scheduled with resources",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
@@ -388,7 +389,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				},
 			},
 			nodes:        []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine3", Score: 8}, {Host: "machine4", Score: 9}},
+			expectedList: []framework.NodeScore{{Name: "machine3", Score: 8}, {Name: "machine4", Score: 9}},
 			name:         "Include volume count on a node for balanced resource allocation",
 			pods: []*v1.Pod{
 				{Spec: cpuAndMemory3},
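The expected scores in TestBalancedResourceAllocation above are unchanged by this commit; only the element type is. For readers checking the numbers, here is a hedged sketch of the scoring rule they come from (the classic balanced-resource-allocation formula without the volume-balancing feature gate; the requested amounts below are illustrative assumptions, not values taken from the diff):

// Illustrative sketch of the balanced-resource-allocation scoring rule.
// Assumption: the closer the CPU and memory utilization fractions are to each
// other after adding the pod, the higher the 0..10 score.
package main

import (
	"fmt"
	"math"
)

const maxPriority = 10 // mirrors schedulerapi.MaxPriority in this release

func balancedScore(requestedCPU, capacityCPU, requestedMem, capacityMem float64) int64 {
	cpuFraction := requestedCPU / capacityCPU
	memFraction := requestedMem / capacityMem
	if cpuFraction >= 1 || memFraction >= 1 {
		return 0 // requested resources exceed node capacity
	}
	diff := math.Abs(cpuFraction - memFraction)
	return int64((1 - diff) * float64(maxPriority))
}

func main() {
	// A hypothetical pod asking for 3000m CPU and 5000 units of memory:
	// on a 4000m/10000 node the fractions are 0.75 and 0.50, giving score 7;
	// on a 6000m/10000 node both fractions are 0.50, giving the maximum 10.
	fmt.Println(balancedScore(3000, 4000, 5000, 10000)) // 7
	fmt.Println(balancedScore(3000, 6000, 5000, 10000)) // 10
}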
@@ -25,6 +25,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 
@@ -82,8 +83,8 @@ func (t *topologySpreadConstraintsMap) initialize(pod *v1.Pod, nodes []*v1.Node)
 // Note: Symmetry is not applicable. We only weigh how incomingPod matches existingPod.
 // Whether existingPod matches incomingPod doesn't contribute to the final score.
 // This is different from the Affinity API.
-func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
-	result := make(schedulerapi.HostPriorityList, len(nodes))
+func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
+	result := make(framework.NodeScoreList, len(nodes))
 	// return if incoming pod doesn't have soft topology spread constraints.
 	constraints := getSoftTopologySpreadConstraints(pod)
 	if len(constraints) == 0 {
@@ -171,7 +172,7 @@ func CalculateEvenPodsSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*sch
 	maxMinDiff := total - minCount
 	for i := range nodes {
 		node := nodes[i]
-		result[i].Host = node.Name
+		result[i].Name = node.Name
 
 		// debugging purpose: print the value for each node
 		// score must be pointer here, otherwise it's always 0
@@ -21,7 +21,7 @@ import (
 	"testing"
 
 	"k8s.io/api/core/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -101,7 +101,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 		existingPods []*v1.Pod
 		nodes        []*v1.Node
 		failedNodes  []*v1.Node // nodes + failedNodes = all nodes
-		want         schedulerapi.HostPriorityList
+		want         framework.NodeScoreList
 	}{
 		// Explanation on the Legend:
 		// a) X/Y means there are X matching pods on node1 and Y on node2, both nodes are candidates
@@ -120,9 +120,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 				st.MakeNode().Name("node-a").Label("node", "node-a").Obj(),
 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 10},
-				{Host: "node-b", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 10},
+				{Name: "node-b", Score: 10},
 			},
 		},
 		{
@@ -142,8 +142,8 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 			failedNodes: []*v1.Node{
 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 10},
 			},
 		},
 		{
@@ -159,9 +159,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 				st.MakeNode().Name("node-a").Label("node", "node-a").Obj(),
 				st.MakeNode().Name("node-b").Label("node", "node-b").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 10},
-				{Host: "node-b", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 10},
+				{Name: "node-b", Score: 10},
 			},
 		},
 		{
@@ -187,11 +187,11 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 				st.MakeNode().Name("node-d").Label("node", "node-d").Obj(),
 			},
 			failedNodes: []*v1.Node{},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 6},
-				{Host: "node-b", Score: 8},
-				{Host: "node-c", Score: 10},
-				{Host: "node-d", Score: 5},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 6},
+				{Name: "node-b", Score: 8},
+				{Name: "node-c", Score: 10},
+				{Name: "node-d", Score: 5},
 			},
 		},
 		{
@@ -222,10 +222,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 			failedNodes: []*v1.Node{
 				st.MakeNode().Name("node-y").Label("node", "node-y").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 5},
-				{Host: "node-b", Score: 8},
-				{Host: "node-x", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 5},
+				{Name: "node-b", Score: 8},
+				{Name: "node-x", Score: 10},
 			},
 		},
 		{
@@ -256,10 +256,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 			failedNodes: []*v1.Node{
 				st.MakeNode().Name("node-y").Label("node", "node-y").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 2},
-				{Host: "node-b", Score: 0},
-				{Host: "node-x", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 2},
+				{Name: "node-b", Score: 0},
+				{Name: "node-x", Score: 10},
 			},
 		},
 		{
@@ -290,10 +290,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 			failedNodes: []*v1.Node{
 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 8},
-				{Host: "node-b", Score: 8},
-				{Host: "node-x", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 8},
+				{Name: "node-b", Score: 8},
+				{Name: "node-x", Score: 10},
 			},
 		},
 		{
@@ -324,9 +324,9 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 				st.MakeNode().Name("node-b").Label("zone", "zone1").Label("node", "node-b").Obj(),
 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 10},
-				{Host: "node-x", Score: 6},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 10},
+				{Name: "node-x", Score: 6},
 			},
 		},
 		{
@@ -361,11 +361,11 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
 			},
 			failedNodes: []*v1.Node{},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 8},
-				{Host: "node-b", Score: 7},
-				{Host: "node-x", Score: 10},
-				{Host: "node-y", Score: 8},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 8},
+				{Name: "node-b", Score: 7},
+				{Name: "node-x", Score: 10},
+				{Name: "node-y", Score: 8},
 			},
 		},
 		{
@@ -391,11 +391,11 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
 			},
 			failedNodes: []*v1.Node{},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 10},
-				{Host: "node-b", Score: 8},
-				{Host: "node-x", Score: 6},
-				{Host: "node-y", Score: 5},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 10},
+				{Name: "node-b", Score: 8},
+				{Name: "node-x", Score: 6},
+				{Name: "node-y", Score: 5},
 			},
 		},
 		{
@@ -423,10 +423,10 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 			failedNodes: []*v1.Node{
 				st.MakeNode().Name("node-y").Label("zone", "zone2").Label("node", "node-y").Obj(),
 			},
-			want: []schedulerapi.HostPriority{
-				{Host: "node-a", Score: 8},
-				{Host: "node-b", Score: 6},
-				{Host: "node-x", Score: 10},
+			want: []framework.NodeScore{
+				{Name: "node-a", Score: 8},
+				{Name: "node-b", Score: 6},
+				{Name: "node-x", Score: 10},
 			},
 		},
 	}
@@ -22,6 +22,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/kubernetes/pkg/util/parsers"
 )
@@ -39,10 +40,10 @@ const (
 // based on the total size of those images.
 // - If none of the images are present, this node will be given the lowest priority.
 // - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority.
-func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
 
 	var score int
@@ -53,8 +54,8 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler
 		score = 0
 	}
 
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
 		Score: int64(score),
 	}, nil
 }
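ImageLocalityPriorityMap above is a per-node "map" style priority function; after this change it returns a single framework.NodeScore instead of a schedulerapi.HostPriority. As an illustration of how such a map function is consumed, here is a hedged sketch of a driver loop that assembles the NodeScoreList (this is not the scheduler's actual generic-scheduler code, just the pattern):

package priorities

import (
	"k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// runMapPriority runs a per-node priority "map" function over every candidate
// node and collects the per-node results into a framework.NodeScoreList.
// Passing nil metadata is a simplification for illustration.
func runMapPriority(
	pod *v1.Pod,
	nodes []*v1.Node,
	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
	mapFn func(*v1.Pod, interface{}, *schedulernodeinfo.NodeInfo) (framework.NodeScore, error),
) (framework.NodeScoreList, error) {
	result := make(framework.NodeScoreList, 0, len(nodes))
	for _, node := range nodes {
		nodeScore, err := mapFn(pod, nil, nodeNameToInfo[node.Name])
		if err != nil {
			return nil, err
		}
		result = append(result, nodeScore)
	}
	return result, nil
}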
@@ -18,14 +18,14 @@ package priorities
 
 import (
 	"crypto/sha256"
+	"encoding/hex"
 	"reflect"
-	"sort"
 	"testing"
 
-	"encoding/hex"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/kubernetes/pkg/util/parsers"
 )
@@ -114,7 +114,7 @@ func TestImageLocalityPriority(t *testing.T) {
 		pod          *v1.Pod
 		pods         []*v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
@@ -129,7 +129,7 @@ func TestImageLocalityPriority(t *testing.T) {
 			// Score: 10 * (250M/2 - 23M)/(1000M - 23M) = 1
 			pod:          &v1.Pod{Spec: test40250},
 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 1}},
 			name:         "two images spread on two nodes, prefer the larger image one",
 		},
 		{
@@ -144,7 +144,7 @@ func TestImageLocalityPriority(t *testing.T) {
 			// Score: 0
 			pod:          &v1.Pod{Spec: test40300},
 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}},
 			name:         "two images on one node, prefer this node",
 		},
 		{
@@ -159,7 +159,7 @@ func TestImageLocalityPriority(t *testing.T) {
 			// Score: 0 (10M/2 < 23M, min-threshold)
 			pod:          &v1.Pod{Spec: testMinMax},
 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
 			name:         "if exceed limit, use limit",
 		},
 		{
@@ -178,7 +178,7 @@ func TestImageLocalityPriority(t *testing.T) {
 			// Score: 0
 			pod:          &v1.Pod{Spec: testMinMax},
 			nodes:        []*v1.Node{makeImageNode("machine1", node403002000), makeImageNode("machine2", node25010), makeImageNode("machine3", nodeWithNoImages)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
 			name:         "if exceed limit, use limit (with node which has no images present)",
 		},
 	}
@@ -191,8 +191,8 @@ func TestImageLocalityPriority(t *testing.T) {
 				t.Errorf("unexpected error: %v", err)
 			}
 
-			sort.Sort(test.expectedList)
-			sort.Sort(list)
+			sortNodeScoreList(test.expectedList)
+			sortNodeScoreList(list)
 
 			if !reflect.DeepEqual(test.expectedList, list) {
 				t.Errorf("expected %#v, got %#v", test.expectedList, list)
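The test above replaces sort.Sort with a sortNodeScoreList helper: HostPriorityList implemented sort.Interface, whereas a plain framework.NodeScoreList slice need not. A minimal sketch of what such a helper could look like, assuming it orders by score with ties broken by node name so reflect.DeepEqual comparisons are stable (the real helper may differ):

package priorities

import (
	"sort"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// sortNodeScoreList orders a NodeScoreList deterministically so expected and
// actual results can be compared with reflect.DeepEqual.
// Assumed ordering: ascending score, ties broken by node name.
func sortNodeScoreList(out framework.NodeScoreList) {
	sort.Slice(out, func(i, j int) bool {
		if out[i].Score == out[j].Score {
			return out[i].Name < out[j].Name
		}
		return out[i].Score < out[j].Score
	})
}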
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 
@@ -96,7 +97,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm
 // that node; the node(s) with the highest sum are the most preferred.
 // Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
 // symmetry need to be considered for hard requirements from podAffinity
-func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
 	affinity := pod.Spec.Affinity
 	hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
 	hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil
@@ -219,14 +220,14 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 	}
 
 	// calculate final priority score for each node
-	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+	result := make(framework.NodeScoreList, 0, len(nodes))
 	maxMinDiff := maxCount - minCount
 	for i, node := range nodes {
 		fScore := float64(0)
 		if maxMinDiff > 0 {
 			fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(fScore)})
+		result = append(result, framework.NodeScore{Name: node.Name, Score: int64(fScore)})
 		if klog.V(10) {
 			klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
 		}
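The normalization in the last hunk is untouched; only the element appended to the result changes. As a worked example of that formula: with schedulerapi.MaxPriority = 10, minCount = 0 and maxCount = 5, a node whose weighted match count pm.counts[i] is 2 gets fScore = 10 * (2 - 0) / (5 - 0) = 4, the best-matching node gets 10, and a node with no matches gets 0 (and when maxMinDiff is 0, every node keeps the default fScore of 0).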
@@ -24,6 +24,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -266,7 +267,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 		pod          *v1.Pod
 		pods         []*v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
@@ -276,7 +277,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
 			name:         "all machines are same priority as Affinity is nil",
 		},
 		// the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score
@@ -294,7 +295,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
 			name: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
 				"which doesn't match either pods in nodes or in topology key",
 		},
@@ -312,7 +313,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
 			name:         "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
 		},
 		// there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
@@ -336,7 +337,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 5}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: schedulerapi.MaxPriority}, {Name: "machine5", Score: 5}},
 			name:         "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
 		},
 		// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
@@ -352,7 +353,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
 			name:         "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
 		},
 		// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
@@ -368,7 +369,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
 			name:         "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 		},
 		{
@@ -382,7 +383,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
 			name:         "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
 		},
 
@@ -402,7 +403,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
 		},
 		{
@@ -415,7 +416,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
 		},
 		{
@@ -429,7 +430,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
 		},
 		// Test the symmetry cases for anti affinity
@@ -443,7 +444,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
 		},
 		// Test both  affinity and anti-affinity
@@ -457,7 +458,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
 			name:         "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
 		},
 		// Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service),
@@ -482,7 +483,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 4}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 4}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: schedulerapi.MaxPriority}, {Name: "machine5", Score: 4}},
 			name:         "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
 		},
 		// Consider Affinity, Anti Affinity and symmetry together.
@@ -504,7 +505,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: schedulerapi.MaxPriority}, {Name: "machine4", Score: 0}},
 			name:         "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
 		},
 		// Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon:
@@ -520,7 +521,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "Avoid panic when partial nodes in a topology don't have pods with affinity",
 | 
								name:         "Avoid panic when partial nodes in a topology don't have pods with affinity",
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -578,7 +579,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 | 
				
			|||||||
		pods                  []*v1.Pod
 | 
							pods                  []*v1.Pod
 | 
				
			||||||
		nodes                 []*v1.Node
 | 
							nodes                 []*v1.Node
 | 
				
			||||||
		hardPodAffinityWeight int32
 | 
							hardPodAffinityWeight int32
 | 
				
			||||||
		expectedList          schedulerapi.HostPriorityList
 | 
							expectedList          framework.NodeScoreList
 | 
				
			||||||
		name                  string
 | 
							name                  string
 | 
				
			||||||
	}{
 | 
						}{
 | 
				
			||||||
		{
 | 
							{
 | 
				
			||||||
@@ -593,7 +594,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 | 
				
			|||||||
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 | 
									{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 | 
								hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 | 
				
			||||||
			expectedList:          []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}},
 | 
								expectedList:          []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: 0}},
 | 
				
			||||||
			name:                  "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
 | 
								name:                  "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		{
 | 
							{
 | 
				
			||||||
@@ -608,7 +609,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 | 
				
			|||||||
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 | 
									{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
 | 
				
			||||||
			},
 | 
								},
 | 
				
			||||||
			hardPodAffinityWeight: 0,
 | 
								hardPodAffinityWeight: 0,
 | 
				
			||||||
			expectedList:          []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
 | 
								expectedList:          []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
 | 
				
			||||||
			name:                  "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
 | 
								name:                  "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match",
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 
 | 
				
			|||||||
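Note: the expectedList fields above now hold framework.NodeScore values instead of schedulerapi.HostPriority. As a reading aid, the new types are presumably shaped roughly like the minimal stand-in below; the authoritative definitions live in pkg/scheduler/apis/extender/v1 and pkg/scheduler/framework/v1alpha1, not in this sketch.

package framework

// NodeScore pairs a node name with the integer score a priority function
// assigned to it; Name and Score are the fields the test tables above set.
type NodeScore struct {
	Name  string
	Score int64
}

// NodeScoreList collects one NodeScore per evaluated node.
type NodeScoreList []NodeScore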
@@ -24,6 +24,7 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -93,7 +94,7 @@ func TestLeastRequested(t *testing.T) {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
		name         string
	}{
		{
@@ -110,7 +111,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
			name:         "nothing scheduled, nothing requested",
		},
		{
@@ -127,7 +128,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 5}},
			name:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
@@ -144,7 +145,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
			name:         "no resources requested, pods scheduled",
			pods: []*v1.Pod{
				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -167,7 +168,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 7}, {Name: "machine2", Score: 5}},
			name:         "no resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -190,7 +191,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 4}},
			name:         "resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
@@ -211,7 +212,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 6}},
			name:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
@@ -232,7 +233,7 @@ func TestLeastRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: cpuOnly},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 2}},
			name:         "requested resources exceed node capacity",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
@@ -242,7 +243,7 @@ func TestLeastRequested(t *testing.T) {
		{
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "zero node resources, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
@@ -23,7 +23,7 @@ import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -108,7 +108,7 @@ func TestMostRequested(t *testing.T) {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
		name         string
	}{
		{
@@ -125,7 +125,7 @@ func TestMostRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "nothing scheduled, nothing requested",
		},
		{
@@ -142,7 +142,7 @@ func TestMostRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 6}, {Name: "machine2", Score: 5}},
			name:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
@@ -159,7 +159,7 @@ func TestMostRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 4}},
			name:         "no resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
@@ -182,7 +182,7 @@ func TestMostRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: cpuAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 5}},
			name:         "resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
@@ -203,7 +203,7 @@ func TestMostRequested(t *testing.T) {
			*/
			pod:          &v1.Pod{Spec: bigCPUAndMemory},
			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 2}},
			name:         "resources requested with more than the node, pods scheduled with resources",
		},
	}
@@ -23,6 +23,7 @@ import (
	"k8s.io/apimachinery/pkg/labels"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -31,10 +32,10 @@ import (
// it will get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
// the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
// score the node gets.
-func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
	node := nodeInfo.Node()
	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
	}

	// default is the podspec.
@@ -59,7 +60,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
			// TODO: Avoid computing it for all nodes if this becomes a performance problem.
			nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
			if err != nil {
-				return schedulerapi.HostPriority{}, err
+				return framework.NodeScore{}, err
			}
			if nodeSelector.Matches(labels.Set(node.Labels)) {
				count += preferredSchedulingTerm.Weight
@@ -67,8 +68,8 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s
		}
	}

-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
		Score: int64(count),
	}, nil
}
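For orientation, here is a self-contained sketch of the map-style scoring pattern that CalculateNodeAffinityPriorityMap follows after this change: each node is scored independently and the result is returned as one NodeScore. All types and names below are local stand-ins for illustration only, not the scheduler's real API.

package main

import "fmt"

// Local stand-ins for framework.NodeScore / framework.NodeScoreList.
type NodeScore struct {
	Name  string
	Score int64
}
type NodeScoreList []NodeScore

// scoreNode mimics a priority "map" function: it sums the weights of the
// preferred terms that a node's labels satisfy and returns one NodeScore.
func scoreNode(name string, nodeLabels map[string]string, prefWeights map[string]int64) NodeScore {
	var count int64
	for key, weight := range prefWeights {
		if _, ok := nodeLabels[key]; ok {
			count += weight
		}
	}
	return NodeScore{Name: name, Score: count}
}

func main() {
	prefWeights := map[string]int64{"zone": 2, "disktype": 1}
	nodes := map[string]map[string]string{
		"machine1": {"zone": "az1"},
		"machine2": {"zone": "az1", "disktype": "ssd"},
	}
	var result NodeScoreList
	for name, nodeLabels := range nodes {
		result = append(result, scoreNode(name, nodeLabels, prefWeights))
	}
	fmt.Println(result)
}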
@@ -23,6 +23,7 @@ import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -104,7 +105,7 @@ func TestNodeAffinityPriority(t *testing.T) {
	tests := []struct {
		pod          *v1.Pod
		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
		name         string
	}{
		{
@@ -118,7 +119,7 @@ func TestNodeAffinityPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
			name:         "all machines are same priority as NodeAffinity is nil",
		},
		{
@@ -132,7 +133,7 @@ func TestNodeAffinityPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
			name:         "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
		},
		{
@@ -146,7 +147,7 @@ func TestNodeAffinityPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
			name:         "only machine1 matches the preferred scheduling requirements of pod",
		},
		{
@@ -160,7 +161,7 @@ func TestNodeAffinityPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 3}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine5", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 3}},
			name:         "all machines matches the preferred scheduling requirements of pod but with different priorities ",
		},
	}
@@ -22,6 +22,7 @@ import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -43,10 +44,10 @@ func NewNodeLabelPriority(label string, presence bool) (PriorityMapFunction, Pri
// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
	node := nodeInfo.Node()
	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
	}

	exists := labels.Set(node.Labels).Has(n.label)
@@ -54,8 +55,8 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta i
	if (exists && n.presence) || (!exists && !n.presence) {
		score = schedulerapi.MaxPriority
	}
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
		Score: int64(score),
	}, nil
}
@@ -18,12 +18,12 @@ package priorities

import (
	"reflect"
-	"sort"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -35,7 +35,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
		nodes        []*v1.Node
		label        string
		presence     bool
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
		name         string
	}{
		{
@@ -44,7 +44,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
			label:        "baz",
			presence:     true,
			name:         "no match found, presence true",
@@ -55,7 +55,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			label:        "baz",
			presence:     false,
			name:         "no match found, presence false",
@@ -66,7 +66,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
			label:        "foo",
			presence:     true,
			name:         "one match found, presence true",
@@ -77,7 +77,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			label:        "foo",
			presence:     false,
			name:         "one match found, presence false",
@@ -88,7 +88,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			label:        "bar",
			presence:     true,
			name:         "two matches found, presence true",
@@ -99,7 +99,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
				{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}},
			},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}},
			label:        "bar",
			presence:     false,
			name:         "two matches found, presence false",
@@ -118,8 +118,8 @@ func TestNewNodeLabelPriority(t *testing.T) {
				t.Errorf("unexpected error: %v", err)
			}
			// sort the two lists to avoid failures on account of different ordering
-			sort.Sort(test.expectedList)
-			sort.Sort(list)
+			sortNodeScoreList(test.expectedList)
+			sortNodeScoreList(list)
			if !reflect.DeepEqual(test.expectedList, list) {
				t.Errorf("expected %#v, got %#v", test.expectedList, list)
			}
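The hunk above replaces sort.Sort with a sortNodeScoreList helper, presumably because framework.NodeScoreList, unlike the old schedulerapi.HostPriorityList, does not implement sort.Interface. A plausible sketch of such a helper, written against a local stand-in type (the commit's actual helper may differ in its ordering rule):

package main

import (
	"fmt"
	"sort"
)

// Local stand-in for framework.NodeScore.
type NodeScore struct {
	Name  string
	Score int64
}
type NodeScoreList []NodeScore

// sortNodeScoreList gives the list a deterministic order (by node name here)
// so two lists can be compared with reflect.DeepEqual regardless of how the
// priority function happened to order them.
func sortNodeScoreList(out NodeScoreList) {
	sort.Slice(out, func(i, j int) bool {
		return out[i].Name < out[j].Name
	})
}

func main() {
	l := NodeScoreList{{Name: "machine2", Score: 10}, {Name: "machine1", Score: 0}}
	sortNodeScoreList(l)
	fmt.Println(l) // [{machine1 0} {machine2 10}]
}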
@@ -23,15 +23,16 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// CalculateNodePreferAvoidPodsPriorityMap priorities nodes according to the node annotation
// "scheduler.alpha.kubernetes.io/preferAvoidPods".
-func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
	node := nodeInfo.Node()
	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
	}
	var controllerRef *metav1.OwnerReference
	if priorityMeta, ok := meta.(*priorityMetadata); ok {
@@ -49,19 +50,19 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, node
		}
	}
	if controllerRef == nil {
-		return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
+		return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
	}

	avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations)
	if err != nil {
		// If we cannot get annotation, assume it's schedulable there.
-		return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
+		return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
	}
	for i := range avoids.PreferAvoidPods {
		avoid := &avoids.PreferAvoidPods[i]
		if avoid.PodSignature.PodController.Kind == controllerRef.Kind && avoid.PodSignature.PodController.UID == controllerRef.UID {
-			return schedulerapi.HostPriority{Host: node.Name, Score: 0}, nil
+			return framework.NodeScore{Name: node.Name, Score: 0}, nil
		}
	}
-	return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
+	return framework.NodeScore{Name: node.Name, Score: schedulerapi.MaxPriority}, nil
}
@@ -18,12 +18,12 @@ package priorities

import (
	"reflect"
-	"sort"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -83,7 +83,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
	tests := []struct {
		pod          *v1.Pod
		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
		name         string
	}{
		{
@@ -96,7 +96,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
				},
			},
			nodes:        testNodes,
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			name:         "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
		},
		{
@@ -109,7 +109,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
				},
			},
			nodes:        testNodes,
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			name:         "ownership by random controller should be ignored",
		},
		{
@@ -122,7 +122,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
				},
			},
			nodes:        testNodes,
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			name:         "owner without Controller field set should be ignored",
		},
		{
@@ -135,7 +135,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
				},
			},
			nodes:        testNodes,
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: schedulerapi.MaxPriority}},
			name:         "pod managed by ReplicaSet should avoid a node, this node get lowest priority score",
		},
	}
@@ -148,8 +148,8 @@ func TestNodePreferAvoidPriority(t *testing.T) {
				t.Errorf("unexpected error: %v", err)
			}
			// sort the two lists to avoid failures on account of different ordering
-			sort.Sort(test.expectedList)
-			sort.Sort(list)
+			sortNodeScoreList(test.expectedList)
+			sortNodeScoreList(list)
			if !reflect.DeepEqual(test.expectedList, list) {
				t.Errorf("expected %#v, got %#v", test.expectedList, list)
			}
@@ -18,7 +18,7 @@ package priorities

import (
	"k8s.io/api/core/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@@ -30,7 +30,7 @@ func NormalizeReduce(maxPriority int64, reverse bool) PriorityReduceFunction {
		_ *v1.Pod,
		_ interface{},
		_ map[string]*schedulernodeinfo.NodeInfo,
-		result schedulerapi.HostPriorityList) error {
+		result framework.NodeScoreList) error {

		var maxCount int64
		for i := range result {
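Only the signature of the reduce closure changes in this hunk; the normalization it performs on the per-node scores appears untouched apart from the type rename. A self-contained sketch of that kind of normalization, assuming the usual find-the-max-then-scale approach and using a local stand-in type (not the exact body of NormalizeReduce):

package main

import "fmt"

// Local stand-in for framework.NodeScore.
type NodeScore struct {
	Name  string
	Score int64
}
type NodeScoreList []NodeScore

// normalize rescales every score onto the 0..maxPriority range; when reverse
// is true the scale is flipped so that low raw scores end up ranked highest.
func normalize(result NodeScoreList, maxPriority int64, reverse bool) {
	var maxCount int64
	for i := range result {
		if result[i].Score > maxCount {
			maxCount = result[i].Score
		}
	}
	if maxCount == 0 {
		if reverse {
			for i := range result {
				result[i].Score = maxPriority
			}
		}
		return
	}
	for i := range result {
		score := result[i].Score * maxPriority / maxCount
		if reverse {
			score = maxPriority - score
		}
		result[i].Score = score
	}
}

func main() {
	scores := NodeScoreList{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 6}}
	normalize(scores, 10, false)
	fmt.Println(scores) // [{machine1 5} {machine2 10}]
}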
@@ -24,7 +24,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -150,7 +150,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 		test               string
 		requested          resources
 		nodes              map[string]nodeResources
-		expectedPriorities schedulerapi.HostPriorityList
+		expectedPriorities framework.NodeScoreList
 	}
 
 	tests := []test{
@@ -167,7 +167,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 					used:     resources{0, 0},
 				},
 			},
-			expectedPriorities: []schedulerapi.HostPriority{{Host: "node1", Score: 10}, {Host: "node2", Score: 10}},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 10}},
 		},
 		{
 			test:      "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)",
@@ -182,7 +182,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 					used:     resources{0, 0},
 				},
 			},
-			expectedPriorities: []schedulerapi.HostPriority{{Host: "node1", Score: 4}, {Host: "node2", Score: 5}},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 5}},
 		},
 		{
 			test:      "no resources requested, pods scheduled with resources (default - least requested nodes have priority)",
@@ -197,7 +197,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 					used:     resources{3000, 5000},
 				},
 			},
-			expectedPriorities: []schedulerapi.HostPriority{{Host: "node1", Score: 4}, {Host: "node2", Score: 5}},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 4}, {Name: "node2", Score: 5}},
 		},
 	}
 
@@ -291,7 +291,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 		pod          *v1.Pod
 		pods         []*v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
@@ -310,7 +310,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "nothing scheduled, nothing requested",
 		},
 
@@ -330,7 +330,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: extendedResourcePod1},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
 			name:         "resources requested, pods scheduled with less resources",
 			pods: []*v1.Pod{
 				{Spec: noResources},
@@ -353,7 +353,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: extendedResourcePod1},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
 			name:         "resources requested, pods scheduled with resources, on node with existing pod running ",
 			pods: []*v1.Pod{
 				{Spec: machine2Pod},
@@ -376,7 +376,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: extendedResourcePod2},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 10}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
 			name:         "resources requested, pods scheduled with more resources",
 			pods: []*v1.Pod{
 				{Spec: noResources},
@@ -447,7 +447,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 		pod          *v1.Pod
 		pods         []*v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
@@ -480,7 +480,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "nothing scheduled, nothing requested",
 		},
 
@@ -514,7 +514,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: extnededResourcePod1},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 3}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
 			name:         "resources requested, pods scheduled with less resources",
 			pods: []*v1.Pod{
 				{Spec: noResources},
@@ -550,7 +550,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: extnededResourcePod1},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 7}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
 			name:         "resources requested, pods scheduled with resources, on node with existing pod running ",
 			pods: []*v1.Pod{
 				{Spec: machine2Pod},
@@ -601,7 +601,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 
 			pod:          &v1.Pod{Spec: extnededResourcePod2},
 			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
 			name:         "resources requested, pods scheduled with more resources",
 			pods: []*v1.Pod{
 				{Spec: noResources},
 
@@ -25,7 +25,7 @@ import (
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -50,13 +50,13 @@ var DefaultRequestedRatioResources = ResourceToWeightMap{v1.ResourceMemory: 1, v
 func (r *ResourceAllocationPriority) PriorityMap(
 	pod *v1.Pod,
 	meta interface{},
-	nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+	nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
 	if r.resourceToWeightMap == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("resources not found")
+		return framework.NodeScore{}, fmt.Errorf("resources not found")
 	}
 	requested := make(ResourceToValueMap, len(r.resourceToWeightMap))
 	allocatable := make(ResourceToValueMap, len(r.resourceToWeightMap))
@@ -90,8 +90,8 @@ func (r *ResourceAllocationPriority) PriorityMap(
 		}
 	}
 
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
 		Score: score,
 	}, nil
 }
 
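Note: the framework.NodeScore and framework.NodeScoreList types that replace HostPriority and HostPriorityList are declared in pkg/scheduler/framework/v1alpha1 and are not shown in this diff. From the way they are used here (a node name, an int64 score, a plain slice that reduce functions mutate in place), they are assumed to look roughly like this sketch:

package v1alpha1

// NodeScore is the score a priority map function assigns to one node
// (HostPriority.Host becomes Name; Score stays an int64).
type NodeScore struct {
	Name  string
	Score int64
}

// NodeScoreList holds one NodeScore per candidate node; reduce functions
// normalize it in place.
type NodeScoreList []NodeScore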
@@ -20,7 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/api/core/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 	"k8s.io/klog"
@@ -33,10 +33,10 @@ import (
 // of the pod are satisfied, the node is assigned a score of 1.
 // Rationale of choosing the lowest score of 1 is that this is mainly selected to break ties between nodes that have
 // same scores assigned by one of least and most requested priority functions.
-func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
 
 	allocatableResources := nodeInfo.AllocatableResource()
@@ -71,8 +71,8 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
 		)
 	}
 
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
 		Score: score,
 	}, nil
 }
 
@@ -22,8 +22,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -102,37 +101,37 @@ func TestResourceLimitsPriority(t *testing.T) {
 		// input pod
 		pod          *v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
 			pod:          &v1.Pod{Spec: noResources},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 0), makeNode("machine3", 0, 10000), makeNode("machine4", 0, 0)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}, {Host: "machine4", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}, {Name: "machine4", Score: 0}},
 			name:         "pod does not specify its resource limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: cpuOnly},
 			nodes:        []*v1.Node{makeNode("machine1", 3000, 10000), makeNode("machine2", 2000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}},
 			name:         "pod only specifies  cpu limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: memOnly},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 1}},
 			name:         "pod only specifies  mem limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 1}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 1}},
 			name:         "pod specifies both cpu and  mem limits",
 		},
 		{
 			pod:          &v1.Pod{Spec: cpuAndMemory},
 			nodes:        []*v1.Node{makeNode("machine1", 0, 0)},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
 			name:         "node does not advertise its allocatables",
 		},
 	}
 
@@ -23,6 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 
@@ -63,11 +64,11 @@ func NewSelectorSpreadPriority(
 // It favors nodes that have fewer existing matching pods.
 // i.e. it pushes the scheduler towards a node where there's the smallest number of
 // pods which match the same service, RC,RSs or StatefulSets selectors as the pod being scheduled.
-func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	var selectors []labels.Selector
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
 
 	priorityMeta, ok := meta.(*priorityMetadata)
@@ -78,16 +79,16 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 	}
 
 	if len(selectors) == 0 {
-		return schedulerapi.HostPriority{
-			Host:  node.Name,
+		return framework.NodeScore{
+			Name:  node.Name,
 			Score: 0,
 		}, nil
 	}
 
 	count := countMatchingPods(pod.Namespace, selectors, nodeInfo)
 
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
 		Score: int64(count),
 	}, nil
 }
@@ -96,7 +97,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 // based on the number of existing matching pods on the node
 // where zone information is included on the nodes, it favors nodes
 // in zones with fewer existing matching pods.
-func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
+func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error {
 	countsByZone := make(map[string]int64, 10)
 	maxCountByZone := int64(0)
 	maxCountByNodeName := int64(0)
@@ -105,7 +106,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 		if result[i].Score > maxCountByNodeName {
 			maxCountByNodeName = result[i].Score
 		}
-		zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
+		zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Name].Node())
 		if zoneID == "" {
 			continue
 		}
@@ -132,7 +133,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 		}
 		// If there is zone information present, incorporate it
 		if haveZones {
-			zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
+			zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Name].Node())
 			if zoneID != "" {
 				zoneScore := MaxPriorityFloat64
 				if maxCountByZone > 0 {
@@ -144,7 +145,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 		result[i].Score = int64(fScore)
 		if klog.V(10) {
 			klog.Infof(
-				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int64(fScore),
+				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Name, int64(fScore),
 			)
 		}
 	}
@@ -210,12 +211,12 @@ func countMatchingPods(namespace string, selectors []labels.Selector, nodeInfo *
 
 // CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service
 // on given machine
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	var firstServiceSelector labels.Selector
 
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
 	priorityMeta, ok := meta.(*priorityMetadata)
 	if ok {
@@ -230,15 +231,15 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
 	}
 	score := countMatchingPods(pod.Namespace, selectors, nodeInfo)
 
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
 		Score: int64(score),
 	}, nil
 }
 
 // CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error {
 	var numServicePods int64
 	var label string
 	podCounts := map[string]int64{}
@@ -247,20 +248,20 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 
 	for _, hostPriority := range result {
 		numServicePods += hostPriority.Score
-		if !labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Has(s.label) {
+		if !labels.Set(nodeNameToInfo[hostPriority.Name].Node().Labels).Has(s.label) {
 			continue
 		}
-		label = labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Get(s.label)
-		labelNodesStatus[hostPriority.Host] = label
+		label = labels.Set(nodeNameToInfo[hostPriority.Name].Node().Labels).Get(s.label)
+		labelNodesStatus[hostPriority.Name] = label
 		podCounts[label] += hostPriority.Score
 	}
 
 	//score int - scale of 0-maxPriority
 	// 0 being the lowest priority and maxPriority being the highest
 	for i, hostPriority := range result {
-		label, ok := labelNodesStatus[hostPriority.Host]
+		label, ok := labelNodesStatus[hostPriority.Name]
 		if !ok {
-			result[i].Host = hostPriority.Host
+			result[i].Name = hostPriority.Name
 			result[i].Score = 0
 			continue
 		}
@@ -269,7 +270,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, m
 		if numServicePods > 0 {
 			fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
 		}
-		result[i].Host = hostPriority.Host
+		result[i].Name = hostPriority.Name
 		result[i].Score = int64(fScore)
 	}
 
 
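Note: CalculateAntiAffinityPriorityReduce keeps its normalization unchanged; only the field names move from Host to Name. A small self-contained example of the formula above, assuming maxPriorityFloat64 is 10 (the historical MaxPriority) and hypothetical per-label pod counts, showing that label groups with fewer matching service pods end up with higher scores:

package main

import "fmt"

func main() {
	const maxPriorityFloat64 = 10.0
	numServicePods := int64(4) // total matching service pods across all scored nodes
	// assumed counts of matching pods per label value (e.g. per zone)
	podCounts := map[string]int64{"zone-a": 3, "zone-b": 1, "zone-c": 0}
	for label, count := range podCounts {
		fScore := maxPriorityFloat64 * (float64(numServicePods-count) / float64(numServicePods))
		fmt.Printf("%s: %d\n", label, int64(fScore)) // zone-a: 2, zone-b: 7, zone-c: 10
	}
}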
@@ -18,13 +18,13 @@ package priorities
 
 import (
 	"reflect"
-	"sort"
 	"testing"
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -61,20 +61,20 @@ func TestSelectorSpreadPriority(t *testing.T) {
 		rss          []*apps.ReplicaSet
 		services     []*v1.Service
 		sss          []*apps.StatefulSet
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
 			pod:          new(v1.Pod),
 			nodes:        []string{"machine1", "machine2"},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "nothing scheduled",
 		},
 		{
 			pod:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
 			pods:         []*v1.Pod{{Spec: zone1Spec}},
 			nodes:        []string{"machine1", "machine2"},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "no services",
 		},
 		{
@@ -82,7 +82,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			pods:         []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: schedulerapi.MaxPriority}},
 			name:         "different services",
 		},
 		{
@@ -93,7 +93,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
 			name:         "two pods, one service pod",
 		},
 		{
@@ -107,7 +107,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
 			name:         "five pods, one service pod in no namespace",
 		},
 		{
@@ -120,7 +120,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
 			name:         "four pods, one service pod in default namespace",
 		},
 		{
@@ -134,7 +134,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: schedulerapi.MaxPriority}, {Name: "machine2", Score: 0}},
 			name:         "five pods, one service pod in specific namespace",
 		},
 		{
@@ -146,7 +146,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "three pods, two service pods on different machines",
 		},
 		{
@@ -159,7 +159,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 0}},
 			name:         "four pods, three service pods",
 		},
 		{
@@ -171,7 +171,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
 			name:         "service with partial pod label matches",
 		},
 		{
@@ -186,7 +186,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
 			// do spreading pod2 and pod3 and not pod1.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "service with partial pod label matches with service and replication controller",
 		},
 		{
@@ -200,7 +200,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "service with partial pod label matches with service and replica set",
 		},
 		{
@@ -213,7 +213,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
 			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "service with partial pod label matches with service and stateful set",
 		},
 		{
@@ -227,7 +227,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			// Taken together Service and Replication Controller should match no pods.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}},
 			name:         "disjoined service and replication controller matches no pods",
 		},
 		{
@@ -241,7 +241,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}},
 			name:         "disjoined service and replica set matches no pods",
 		},
 		{
@@ -254,7 +254,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes:        []string{"machine1", "machine2"},
 			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
 			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 10}, {Name: "machine2", Score: 10}},
 			name:         "disjoined service and stateful set matches no pods",
 		},
 		{
@@ -267,7 +267,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes: []string{"machine1", "machine2"},
 			rcs:   []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
 			// Both Nodes have one pod from the given RC, hence both get 0 score.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "Replication controller with partial pod label matches",
 		},
 		{
@@ -280,7 +280,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes: []string{"machine1", "machine2"},
 			rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "Replica set with partial pod label matches",
 		},
 		{
@@ -293,7 +293,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			nodes: []string{"machine1", "machine2"},
 			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
 			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
 			name:         "StatefulSet with partial pod label matches",
 		},
 		{
@@ -305,7 +305,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			rcs:          []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
 			name:         "Another replication controller with partial pod label matches",
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		{
 | 
							{
 | 
				
			||||||
@@ -318,7 +318,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 | 
				
			|||||||
			nodes: []string{"machine1", "machine2"},
 | 
								nodes: []string{"machine1", "machine2"},
 | 
				
			||||||
			rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
 | 
								rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
 | 
				
			||||||
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
 | 
								// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
 | 
				
			||||||
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
 | 
								expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
 | 
				
			||||||
			name:         "Another replication set with partial pod label matches",
 | 
								name:         "Another replication set with partial pod label matches",
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
		{
 | 
							{
 | 
				
			||||||
@@ -331,7 +331,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 | 
				
			|||||||
			nodes: []string{"machine1", "machine2"},
 | 
								nodes: []string{"machine1", "machine2"},
 | 
				
			||||||
			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
 | 
								sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
 | 
				
			||||||
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
 | 
								// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
 | 
				
			||||||
			expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
 | 
								expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 5}},
 | 
				
			||||||
			name:         "Another stateful set with partial pod label matches",
 | 
								name:         "Another stateful set with partial pod label matches",
 | 
				
			||||||
		},
 | 
							},
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -411,31 +411,31 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 		rss          []*apps.ReplicaSet
 		services     []*v1.Service
 		sss          []*apps.StatefulSet
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
 			pod: new(v1.Pod),
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
 			name: "nothing scheduled",
 		},
 		{
 			pod:  buildPod("", labels1, nil),
 			pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
 			name: "no services",
 		},
@@ -443,13 +443,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 			pod:      buildPod("", labels1, nil),
 			pods:     []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
 			name: "different services",
 		},
@@ -460,13 +460,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				buildPod(nodeMachine1Zone2, labels2, nil),
 			},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone2, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
 			name: "two pods, 0 matching",
 		},
@@ -477,13 +477,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				buildPod(nodeMachine1Zone2, labels1, nil),
 			},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
-				{Host: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
-				{Host: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
+				{Name: nodeMachine2Zone2, Score: 3}, // Already have pod in zone
+				{Name: nodeMachine1Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine2Zone3, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine3Zone3, Score: schedulerapi.MaxPriority},
 			},
 			name: "two pods, 1 matching (in z2)",
 		},
@@ -497,13 +497,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				buildPod(nodeMachine2Zone3, labels1, nil),
 			},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
-				{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
-				{Host: nodeMachine2Zone2, Score: 0}, // Pod on node
-				{Host: nodeMachine1Zone3, Score: 6}, // Pod in zone
-				{Host: nodeMachine2Zone3, Score: 3}, // Pod on node
-				{Host: nodeMachine3Zone3, Score: 6}, // Pod in zone
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority},
+				{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
+				{Name: nodeMachine2Zone2, Score: 0}, // Pod on node
+				{Name: nodeMachine1Zone3, Score: 6}, // Pod in zone
+				{Name: nodeMachine2Zone3, Score: 3}, // Pod on node
+				{Name: nodeMachine3Zone3, Score: 6}, // Pod in zone
 			},
 			name: "five pods, 3 matching (z2=2, z3=1)",
 		},
@@ -516,13 +516,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				buildPod(nodeMachine1Zone3, labels1, nil),
 			},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
-				{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
-				{Host: nodeMachine2Zone2, Score: 3}, // Pod in zone
-				{Host: nodeMachine1Zone3, Score: 0}, // Pod on node
-				{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
-				{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: 0}, // Pod on node
+				{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
+				{Name: nodeMachine2Zone2, Score: 3}, // Pod in zone
+				{Name: nodeMachine1Zone3, Score: 0}, // Pod on node
+				{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
+				{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
 			},
 			name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
 		},
@@ -535,13 +535,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				buildPod(nodeMachine2Zone2, labels2, nil),
 			},
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
-				{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
-				{Host: nodeMachine2Zone2, Score: 3}, // Pod in zone
-				{Host: nodeMachine1Zone3, Score: 0}, // Pod on node
-				{Host: nodeMachine2Zone3, Score: 3}, // Pod in zone
-				{Host: nodeMachine3Zone3, Score: 3}, // Pod in zone
+			expectedList: []framework.NodeScore{
+				{Name: nodeMachine1Zone1, Score: 0}, // Pod on node
+				{Name: nodeMachine1Zone2, Score: 0}, // Pod on node
+				{Name: nodeMachine2Zone2, Score: 3}, // Pod in zone
+				{Name: nodeMachine1Zone3, Score: 0}, // Pod on node
+				{Name: nodeMachine2Zone3, Score: 3}, // Pod in zone
+				{Name: nodeMachine3Zone3, Score: 3}, // Pod in zone
 			},
 			name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
 		},
@@ -553,7 +553,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
 			},
 			rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{
+			expectedList: []framework.NodeScore{
 				// Note that because we put two pods on the same node (nodeMachine1Zone3),
 				// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
 				// However they kind of make sense; zone1 is still most-highly favored.
@@ -561,12 +561,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				// We would probably prefer to see a bigger gap between putting a second
 				// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
 				// This is also consistent with what we have already.
-				{Host: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone
-				{Host: nodeMachine1Zone2, Score: 5},                        // Pod on node
-				{Host: nodeMachine2Zone2, Score: 6},                        // Pod in zone
-				{Host: nodeMachine1Zone3, Score: 0},                        // Two pods on node
-				{Host: nodeMachine2Zone3, Score: 3},                        // Pod in zone
-				{Host: nodeMachine3Zone3, Score: 3},                        // Pod in zone
+				{Name: nodeMachine1Zone1, Score: schedulerapi.MaxPriority}, // No pods in zone
+				{Name: nodeMachine1Zone2, Score: 5},                        // Pod on node
+				{Name: nodeMachine2Zone2, Score: 6},                        // Pod in zone
+				{Name: nodeMachine1Zone3, Score: 0},                        // Two pods on node
+				{Name: nodeMachine2Zone3, Score: 3},                        // Pod in zone
+				{Name: nodeMachine3Zone3, Score: 3},                        // Pod in zone
 			},
 			name: "Replication controller spreading (z1=0, z2=1, z3=2)",
 		},
@@ -594,8 +594,8 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 				t.Errorf("unexpected error: %v", err)
 			}
 			// sort the two lists to avoid failures on account of different ordering
-			sort.Sort(test.expectedList)
-			sort.Sort(list)
+			sortNodeScoreList(test.expectedList)
+			sortNodeScoreList(list)
 			if !reflect.DeepEqual(test.expectedList, list) {
 				t.Errorf("expected %#v, got %#v", test.expectedList, list)
 			}
@@ -640,24 +640,24 @@ func TestZoneSpreadPriority(t *testing.T) {
 		pods         []*v1.Pod
 		nodes        map[string]map[string]string
 		services     []*v1.Service
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		{
 			pod:   new(v1.Pod),
 			nodes: labeledNodes,
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
-				{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
+				{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "nothing scheduled",
 		},
 		{
 			pod:   &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
 			pods:  []*v1.Pod{{Spec: zone1Spec}},
 			nodes: labeledNodes,
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
-				{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
+				{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "no services",
 		},
 		{
@@ -665,9 +665,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			pods:     []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
-				{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
+				{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "different services",
 		},
 		{
@@ -679,9 +679,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority},
-				{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: schedulerapi.MaxPriority}, {Name: "machine12", Score: schedulerapi.MaxPriority},
+				{Name: "machine21", Score: 0}, {Name: "machine22", Score: 0},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "three pods, one service pod",
 		},
 		{
@@ -693,9 +693,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
-				{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: 5}, {Name: "machine12", Score: 5},
+				{Name: "machine21", Score: 5}, {Name: "machine22", Score: 5},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "three pods, two service pods on different machines",
 		},
 		{
@@ -708,9 +708,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
-				{Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: 0}, {Name: "machine12", Score: 0},
+				{Name: "machine21", Score: schedulerapi.MaxPriority}, {Name: "machine22", Score: schedulerapi.MaxPriority},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "three service label match pods in different namespaces",
 		},
 		{
@@ -723,9 +723,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
-				{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: 6}, {Name: "machine12", Score: 6},
+				{Name: "machine21", Score: 3}, {Name: "machine22", Score: 3},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "four pods, three service pods",
 		},
 		{
@@ -737,9 +737,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
-				{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: 3}, {Name: "machine12", Score: 3},
+				{Name: "machine21", Score: 6}, {Name: "machine22", Score: 6},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "service with partial pod label matches",
 		},
 		{
@@ -752,9 +752,9 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
-			expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
-				{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
-				{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
+			expectedList: []framework.NodeScore{{Name: "machine11", Score: 7}, {Name: "machine12", Score: 7},
+				{Name: "machine21", Score: 5}, {Name: "machine22", Score: 5},
+				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
 			name: "service pod on non-zoned node",
 		},
 	}
@@ -782,8 +782,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 			}
 
 			// sort the two lists to avoid failures on account of different ordering
-			sort.Sort(test.expectedList)
-			sort.Sort(list)
+			sortNodeScoreList(test.expectedList)
+			sortNodeScoreList(list)
 			if !reflect.DeepEqual(test.expectedList, list) {
 				t.Errorf("expected %#v, got %#v", test.expectedList, list)
 			}

@@ -22,6 +22,7 @@ import (
 	"k8s.io/api/core/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -52,10 +53,10 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi
 }
 
 // ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node
-func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
 	// To hold all the tolerations with Effect PreferNoSchedule
 	var tolerationsPreferNoSchedule []v1.Toleration
@@ -66,8 +67,8 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *
 		tolerationsPreferNoSchedule = getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
 	}
 
-	return schedulerapi.HostPriority{
-		Host:  node.Name,
+	return framework.NodeScore{
+		Name:  node.Name,
 		Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
 	}, nil
 }

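For orientation, a minimal sketch of how a caller reads the reworked per-node result; pod and nodeInfo are assumed to be in scope and the printing is illustrative only, not part of this diff:

	score, err := ComputeTaintTolerationPriorityMap(pod, nil, nodeInfo)
	if err != nil {
		return err
	}
	// The node is now identified by Name (previously HostPriority.Host). The raw value
	// counts intolerable PreferNoSchedule taints; the companion reduce step (not shown
	// here) normalizes these so that nodes with fewer intolerable taints score higher.
	fmt.Printf("node %q: %d intolerable PreferNoSchedule taints\n", score.Name, score.Score)
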
@@ -23,6 +23,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -53,7 +54,7 @@ func TestTaintAndToleration(t *testing.T) {
 	tests := []struct {
 		pod          *v1.Pod
 		nodes        []*v1.Node
-		expectedList schedulerapi.HostPriorityList
+		expectedList framework.NodeScoreList
 		name         string
 	}{
 		// basic test case
@@ -77,9 +78,9 @@ func TestTaintAndToleration(t *testing.T) {
 					Effect: v1.TaintEffectPreferNoSchedule,
 				}}),
 			},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: "nodeA", Score: schedulerapi.MaxPriority},
-				{Host: "nodeB", Score: 0},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: schedulerapi.MaxPriority},
+				{Name: "nodeB", Score: 0},
 			},
 		},
 		// the count of taints that are tolerated by pod, does not matter.
@@ -119,10 +120,10 @@ func TestTaintAndToleration(t *testing.T) {
 					},
 				}),
 			},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: "nodeA", Score: schedulerapi.MaxPriority},
-				{Host: "nodeB", Score: schedulerapi.MaxPriority},
-				{Host: "nodeC", Score: schedulerapi.MaxPriority},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: schedulerapi.MaxPriority},
+				{Name: "nodeB", Score: schedulerapi.MaxPriority},
+				{Name: "nodeC", Score: schedulerapi.MaxPriority},
 			},
 		},
 		// the count of taints on a node that are not tolerated by pod, matters.
@@ -155,10 +156,10 @@ func TestTaintAndToleration(t *testing.T) {
 					},
 				}),
 			},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: "nodeA", Score: schedulerapi.MaxPriority},
-				{Host: "nodeB", Score: 5},
-				{Host: "nodeC", Score: 0},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: schedulerapi.MaxPriority},
+				{Name: "nodeB", Score: 5},
+				{Name: "nodeC", Score: 0},
 			},
 		},
 		// taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
@@ -198,10 +199,10 @@ func TestTaintAndToleration(t *testing.T) {
 					},
 				}),
 			},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: "nodeA", Score: schedulerapi.MaxPriority},
-				{Host: "nodeB", Score: schedulerapi.MaxPriority},
-				{Host: "nodeC", Score: 0},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: schedulerapi.MaxPriority},
+				{Name: "nodeB", Score: schedulerapi.MaxPriority},
+				{Name: "nodeC", Score: 0},
 			},
 		},
 		{
@@ -219,9 +220,9 @@ func TestTaintAndToleration(t *testing.T) {
 					},
 				}),
 			},
-			expectedList: []schedulerapi.HostPriority{
-				{Host: "nodeA", Score: schedulerapi.MaxPriority},
-				{Host: "nodeB", Score: 0},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: schedulerapi.MaxPriority},
+				{Name: "nodeB", Score: 0},
 			},
 		},
 	}

@@ -17,10 +17,12 @@ limitations under the License.
 package priorities
 
 import (
+	"sort"
+
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -57,8 +59,8 @@ func makeNodeWithExtendedResource(node string, milliCPU, memory int64, extendedR
 }
 
 func priorityFunction(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction, metaData interface{}) PriorityFunction {
-	return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
-		result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+	return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
+		result := make(framework.NodeScoreList, 0, len(nodes))
 		for i := range nodes {
 			hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name])
 			if err != nil {
@@ -74,3 +76,12 @@ func priorityFunction(mapFn PriorityMapFunction, reduceFn PriorityReduceFunction
 		return result, nil
 	}
 }
+
+func sortNodeScoreList(out framework.NodeScoreList) {
+	sort.Slice(out, func(i, j int) bool {
+		if out[i].Score == out[j].Score {
+			return out[i].Name < out[j].Name
+		}
+		return out[i].Score < out[j].Score
+	})
+}

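The new sortNodeScoreList helper replaces the sort.Sort calls in the tests above: the expected lists are now plain framework.NodeScoreList values, and sorting by Score with Name as a tie-breaker gives the deterministic order the tests need before reflect.DeepEqual. A small usage sketch with made-up values:

	got := framework.NodeScoreList{
		{Name: "machine2", Score: 10},
		{Name: "machine1", Score: 10},
		{Name: "machine3", Score: 3},
	}
	sortNodeScoreList(got)
	// Ascending by Score, ties broken by Name: machine3, machine1, machine2.
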
@@ -18,20 +18,20 @@ package priorities
 
 import (
 	"k8s.io/api/core/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // PriorityMapFunction is a function that computes per-node results for a given node.
 // TODO: Figure out the exact API of this method.
 // TODO: Change interface{} to a specific type.
-type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error)
+type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error)
 
 // PriorityReduceFunction is a function that aggregated per-node results and computes
 // final scores for all nodes.
 // TODO: Figure out the exact API of this method.
 // TODO: Change interface{} to a specific type.
-type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error
+type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error
 
 // PriorityMetadataProducer is a function that computes metadata for a given pod. This
 // is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
@@ -40,7 +40,7 @@ type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*sched
 // PriorityFunction is a function that computes scores for all nodes.
 // DEPRECATED
 // Use Map-Reduce pattern for priority functions.
-type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)
+type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error)
 
 // PriorityConfig is a config used for a priority function.
 type PriorityConfig struct {

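To make the map/reduce contract concrete under the new types, here is a short sketch of a hypothetical priority pair; the names examplePodCountMap and exampleInvertReduce are invented for illustration and are not part of this change (imports match the ones shown above, plus fmt):

	// examplePodCountMap is a PriorityMapFunction: it scores one node at a time.
	func examplePodCountMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
		node := nodeInfo.Node()
		if node == nil {
			return framework.NodeScore{}, fmt.Errorf("node not found")
		}
		return framework.NodeScore{Name: node.Name, Score: int64(len(nodeInfo.Pods()))}, nil
	}

	// exampleInvertReduce is a PriorityReduceFunction: it rewrites the collected
	// scores in place so that emptier nodes end up with the higher final score.
	func exampleInvertReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error {
		var max int64
		for _, s := range result {
			if s.Score > max {
				max = s.Score
			}
		}
		for i := range result {
			result[i].Score = max - result[i].Score
		}
		return nil
	}

Wired through the priorityFunction test helper shown earlier, priorityFunction(examplePodCountMap, exampleInvertReduce, nil) would produce a framework.NodeScoreList covering all candidate nodes.
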
@@ -18,7 +18,7 @@ package algorithm
 
 import (
 	"k8s.io/api/core/v1"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -34,12 +34,12 @@ type SchedulerExtender interface {
 	// the list of failed nodes and failure reasons.
 	Filter(pod *v1.Pod,
 		nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-	) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)
+	) (filteredNodes []*v1.Node, failedNodesMap extenderv1.FailedNodesMap, err error)
 
 	// Prioritize based on extender-implemented priority functions. The returned scores & weight
 	// are used to compute the weighted score for an extender. The weighted scores are added to
 	// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
-	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error)
+	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error)
 
 	// Bind delegates the action of binding a pod to a node to the extender.
 	Bind(binding *v1.Binding) error
@@ -61,9 +61,9 @@ type SchedulerExtender interface {
 	//   2. A different set of victim pod for every given candidate node after preemption phase of extender.
 	ProcessPreemption(
 		pod *v1.Pod,
-		nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+		nodeToVictims map[*v1.Node]*extenderv1.Victims,
 		nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-	) (map[*v1.Node]*schedulerapi.Victims, error)
+	) (map[*v1.Node]*extenderv1.Victims, error)
 
 	// SupportsPreemption returns if the scheduler extender support preemption or not.
 	SupportsPreemption() bool

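For reference, a hedged sketch of what an implementation of the Prioritize method might return under the relocated types; the fakeExtender receiver and the flat score of 1 are invented for illustration:

	func (e *fakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.HostPriorityList, int64, error) {
		result := make(extenderv1.HostPriorityList, 0, len(nodes))
		for _, n := range nodes {
			// Extender-facing scores keep the wire field Host, unlike the in-tree
			// framework.NodeScore, which now uses Name.
			result = append(result, extenderv1.HostPriority{Host: n.Name, Score: 1})
		}
		return &result, 1, nil // weight of 1
	}
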
@@ -22,7 +22,6 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 )
 
 const (
@@ -247,114 +246,3 @@ type ExtenderConfig struct {
 	// fail when the extender returns an error or is not reachable.
 	Ignorable bool
 }
-
-// ExtenderPreemptionResult represents the result returned by preemption phase of extender.
-type ExtenderPreemptionResult struct {
-	NodeNameToMetaVictims map[string]*MetaVictims
-}
-
-// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes.
-type ExtenderPreemptionArgs struct {
-	// Pod being scheduled
-	Pod *v1.Pod
-	// Victims map generated by scheduler preemption phase
-	// Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims.
-	NodeNameToVictims     map[string]*Victims
-	NodeNameToMetaVictims map[string]*MetaVictims
-}
-
-// Victims represents:
-//   pods:  a group of pods expected to be preempted.
-//   numPDBViolations: the count of violations of PodDisruptionBudget
-type Victims struct {
-	Pods             []*v1.Pod
-	NumPDBViolations int64
-}
-
-// MetaPod represent identifier for a v1.Pod
-type MetaPod struct {
-	UID string
-}
-
-// MetaVictims represents:
-//   pods:  a group of pods expected to be preempted.
-//     Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way.
-//   numPDBViolations: the count of violations of PodDisruptionBudget
-type MetaVictims struct {
-	Pods             []*MetaPod
-	NumPDBViolations int64
-}
-
-// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
-// nodes for a pod.
-type ExtenderArgs struct {
-	// Pod being scheduled
-	Pod *v1.Pod
-	// List of candidate nodes where the pod can be scheduled; to be populated
-	// only if ExtenderConfig.NodeCacheCapable == false
-	Nodes *v1.NodeList
-	// List of candidate node names where the pod can be scheduled; to be
-	// populated only if ExtenderConfig.NodeCacheCapable == true
-	NodeNames *[]string
-}
-
-// FailedNodesMap represents the filtered out nodes, with node names and failure messages
-type FailedNodesMap map[string]string
-
-// ExtenderFilterResult represents the results of a filter call to an extender
-type ExtenderFilterResult struct {
-	// Filtered set of nodes where the pod can be scheduled; to be populated
-	// only if ExtenderConfig.NodeCacheCapable == false
-	Nodes *v1.NodeList
-	// Filtered set of nodes where the pod can be scheduled; to be populated
-	// only if ExtenderConfig.NodeCacheCapable == true
-	NodeNames *[]string
-	// Filtered out nodes where the pod can't be scheduled and the failure messages
-	FailedNodes FailedNodesMap
-	// Error message indicating failure
-	Error string
-}
-
-// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
-type ExtenderBindingArgs struct {
-	// PodName is the name of the pod being bound
-	PodName string
-	// PodNamespace is the namespace of the pod being bound
-	PodNamespace string
-	// PodUID is the UID of the pod being bound
-	PodUID types.UID
-	// Node selected by the scheduler
-	Node string
-}
-
-// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
-type ExtenderBindingResult struct {
-	// Error message indicating failure
-	Error string
-}
-
-// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
-type HostPriority struct {
-	// Name of the host
-	Host string
-	// Score associated with the host
-	Score int64
-}
-
-// HostPriorityList declares a []HostPriority type.
-type HostPriorityList []HostPriority
-
-func (h HostPriorityList) Len() int {
-	return len(h)
-}
-
-func (h HostPriorityList) Less(i, j int) bool {
-	if h[i].Score == h[j].Score {
-		return h[i].Host < h[j].Host
-	}
-	return h[i].Score < h[j].Score
-}
-
-func (h HostPriorityList) Swap(i, j int) {
-	h[i], h[j] = h[j], h[i]
-}
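The Len/Less/Swap methods deleted here are what let HostPriorityList satisfy sort.Interface, so a result list can be ordered by score, with ties broken by host name. A minimal sketch of that behaviour, with the types re-declared locally for the example:

package main

import (
	"fmt"
	"sort"
)

// HostPriority and HostPriorityList mirror the types being moved by this change.
type HostPriority struct {
	Host  string
	Score int64
}

type HostPriorityList []HostPriority

func (h HostPriorityList) Len() int      { return len(h) }
func (h HostPriorityList) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h HostPriorityList) Less(i, j int) bool {
	if h[i].Score == h[j].Score {
		return h[i].Host < h[j].Host // tie-break on host name
	}
	return h[i].Score < h[j].Score
}

func main() {
	list := HostPriorityList{
		{Host: "machine2", Score: 7},
		{Host: "machine1", Score: 7},
		{Host: "machine3", Score: 10},
	}
	sort.Sort(list)
	fmt.Println(list) // [{machine1 7} {machine2 7} {machine3 10}]
}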
@@ -22,7 +22,6 @@ import (
 
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -242,114 +241,3 @@ type caseInsensitiveExtenderConfig *ExtenderConfig
 func (t *ExtenderConfig) UnmarshalJSON(b []byte) error {
 	return gojson.Unmarshal(b, caseInsensitiveExtenderConfig(t))
 }
-
-// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
-// nodes for a pod.
-type ExtenderArgs struct {
-	// Pod being scheduled
-	Pod *apiv1.Pod `json:"pod"`
-	// List of candidate nodes where the pod can be scheduled; to be populated
-	// only if ExtenderConfig.NodeCacheCapable == false
-	Nodes *apiv1.NodeList `json:"nodes,omitempty"`
-	// List of candidate node names where the pod can be scheduled; to be
-	// populated only if ExtenderConfig.NodeCacheCapable == true
-	NodeNames *[]string `json:"nodenames,omitempty"`
-}
-
-// ExtenderPreemptionResult represents the result returned by preemption phase of extender.
-type ExtenderPreemptionResult struct {
-	NodeNameToMetaVictims map[string]*MetaVictims `json:"nodeNameToMetaVictims,omitempty"`
-}
-
-// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes.
-type ExtenderPreemptionArgs struct {
-	// Pod being scheduled
-	Pod *apiv1.Pod `json:"pod"`
-	// Victims map generated by scheduler preemption phase
-	// Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims.
-	NodeNameToVictims     map[string]*Victims     `json:"nodeToVictims,omitempty"`
-	NodeNameToMetaVictims map[string]*MetaVictims `json:"nodeNameToMetaVictims,omitempty"`
-}
-
-// Victims represents:
-//   pods:  a group of pods expected to be preempted.
-//   numPDBViolations: the count of violations of PodDisruptionBudget
-type Victims struct {
-	Pods             []*apiv1.Pod `json:"pods"`
-	NumPDBViolations int64        `json:"numPDBViolations"`
-}
-
-// MetaPod represent identifier for a v1.Pod
-type MetaPod struct {
-	UID string `json:"uid"`
-}
-
-// MetaVictims represents:
-//   pods:  a group of pods expected to be preempted.
-//     Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way.
-//   numPDBViolations: the count of violations of PodDisruptionBudget
-type MetaVictims struct {
-	Pods             []*MetaPod `json:"pods"`
-	NumPDBViolations int64      `json:"numPDBViolations"`
-}
-
-// FailedNodesMap represents the filtered out nodes, with node names and failure messages
-type FailedNodesMap map[string]string
-
-// ExtenderFilterResult represents the results of a filter call to an extender
-type ExtenderFilterResult struct {
-	// Filtered set of nodes where the pod can be scheduled; to be populated
-	// only if ExtenderConfig.NodeCacheCapable == false
-	Nodes *apiv1.NodeList `json:"nodes,omitempty"`
-	// Filtered set of nodes where the pod can be scheduled; to be populated
-	// only if ExtenderConfig.NodeCacheCapable == true
-	NodeNames *[]string `json:"nodenames,omitempty"`
-	// Filtered out nodes where the pod can't be scheduled and the failure messages
-	FailedNodes FailedNodesMap `json:"failedNodes,omitempty"`
-	// Error message indicating failure
-	Error string `json:"error,omitempty"`
-}
-
-// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
-type ExtenderBindingArgs struct {
-	// PodName is the name of the pod being bound
-	PodName string
-	// PodNamespace is the namespace of the pod being bound
-	PodNamespace string
-	// PodUID is the UID of the pod being bound
-	PodUID types.UID
-	// Node selected by the scheduler
-	Node string
-}
-
-// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
-type ExtenderBindingResult struct {
-	// Error message indicating failure
-	Error string
-}
-
-// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
-type HostPriority struct {
-	// Name of the host
-	Host string `json:"host"`
-	// Score associated with the host
-	Score int64 `json:"score"`
-}
-
-// HostPriorityList declares a []HostPriority type.
-type HostPriorityList []HostPriority
-
-func (h HostPriorityList) Len() int {
-	return len(h)
-}
-
-func (h HostPriorityList) Less(i, j int) bool {
-	if h[i].Score == h[j].Score {
-		return h[i].Host < h[j].Host
-	}
-	return h[i].Score < h[j].Score
-}
-
-func (h HostPriorityList) Swap(i, j int) {
-	h[i], h[j] = h[j], h[i]
-}
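The v1 variants above carry JSON tags because they are the wire format exchanged over HTTP with an out-of-process extender. As a rough sketch only (a trimmed field set and an assumed /filter route, not part of this change), an extender can decode ExtenderArgs from the request body and reply with an ExtenderFilterResult:

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// Trimmed-down copies of the wire types, keeping the JSON tags shown above.
type ExtenderArgs struct {
	NodeNames *[]string `json:"nodenames,omitempty"`
}

type ExtenderFilterResult struct {
	NodeNames   *[]string         `json:"nodenames,omitempty"`
	FailedNodes map[string]string `json:"failedNodes,omitempty"`
	Error       string            `json:"error,omitempty"`
}

// filter decodes the scheduler's request and trivially accepts every candidate node.
func filter(w http.ResponseWriter, r *http.Request) {
	var args ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		json.NewEncoder(w).Encode(ExtenderFilterResult{Error: err.Error()})
		return
	}
	json.NewEncoder(w).Encode(ExtenderFilterResult{NodeNames: args.NodeNames})
}

func main() {
	http.HandleFunc("/filter", filter)
	log.Fatal(http.ListenAndServe(":8888", nil))
}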
							
								
								
									
pkg/scheduler/api/v1/zz_generated.deepcopy.go  (generated, 317 lines changed)
@@ -21,77 +21,9 @@ limitations under the License.
 package v1
 
 import (
-	corev1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
[... autogenerated DeepCopyInto/DeepCopy for ExtenderArgs, ExtenderBindingArgs and ExtenderBindingResult deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
 	*out = *in
@@ -118,43 +50,6 @@ func (in *ExtenderConfig) DeepCopy() *ExtenderConfig {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for ExtenderFilterResult deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
 	*out = *in
@@ -171,88 +66,6 @@ func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for ExtenderPreemptionArgs and ExtenderPreemptionResult deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) {
 	*out = *in
@@ -284,64 +97,6 @@ func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for FailedNodesMap, HostPriority and HostPriorityList deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *LabelPreference) DeepCopyInto(out *LabelPreference) {
 	*out = *in
@@ -379,49 +134,6 @@ func (in *LabelsPresence) DeepCopy() *LabelsPresence {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for MetaPod and MetaVictims deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Policy) DeepCopyInto(out *Policy) {
 	*out = *in
@@ -661,30 +373,3 @@ func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint {
 	in.DeepCopyInto(out)
 	return out
 }
[... autogenerated DeepCopyInto/DeepCopy for Victims deleted ...]
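The autogenerated DeepCopyInto/DeepCopy functions being deleted from this file exist so callers can duplicate API objects without sharing slices, maps, or pointers with the original. A small illustration of why that matters, using a simplified Victims-like struct rather than the real generated code:

package main

import "fmt"

type victims struct {
	Pods             []string
	NumPDBViolations int64
}

// deepCopy copies the slice contents, not just the slice header.
func (v *victims) deepCopy() *victims {
	out := &victims{NumPDBViolations: v.NumPDBViolations}
	out.Pods = append([]string(nil), v.Pods...)
	return out
}

func main() {
	orig := &victims{Pods: []string{"pod-a"}, NumPDBViolations: 1}

	shallow := *orig           // struct copy: Pods still aliases orig.Pods
	deep := orig.deepCopy()    // independent backing array

	shallow.Pods[0] = "changed"
	fmt.Println(orig.Pods[0]) // "changed": the shallow copy mutated the original
	fmt.Println(deep.Pods[0]) // "pod-a": the deep copy is unaffected
}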
							
								
								
									
pkg/scheduler/api/zz_generated.deepcopy.go  (generated, 317 lines changed)
@@ -21,77 +21,9 @@ limitations under the License.
 package api
 
 import (
-	v1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
[... autogenerated DeepCopyInto/DeepCopy for ExtenderArgs, ExtenderBindingArgs and ExtenderBindingResult deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
 	*out = *in
@@ -118,43 +50,6 @@ func (in *ExtenderConfig) DeepCopy() *ExtenderConfig {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for ExtenderFilterResult deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
 	*out = *in
@@ -171,88 +66,6 @@ func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for ExtenderPreemptionArgs and ExtenderPreemptionResult deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) {
 	*out = *in
@@ -284,64 +97,6 @@ func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for FailedNodesMap, HostPriority and HostPriorityList deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *LabelPreference) DeepCopyInto(out *LabelPreference) {
 	*out = *in
@@ -379,49 +134,6 @@ func (in *LabelsPresence) DeepCopy() *LabelsPresence {
 	return out
 }
 
[... autogenerated DeepCopyInto/DeepCopy for MetaPod and MetaVictims deleted ...]
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Policy) DeepCopyInto(out *Policy) {
 	*out = *in
@@ -661,30 +373,3 @@ func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint {
 	in.DeepCopyInto(out)
 	return out
 }
[... autogenerated DeepCopyInto/DeepCopy for Victims deleted ...]
							
								
								
									
										20
									
								
								pkg/scheduler/apis/extender/v1/doc.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								pkg/scheduler/apis/extender/v1/doc.go
									
									
									
									
									
										Normal file
									
								
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package v1 contains scheduler API objects.
+package v1 // import "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"

pkg/scheduler/apis/extender/v1/types.go (new file, 118 lines)
@@ -0,0 +1,118 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// ExtenderPreemptionResult represents the result returned by preemption phase of extender.
+type ExtenderPreemptionResult struct {
+	NodeNameToMetaVictims map[string]*MetaVictims
+}
+
+// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes.
+type ExtenderPreemptionArgs struct {
+	// Pod being scheduled
+	Pod *v1.Pod
+	// Victims map generated by scheduler preemption phase
+	// Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims.
+	NodeNameToVictims     map[string]*Victims
+	NodeNameToMetaVictims map[string]*MetaVictims
+}
+
+// Victims represents:
+//   pods:  a group of pods expected to be preempted.
+//   numPDBViolations: the count of violations of PodDisruptionBudget
+type Victims struct {
+	Pods             []*v1.Pod
+	NumPDBViolations int64
+}
+
+// MetaPod represent identifier for a v1.Pod
+type MetaPod struct {
+	UID string
+}
+
+// MetaVictims represents:
+//   pods:  a group of pods expected to be preempted.
+//     Only Pod identifiers will be sent and user are expect to get v1.Pod in their own way.
+//   numPDBViolations: the count of violations of PodDisruptionBudget
+type MetaVictims struct {
+	Pods             []*MetaPod
+	NumPDBViolations int64
+}
+
+// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
+// nodes for a pod.
+type ExtenderArgs struct {
+	// Pod being scheduled
+	Pod *v1.Pod
+	// List of candidate nodes where the pod can be scheduled; to be populated
+	// only if ExtenderConfig.NodeCacheCapable == false
+	Nodes *v1.NodeList
+	// List of candidate node names where the pod can be scheduled; to be
+	// populated only if ExtenderConfig.NodeCacheCapable == true
+	NodeNames *[]string
+}
+
+// FailedNodesMap represents the filtered out nodes, with node names and failure messages
+type FailedNodesMap map[string]string
+
+// ExtenderFilterResult represents the results of a filter call to an extender
+type ExtenderFilterResult struct {
+	// Filtered set of nodes where the pod can be scheduled; to be populated
+	// only if ExtenderConfig.NodeCacheCapable == false
+	Nodes *v1.NodeList
+	// Filtered set of nodes where the pod can be scheduled; to be populated
+	// only if ExtenderConfig.NodeCacheCapable == true
+	NodeNames *[]string
+	// Filtered out nodes where the pod can't be scheduled and the failure messages
+	FailedNodes FailedNodesMap
+	// Error message indicating failure
+	Error string
+}
+
+// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
+type ExtenderBindingArgs struct {
+	// PodName is the name of the pod being bound
+	PodName string
+	// PodNamespace is the namespace of the pod being bound
+	PodNamespace string
+	// PodUID is the UID of the pod being bound
+	PodUID types.UID
+	// Node selected by the scheduler
+	Node string
+}
+
+// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
+type ExtenderBindingResult struct {
+	// Error message indicating failure
+	Error string
+}
+
+// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
+type HostPriority struct {
+	// Name of the host
+	Host string
+	// Score associated with the host
+	Score int64
+}
+
+// HostPriorityList declares a []HostPriority type.
+type HostPriorityList []HostPriority
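For orientation, the sketch below shows how an out-of-tree extender might consume these types when answering a prioritize call. It is not part of this change: the handler path, port, label key, and scoring rule are invented, and it assumes the usual JSON-over-HTTP exchange in which the scheduler posts ExtenderArgs to the extender and decodes the response.

// Illustrative only: a toy "prioritize" endpoint for a scheduler extender.
// Only the extenderv1 types come from the package added above; everything
// else (path, port, label, scoring rule) is made up for this sketch.
package main

import (
	"encoding/json"
	"log"
	"net/http"

	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
)

func prioritize(w http.ResponseWriter, r *http.Request) {
	var args extenderv1.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	result := extenderv1.HostPriorityList{}
	if args.Nodes != nil {
		for _, node := range args.Nodes.Items {
			// Hypothetical rule: favor nodes carrying an "example.com/fast" label.
			score := int64(0)
			if _, ok := node.Labels["example.com/fast"]; ok {
				score = 10
			}
			result = append(result, extenderv1.HostPriority{Host: node.Name, Score: score})
		}
	}
	if err := json.NewEncoder(w).Encode(result); err != nil {
		log.Printf("encoding response: %v", err)
	}
}

func main() {
	http.HandleFunc("/prioritize", prioritize)
	log.Fatal(http.ListenAndServe(":8888", nil))
}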
@@ -30,6 +30,7 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -164,12 +165,12 @@ func (h *HTTPExtender) SupportsPreemption() bool {
 // ProcessPreemption returns filtered candidate nodes and victims after running preemption logic in extender.
 func (h *HTTPExtender) ProcessPreemption(
 	pod *v1.Pod,
-	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+	nodeToVictims map[*v1.Node]*extenderv1.Victims,
 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-) (map[*v1.Node]*schedulerapi.Victims, error) {
+) (map[*v1.Node]*extenderv1.Victims, error) {
 	var (
-		result schedulerapi.ExtenderPreemptionResult
+		result extenderv1.ExtenderPreemptionResult
-		args   *schedulerapi.ExtenderPreemptionArgs
+		args   *extenderv1.ExtenderPreemptionArgs
 	)
 
 	if !h.SupportsPreemption() {
@@ -179,13 +180,13 @@ func (h *HTTPExtender) ProcessPreemption(
 	if h.nodeCacheCapable {
 		// If extender has cached node info, pass NodeNameToMetaVictims in args.
 		nodeNameToMetaVictims := convertToNodeNameToMetaVictims(nodeToVictims)
-		args = &schedulerapi.ExtenderPreemptionArgs{
+		args = &extenderv1.ExtenderPreemptionArgs{
 			Pod:                   pod,
 			NodeNameToMetaVictims: nodeNameToMetaVictims,
 		}
 	} else {
 		nodeNameToVictims := convertToNodeNameToVictims(nodeToVictims)
-		args = &schedulerapi.ExtenderPreemptionArgs{
+		args = &extenderv1.ExtenderPreemptionArgs{
 			Pod:               pod,
 			NodeNameToVictims: nodeNameToVictims,
 		}
@@ -208,12 +209,12 @@ func (h *HTTPExtender) ProcessPreemption(
 // convertToNodeToVictims converts "nodeNameToMetaVictims" from object identifiers,
 // such as UIDs and names, to object pointers.
 func (h *HTTPExtender) convertToNodeToVictims(
-	nodeNameToMetaVictims map[string]*schedulerapi.MetaVictims,
+	nodeNameToMetaVictims map[string]*extenderv1.MetaVictims,
 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-) (map[*v1.Node]*schedulerapi.Victims, error) {
+) (map[*v1.Node]*extenderv1.Victims, error) {
-	nodeToVictims := map[*v1.Node]*schedulerapi.Victims{}
+	nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
 	for nodeName, metaVictims := range nodeNameToMetaVictims {
-		victims := &schedulerapi.Victims{
+		victims := &extenderv1.Victims{
 			Pods: []*v1.Pod{},
 		}
 		for _, metaPod := range metaVictims.Pods {
@@ -233,7 +234,7 @@ func (h *HTTPExtender) convertToNodeToVictims(
 // It should return error if there's cache inconsistency between default scheduler and extender
 // so that this pod or node is missing from nodeNameToInfo.
 func (h *HTTPExtender) convertPodUIDToPod(
-	metaPod *schedulerapi.MetaPod,
+	metaPod *extenderv1.MetaPod,
 	nodeName string,
 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) (*v1.Pod, error) {
 	var nodeInfo *schedulernodeinfo.NodeInfo
@@ -253,15 +254,15 @@ func (h *HTTPExtender) convertPodUIDToPod(
 
 // convertToNodeNameToMetaVictims converts from struct type to meta types.
 func convertToNodeNameToMetaVictims(
-	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+	nodeToVictims map[*v1.Node]*extenderv1.Victims,
-) map[string]*schedulerapi.MetaVictims {
+) map[string]*extenderv1.MetaVictims {
-	nodeNameToVictims := map[string]*schedulerapi.MetaVictims{}
+	nodeNameToVictims := map[string]*extenderv1.MetaVictims{}
 	for node, victims := range nodeToVictims {
-		metaVictims := &schedulerapi.MetaVictims{
+		metaVictims := &extenderv1.MetaVictims{
-			Pods: []*schedulerapi.MetaPod{},
+			Pods: []*extenderv1.MetaPod{},
 		}
 		for _, pod := range victims.Pods {
-			metaPod := &schedulerapi.MetaPod{
+			metaPod := &extenderv1.MetaPod{
 				UID: string(pod.UID),
 			}
 			metaVictims.Pods = append(metaVictims.Pods, metaPod)
@@ -273,9 +274,9 @@ func convertToNodeNameToMetaVictims(
 
 // convertToNodeNameToVictims converts from node type to node name as key.
 func convertToNodeNameToVictims(
-	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+	nodeToVictims map[*v1.Node]*extenderv1.Victims,
-) map[string]*schedulerapi.Victims {
+) map[string]*extenderv1.Victims {
-	nodeNameToVictims := map[string]*schedulerapi.Victims{}
+	nodeNameToVictims := map[string]*extenderv1.Victims{}
 	for node, victims := range nodeToVictims {
 		nodeNameToVictims[node.GetName()] = victims
 	}
@@ -288,17 +289,17 @@ func convertToNodeNameToVictims(
 func (h *HTTPExtender) Filter(
 	pod *v1.Pod,
 	nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
+) ([]*v1.Node, extenderv1.FailedNodesMap, error) {
 	var (
-		result     schedulerapi.ExtenderFilterResult
+		result     extenderv1.ExtenderFilterResult
 		nodeList   *v1.NodeList
 		nodeNames  *[]string
 		nodeResult []*v1.Node
-		args       *schedulerapi.ExtenderArgs
+		args       *extenderv1.ExtenderArgs
 	)
 
 	if h.filterVerb == "" {
-		return nodes, schedulerapi.FailedNodesMap{}, nil
+		return nodes, extenderv1.FailedNodesMap{}, nil
 	}
 
 	if h.nodeCacheCapable {
@@ -314,7 +315,7 @@ func (h *HTTPExtender) Filter(
 		}
 	}
 
-	args = &schedulerapi.ExtenderArgs{
+	args = &extenderv1.ExtenderArgs{
 		Pod:       pod,
 		Nodes:     nodeList,
 		NodeNames: nodeNames,
@@ -351,18 +352,18 @@ func (h *HTTPExtender) Filter(
 // Prioritize based on extender implemented priority functions. Weight*priority is added
 // up for each such priority function. The returned score is added to the score computed
 // by Kubernetes scheduler. The total score is used to do the host selection.
-func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
+func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.HostPriorityList, int64, error) {
 	var (
-		result    schedulerapi.HostPriorityList
+		result    extenderv1.HostPriorityList
 		nodeList  *v1.NodeList
 		nodeNames *[]string
-		args      *schedulerapi.ExtenderArgs
+		args      *extenderv1.ExtenderArgs
 	)
 
 	if h.prioritizeVerb == "" {
-		result := schedulerapi.HostPriorityList{}
+		result := extenderv1.HostPriorityList{}
 		for _, node := range nodes {
-			result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 0})
+			result = append(result, extenderv1.HostPriority{Host: node.Name, Score: 0})
 		}
 		return &result, 0, nil
 	}
@@ -380,7 +381,7 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.
 		}
 	}
 
-	args = &schedulerapi.ExtenderArgs{
+	args = &extenderv1.ExtenderArgs{
 		Pod:       pod,
 		Nodes:     nodeList,
 		NodeNames: nodeNames,
@@ -394,12 +395,12 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.
 
 // Bind delegates the action of binding a pod to a node to the extender.
 func (h *HTTPExtender) Bind(binding *v1.Binding) error {
-	var result schedulerapi.ExtenderBindingResult
+	var result extenderv1.ExtenderBindingResult
 	if !h.IsBinder() {
 		// This shouldn't happen as this extender wouldn't have become a Binder.
 		return fmt.Errorf("Unexpected empty bindVerb in extender")
 	}
-	req := &schedulerapi.ExtenderBindingArgs{
+	req := &extenderv1.ExtenderBindingArgs{
 		PodName:      binding.Name,
 		PodNamespace: binding.Namespace,
 		PodUID:       binding.UID,

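The Prioritize contract above returns a HostPriorityList together with the extender's weight, and the weighted scores are later added to the scheduler's own per-node totals. The sketch below is not part of the change; the node names, scores, and weight are invented. It only illustrates the folding step, including the field asymmetry left by the refactor: the extender wire type keeps Host, while framework.NodeScore uses Name.

// Sketch: folding a weighted extender result into the scheduler's node scores.
package main

import (
	"fmt"

	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func addExtenderScores(result framework.NodeScoreList, prioritized extenderv1.HostPriorityList, weight int64) {
	// Index the extender's scores by host name, pre-multiplied by the extender weight.
	weighted := make(map[string]int64, len(prioritized))
	for _, hp := range prioritized {
		weighted[hp.Host] = hp.Score * weight
	}
	// Add them onto the per-node totals the scheduler already holds.
	for i := range result {
		result[i].Score += weighted[result[i].Name]
	}
}

func main() {
	scores := framework.NodeScoreList{{Name: "machine1", Score: 3}, {Name: "machine2", Score: 5}}
	ext := extenderv1.HostPriorityList{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 1}}
	addExtenderScores(scores, ext, 2)
	fmt.Println(scores) // machine1: 3+20=23, machine2: 5+2=7
}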
@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@@ -41,7 +42,7 @@ import (
 )
 
 type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
-type priorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error)
+type priorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error)
 
 type priorityConfig struct {
 	function priorityFunc
@@ -74,42 +75,42 @@ func machine2PredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
 	return false, nil
 }
 
-func errorPrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) {
+func errorPrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) {
-	return &schedulerapi.HostPriorityList{}, fmt.Errorf("Some error")
+	return &framework.NodeScoreList{}, fmt.Errorf("Some error")
 }
 
-func machine1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) {
+func machine1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) {
-	result := schedulerapi.HostPriorityList{}
+	result := framework.NodeScoreList{}
 	for _, node := range nodes {
 		score := 1
 		if node.Name == "machine1" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
+		result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)})
 	}
 	return &result, nil
 }
 
-func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) {
+func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) {
-	result := schedulerapi.HostPriorityList{}
+	result := framework.NodeScoreList{}
 	for _, node := range nodes {
 		score := 1
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
+		result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)})
 	}
 	return &result, nil
 }
 
-func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
-	result := []schedulerapi.HostPriority{}
+	result := []framework.NodeScore{}
 	for _, node := range nodes {
 		score := 1
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int64(score)})
+		result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)})
 	}
 	return result, nil
 }
@@ -142,10 +143,10 @@ func (f *FakeExtender) SupportsPreemption() bool {
 
 func (f *FakeExtender) ProcessPreemption(
 	pod *v1.Pod,
-	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+	nodeToVictims map[*v1.Node]*extenderv1.Victims,
 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-) (map[*v1.Node]*schedulerapi.Victims, error) {
+) (map[*v1.Node]*extenderv1.Victims, error) {
-	nodeToVictimsCopy := map[*v1.Node]*schedulerapi.Victims{}
+	nodeToVictimsCopy := map[*v1.Node]*extenderv1.Victims{}
 	// We don't want to change the original nodeToVictims
 	for k, v := range nodeToVictims {
 		// In real world implementation, extender's user should have their own way to get node object
@@ -271,13 +272,13 @@ func (f *FakeExtender) runPredicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	return fits, nil
 }
 
-func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
+func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) ([]*v1.Node, extenderv1.FailedNodesMap, error) {
 	filtered := []*v1.Node{}
-	failedNodesMap := schedulerapi.FailedNodesMap{}
+	failedNodesMap := extenderv1.FailedNodesMap{}
 	for _, node := range nodes {
 		fits, err := f.runPredicate(pod, node)
 		if err != nil {
-			return []*v1.Node{}, schedulerapi.FailedNodesMap{}, err
+			return []*v1.Node{}, extenderv1.FailedNodesMap{}, err
 		}
 		if fits {
 			filtered = append(filtered, node)
@@ -293,8 +294,8 @@ func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[
 	return filtered, failedNodesMap, nil
 }
 
-func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int64, error) {
+func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.HostPriorityList, int64, error) {
-	result := schedulerapi.HostPriorityList{}
+	result := extenderv1.HostPriorityList{}
 	combinedScores := map[string]int64{}
 	for _, prioritizer := range f.prioritizers {
 		weight := prioritizer.weight
@@ -304,14 +305,14 @@ func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.
 		priorityFunc := prioritizer.function
 		prioritizedList, err := priorityFunc(pod, nodes)
 		if err != nil {
-			return &schedulerapi.HostPriorityList{}, 0, err
+			return &extenderv1.HostPriorityList{}, 0, err
 		}
 		for _, hostEntry := range *prioritizedList {
-			combinedScores[hostEntry.Host] += hostEntry.Score * weight
+			combinedScores[hostEntry.Name] += hostEntry.Score * weight
 		}
 	}
 	for host, score := range combinedScores {
-		result = append(result, schedulerapi.HostPriority{Host: host, Score: score})
+		result = append(result, extenderv1.HostPriority{Host: host, Score: score})
 	}
 	return &result, f.weight, nil
 }

@@ -41,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@@ -290,23 +291,23 @@ func (g *genericScheduler) Extenders() []algorithm.SchedulerExtender {
 
 // selectHost takes a prioritized list of nodes and then picks one
 // in a reservoir sampling manner from the nodes that had the highest score.
-func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList) (string, error) {
+func (g *genericScheduler) selectHost(nodeScoreList framework.NodeScoreList) (string, error) {
-	if len(priorityList) == 0 {
+	if len(nodeScoreList) == 0 {
 		return "", fmt.Errorf("empty priorityList")
 	}
-	maxScore := priorityList[0].Score
+	maxScore := nodeScoreList[0].Score
-	selected := priorityList[0].Host
+	selected := nodeScoreList[0].Name
 	cntOfMaxScore := 1
-	for _, hp := range priorityList[1:] {
+	for _, ns := range nodeScoreList[1:] {
-		if hp.Score > maxScore {
+		if ns.Score > maxScore {
-			maxScore = hp.Score
+			maxScore = ns.Score
-			selected = hp.Host
+			selected = ns.Name
 			cntOfMaxScore = 1
-		} else if hp.Score == maxScore {
+		} else if ns.Score == maxScore {
 			cntOfMaxScore++
 			if rand.Intn(cntOfMaxScore) == 0 {
 				// Replace the candidate with probability of 1/cntOfMaxScore
-				selected = hp.Host
+				selected = ns.Name
 			}
 		}
 	}
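The tie-breaking in selectHost is a single-pass reservoir sample: the k-th node seen with the current best score replaces the running pick with probability 1/k, which leaves every max-score node equally likely to be chosen without a second pass. The standalone sketch below, with invented node names and scores (and the empty-list error handling of the real function omitted), demonstrates the same rule outside the scheduler.

// Sketch of the reservoir-sampling tie-break used by selectHost.
package main

import (
	"fmt"
	"math/rand"
)

type scoredNode struct {
	name  string
	score int64
}

func pickBest(list []scoredNode) string {
	maxScore := list[0].score
	selected := list[0].name
	cnt := 1
	for _, ns := range list[1:] {
		switch {
		case ns.score > maxScore:
			maxScore, selected, cnt = ns.score, ns.name, 1
		case ns.score == maxScore:
			cnt++
			if rand.Intn(cnt) == 0 { // replace with probability 1/cnt
				selected = ns.name
			}
		}
	}
	return selected
}

func main() {
	list := []scoredNode{{"machine1", 3}, {"machine2", 5}, {"machine3", 5}}
	picks := map[string]int{}
	for i := 0; i < 10000; i++ {
		picks[pickBest(list)]++
	}
	fmt.Println(picks) // machine2 and machine3 each win roughly half the time
}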
@@ -386,8 +387,8 @@ func (g *genericScheduler) Preempt(pluginContext *framework.PluginContext, pod *
 // processPreemptionWithExtenders processes preemption with extenders
 func (g *genericScheduler) processPreemptionWithExtenders(
 	pod *v1.Pod,
-	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+	nodeToVictims map[*v1.Node]*extenderv1.Victims,
-) (map[*v1.Node]*schedulerapi.Victims, error) {
+) (map[*v1.Node]*extenderv1.Victims, error) {
 	if len(nodeToVictims) > 0 {
 		for _, extender := range g.extenders {
 			if extender.SupportsPreemption() && extender.IsInterested(pod) {
@@ -705,12 +706,12 @@ func PrioritizeNodes(
 	priorityConfigs []priorities.PriorityConfig,
 	nodes []*v1.Node,
 	extenders []algorithm.SchedulerExtender,
-	framework framework.Framework,
+	fwk framework.Framework,
-	pluginContext *framework.PluginContext) (schedulerapi.HostPriorityList, error) {
+	pluginContext *framework.PluginContext) (framework.NodeScoreList, error) {
 	// If no priority configs are provided, then the EqualPriority function is applied
 	// This is required to generate the priority list in the required format
 	if len(priorityConfigs) == 0 && len(extenders) == 0 {
-		result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+		result := make(framework.NodeScoreList, 0, len(nodes))
 		for i := range nodes {
 			hostPriority, err := EqualPriorityMap(pod, meta, nodeNameToInfo[nodes[i].Name])
 			if err != nil {
@@ -732,7 +733,7 @@ func PrioritizeNodes(
 		errs = append(errs, err)
 	}
 
-	results := make([]schedulerapi.HostPriorityList, len(priorityConfigs), len(priorityConfigs))
+	results := make([]framework.NodeScoreList, len(priorityConfigs), len(priorityConfigs))
 
 	// DEPRECATED: we can remove this when all priorityConfigs implement the
 	// Map-Reduce pattern.
@@ -748,7 +749,7 @@ func PrioritizeNodes(
 				}
 			}(i)
 		} else {
-			results[i] = make(schedulerapi.HostPriorityList, len(nodes))
+			results[i] = make(framework.NodeScoreList, len(nodes))
 		}
 	}
 
@@ -763,7 +764,7 @@ func PrioritizeNodes(
 			results[i][index], err = priorityConfigs[i].Map(pod, meta, nodeInfo)
 			if err != nil {
 				appendError(err)
-				results[i][index].Host = nodes[index].Name
+				results[i][index].Name = nodes[index].Name
 			}
 		}
 	})
@@ -780,7 +781,7 @@ func PrioritizeNodes(
 			}
 			if klog.V(10) {
 				for _, hostPriority := range results[index] {
-					klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, priorityConfigs[index].Name, hostPriority.Score)
+					klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Name, priorityConfigs[index].Name, hostPriority.Score)
 				}
 			}
 		}(i)
@@ -788,26 +789,26 @@ func PrioritizeNodes(
 	// Wait for all computations to be finished.
 	wg.Wait()
 	if len(errs) != 0 {
-		return schedulerapi.HostPriorityList{}, errors.NewAggregate(errs)
+		return framework.NodeScoreList{}, errors.NewAggregate(errs)
 	}
 
 	// Run the Score plugins.
-	scoresMap, scoreStatus := framework.RunScorePlugins(pluginContext, pod, nodes)
+	scoresMap, scoreStatus := fwk.RunScorePlugins(pluginContext, pod, nodes)
 	if !scoreStatus.IsSuccess() {
-		return schedulerapi.HostPriorityList{}, scoreStatus.AsError()
+		return framework.NodeScoreList{}, scoreStatus.AsError()
 	}
 
 	// Summarize all scores.
-	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+	result := make(framework.NodeScoreList, 0, len(nodes))
 
 	for i := range nodes {
-		result = append(result, schedulerapi.HostPriority{Host: nodes[i].Name, Score: 0})
+		result = append(result, framework.NodeScore{Name: nodes[i].Name, Score: 0})
 		for j := range priorityConfigs {
 			result[i].Score += results[j][i].Score * priorityConfigs[j].Weight
 		}
 
 		for j := range scoresMap {
-			result[i].Score += int64(scoresMap[j][i].Score)
+			result[i].Score += scoresMap[j][i].Score
 		}
 	}
 
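The summation in the hunk above computes, per node, the weight-multiplied sum over the legacy priority-function results plus the already-weighted Score-plugin results. A self-contained sketch of that arithmetic, with invented weights, scores, and slice shapes standing in for priorityConfigs and the output of fwk.RunScorePlugins:

// Sketch of the per-node score aggregation in PrioritizeNodes.
package main

import "fmt"

func main() {
	nodes := []string{"machine1", "machine2"}
	// results[j][i] = score of node i under legacy priority function j.
	results := [][]int64{{5, 2}, {1, 4}}
	weights := []int64{2, 1}
	// pluginScores[p][i] = score of node i from Score plugin p (already weighted).
	pluginScores := [][]int64{{3, 3}}

	total := make([]int64, len(nodes))
	for i := range nodes {
		for j := range results {
			total[i] += results[j][i] * weights[j]
		}
		for p := range pluginScores {
			total[i] += pluginScores[p][i]
		}
	}
	fmt.Println(total) // machine1: 5*2+1*1+3 = 14, machine2: 2*2+4*1+3 = 11
}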
@@ -839,26 +840,26 @@ func PrioritizeNodes(
 		// wait for all go routines to finish
 		wg.Wait()
 		for i := range result {
-			result[i].Score += combinedScores[result[i].Host]
+			result[i].Score += combinedScores[result[i].Name]
 		}
 	}
 
 	if klog.V(10) {
 		for i := range result {
-			klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score)
+			klog.Infof("Host %s => Score %d", result[i].Name, result[i].Score)
 		}
 	}
 	return result, nil
 }
 
 // EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes
-func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
+func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	node := nodeInfo.Node()
 	if node == nil {
-		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
+		return framework.NodeScore{}, fmt.Errorf("node not found")
 	}
-	return schedulerapi.HostPriority{
+	return framework.NodeScore{
-		Host:  node.Name,
+		Name:  node.Name,
 		Score: 1,
 	}, nil
 }
@@ -874,7 +875,7 @@ func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.Node
 // 6. If there are still ties, the first such node is picked (sort of randomly).
 // The 'minNodes1' and 'minNodes2' are being reused here to save the memory
 // allocation and garbage collection time.
-func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) *v1.Node {
+func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*extenderv1.Victims) *v1.Node {
 	if len(nodesToVictims) == 0 {
 		return nil
 	}
@@ -1012,8 +1013,8 @@ func (g *genericScheduler) selectNodesForPreemption(
 	metadataProducer predicates.PredicateMetadataProducer,
 	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
-) (map[*v1.Node]*schedulerapi.Victims, error) {
+) (map[*v1.Node]*extenderv1.Victims, error) {
-	nodeToVictims := map[*v1.Node]*schedulerapi.Victims{}
+	nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
 	var resultLock sync.Mutex
 
 	// We can use the same metadata producer for all nodes.
@@ -1029,7 +1030,7 @@ func (g *genericScheduler) selectNodesForPreemption(
 			pluginContextClone, pod, metaCopy, nodeNameToInfo[nodeName], fitPredicates, queue, pdbs)
 		if fits {
 			resultLock.Lock()
-			victims := schedulerapi.Victims{
+			victims := extenderv1.Victims{
 				Pods:             pods,
 				NumPDBViolations: int64(numPDBViolations),
 			}

@@ -41,6 +41,7 @@ import (
 | 
				
			|||||||
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 | 
						priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 | 
				
			||||||
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 | 
						schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 | 
				
			||||||
	schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 | 
						schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 | 
				
			||||||
 | 
						extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 | 
				
			||||||
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 | 
						framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 | 
				
			||||||
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 | 
						internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 | 
				
			||||||
	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 | 
						internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 | 
				
			||||||
@@ -79,25 +80,25 @@ func hasNoPodsPredicate(pod *v1.Pod, meta algorithmpredicates.PredicateMetadata,
 | 
				
			|||||||
	return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 | 
						return false, []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 | 
					func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
 | 
				
			||||||
	result := []schedulerapi.HostPriority{}
 | 
						result := []framework.NodeScore{}
 | 
				
			||||||
	for _, node := range nodes {
 | 
						for _, node := range nodes {
 | 
				
			||||||
		score, err := strconv.Atoi(node.Name)
 | 
							score, err := strconv.Atoi(node.Name)
 | 
				
			||||||
		if err != nil {
 | 
							if err != nil {
 | 
				
			||||||
			return nil, err
 | 
								return nil, err
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		result = append(result, schedulerapi.HostPriority{
 | 
							result = append(result, framework.NodeScore{
 | 
				
			||||||
			Host:  node.Name,
 | 
								Name:  node.Name,
 | 
				
			||||||
			Score: int64(score),
 | 
								Score: int64(score),
 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return result, nil
 | 
						return result, nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 | 
					func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
 | 
				
			||||||
	var maxScore float64
 | 
						var maxScore float64
 | 
				
			||||||
	minScore := math.MaxFloat64
 | 
						minScore := math.MaxFloat64
 | 
				
			||||||
	reverseResult := []schedulerapi.HostPriority{}
 | 
						reverseResult := []framework.NodeScore{}
 | 
				
			||||||
	result, err := numericPriority(pod, nodeNameToInfo, nodes)
 | 
						result, err := numericPriority(pod, nodeNameToInfo, nodes)
 | 
				
			||||||
	if err != nil {
 | 
						if err != nil {
 | 
				
			||||||
		return nil, err
 | 
							return nil, err
 | 
				
			||||||
@@ -108,8 +109,8 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernod
 | 
				
			|||||||
		minScore = math.Min(minScore, float64(hostPriority.Score))
 | 
							minScore = math.Min(minScore, float64(hostPriority.Score))
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	for _, hostPriority := range result {
 | 
						for _, hostPriority := range result {
 | 
				
			||||||
		reverseResult = append(reverseResult, schedulerapi.HostPriority{
 | 
							reverseResult = append(reverseResult, framework.NodeScore{
 | 
				
			||||||
			Host:  hostPriority.Host,
 | 
								Name:  hostPriority.Name,
 | 
				
			||||||
			Score: int64(maxScore + minScore - float64(hostPriority.Score)),
 | 
								Score: int64(maxScore + minScore - float64(hostPriority.Score)),
 | 
				
			||||||
		})
 | 
							})
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -117,20 +118,20 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernod
 | 
				
			|||||||
	return reverseResult, nil
 | 
						return reverseResult, nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
 | 
					func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 | 
				
			||||||
	return schedulerapi.HostPriority{
 | 
						return framework.NodeScore{
 | 
				
			||||||
		Host:  nodeInfo.Node().Name,
 | 
							Name:  nodeInfo.Node().Name,
 | 
				
			||||||
		Score: 1,
 | 
							Score: 1,
 | 
				
			||||||
	}, nil
 | 
						}, nil
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
 | 
					func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 | 
				
			||||||
	return schedulerapi.HostPriority{}, errPrioritize
 | 
						return framework.NodeScore{}, errPrioritize
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
 | 
					func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result framework.NodeScoreList) error {
 | 
				
			||||||
	for _, host := range result {
 | 
						for _, host := range result {
 | 
				
			||||||
		if host.Host == "" {
 | 
							if host.Name == "" {
 | 
				
			||||||
			return fmt.Errorf("unexpected empty host name")
 | 
								return fmt.Errorf("unexpected empty host name")
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
@@ -189,45 +190,45 @@ func TestSelectHost(t *testing.T) {
 	scheduler := genericScheduler{}
 	tests := []struct {
 		name          string
-		list          schedulerapi.HostPriorityList
+		list          framework.NodeScoreList
 		possibleHosts sets.String
 		expectsErr    bool
 	}{
 		{
 			name: "unique properly ordered scores",
-			list: []schedulerapi.HostPriority{
-				{Host: "machine1.1", Score: 1},
-				{Host: "machine2.1", Score: 2},
+			list: []framework.NodeScore{
+				{Name: "machine1.1", Score: 1},
+				{Name: "machine2.1", Score: 2},
 			},
 			possibleHosts: sets.NewString("machine2.1"),
 			expectsErr:    false,
 		},
 		{
 			name: "equal scores",
-			list: []schedulerapi.HostPriority{
-				{Host: "machine1.1", Score: 1},
-				{Host: "machine1.2", Score: 2},
-				{Host: "machine1.3", Score: 2},
-				{Host: "machine2.1", Score: 2},
+			list: []framework.NodeScore{
+				{Name: "machine1.1", Score: 1},
+				{Name: "machine1.2", Score: 2},
+				{Name: "machine1.3", Score: 2},
+				{Name: "machine2.1", Score: 2},
 			},
 			possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"),
 			expectsErr:    false,
 		},
 		{
 			name: "out of order scores",
-			list: []schedulerapi.HostPriority{
-				{Host: "machine1.1", Score: 3},
-				{Host: "machine1.2", Score: 3},
-				{Host: "machine2.1", Score: 2},
-				{Host: "machine3.1", Score: 1},
-				{Host: "machine1.3", Score: 3},
+			list: []framework.NodeScore{
+				{Name: "machine1.1", Score: 3},
+				{Name: "machine1.2", Score: 3},
+				{Name: "machine2.1", Score: 2},
+				{Name: "machine3.1", Score: 1},
+				{Name: "machine1.3", Score: 3},
 			},
 			possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"),
 			expectsErr:    false,
 		},
 		{
 			name:          "empty priority list",
-			list:          []schedulerapi.HostPriority{},
+			list:          []framework.NodeScore{},
 			possibleHosts: sets.NewString(),
 			expectsErr:    true,
 		},
@@ -1011,7 +1012,7 @@ func TestZeroRequest(t *testing.T) {
 	}
 }
 
-func printNodeToVictims(nodeToVictims map[*v1.Node]*schedulerapi.Victims) string {
+func printNodeToVictims(nodeToVictims map[*v1.Node]*extenderv1.Victims) string {
 	var output string
 	for node, victims := range nodeToVictims {
 		output += node.Name + ": ["
@@ -1023,7 +1024,7 @@ func printNodeToVictims(nodeToVictims map[*v1.Node]*schedulerapi.Victims) string
 	return output
 }
 
-func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[*v1.Node]*schedulerapi.Victims) error {
+func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[*v1.Node]*extenderv1.Victims) error {
 	if len(expected) == len(nodeToPods) {
 		for k, victims := range nodeToPods {
 			if expPods, ok := expected[k.Name]; ok {
@@ -42,7 +42,8 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
-	config "k8s.io/kubernetes/pkg/scheduler/apis/config"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
@@ -239,8 +240,8 @@ func PredicateFunc(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *sch
 	return true, nil, nil
 }
 
-func PriorityFunc(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
-	return []schedulerapi.HostPriority{}, nil
+func PriorityFunc(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
+	return []framework.NodeScore{}, nil
 }
 
 func TestDefaultErrorFunc(t *testing.T) {
@@ -523,9 +524,9 @@ func (f *fakeExtender) IsIgnorable() bool {
 
 func (f *fakeExtender) ProcessPreemption(
 	pod *v1.Pod,
-	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
+	nodeToVictims map[*v1.Node]*extenderv1.Victims,
 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-) (map[*v1.Node]*schedulerapi.Victims, error) {
+) (map[*v1.Node]*extenderv1.Victims, error) {
 	return nil, nil
 }
 
@@ -537,14 +538,14 @@ func (f *fakeExtender) Filter(
 	pod *v1.Pod,
 	nodes []*v1.Node,
 	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
-) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) {
+) (filteredNodes []*v1.Node, failedNodesMap extenderv1.FailedNodesMap, err error) {
 	return nil, nil, nil
 }
 
 func (f *fakeExtender) Prioritize(
 	pod *v1.Pod,
 	nodes []*v1.Node,
-) (hostPriorities *schedulerapi.HostPriorityList, weight int64, err error) {
+) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) {
 	return nil, 0, nil
 }
 
@@ -403,7 +403,7 @@ func (f *framework) RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1.
 			}
 			pluginToNodeScores[pl.Name()][index] = NodeScore{
 				Name:  nodeName,
-				Score: score,
+				Score: int64(score),
 			}
 		}
 	})
@@ -439,12 +439,12 @@ func (f *framework) RunScorePlugins(pc *PluginContext, pod *v1.Pod, nodes []*v1.
 
 		for i, nodeScore := range nodeScoreList {
 			// return error if score plugin returns invalid score.
-			if nodeScore.Score > MaxNodeScore || nodeScore.Score < MinNodeScore {
+			if nodeScore.Score > int64(MaxNodeScore) || nodeScore.Score < int64(MinNodeScore) {
 				err := fmt.Errorf("score plugin %q returns an invalid score %v, it should in the range of [%v, %v] after normalizing", pl.Name(), nodeScore.Score, MinNodeScore, MaxNodeScore)
 				errCh.SendErrorWithCancel(err, cancel)
 				return
 			}
-			nodeScoreList[i].Score = nodeScore.Score * weight
+			nodeScoreList[i].Score = nodeScore.Score * int64(weight)
 		}
 	})
 	if err := errCh.ReceiveError(); err != nil {
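For context on the int64 conversions in the hunk above, here is a minimal, self-contained sketch (not part of this commit) of the validate-then-weight step; the NodeScore type and the MinNodeScore/MaxNodeScore bounds are local stand-ins that only mirror the framework types touched by this diff.

```go
package main

import "fmt"

// Local stand-ins mirroring the framework types touched in the hunk above (assumed shapes).
type NodeScore struct {
	Name  string
	Score int64
}

const (
	MinNodeScore int64 = 0
	MaxNodeScore int64 = 100
)

// applyWeight checks each normalized score against the allowed range and then
// multiplies it by the plugin weight, using int64 arithmetic throughout.
func applyWeight(scores []NodeScore, weight int32) error {
	for i, ns := range scores {
		if ns.Score > MaxNodeScore || ns.Score < MinNodeScore {
			return fmt.Errorf("invalid score %d for node %q, expected [%d, %d]", ns.Score, ns.Name, MinNodeScore, MaxNodeScore)
		}
		scores[i].Score = ns.Score * int64(weight)
	}
	return nil
}

func main() {
	scores := []NodeScore{{Name: "machine1", Score: 40}, {Name: "machine2", Score: 90}}
	if err := applyWeight(scores, 2); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(scores) // [{machine1 80} {machine2 180}]
}
```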
@@ -473,10 +473,10 @@ func buildConfigWithWeights(weights map[string]int32, ps ...string) *config.Plug
 }
 
 type injectedResult struct {
-	ScoreRes     int  `json:"scoreRes,omitempty"`
-	NormalizeRes int  `json:"normalizeRes,omitempty"`
-	ScoreErr     bool `json:"scoreErr,omitempty"`
-	NormalizeErr bool `json:"normalizeErr,omitempty"`
+	ScoreRes     int   `json:"scoreRes,omitempty"`
+	NormalizeRes int64 `json:"normalizeRes,omitempty"`
+	ScoreErr     bool  `json:"scoreErr,omitempty"`
+	NormalizeErr bool  `json:"normalizeErr,omitempty"`
 }
 
 func setScoreRes(inj injectedResult) (int, *Status) {
@@ -37,7 +37,7 @@ type NodeScoreList []NodeScore
 // NodeScore is a struct with node name and score.
 type NodeScore struct {
 	Name  string
-	Score int
+	Score int64
 }
 
 // PluginToNodeScores declares a map from plugin name to its NodeScoreList.
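As a reading aid for the Score field change above (int to int64), a small self-contained sketch; NodeScore and NodeScoreList here are local copies of the shapes shown in the hunk, not imports of the framework package.

```go
package main

import "fmt"

// Local copies of the shapes shown in the hunk above.
type NodeScore struct {
	Name  string
	Score int64
}

type NodeScoreList []NodeScore

// bestNode returns the name of the highest-scoring node, or "" for an empty list.
func bestNode(list NodeScoreList) string {
	if len(list) == 0 {
		return ""
	}
	best := list[0]
	for _, ns := range list[1:] {
		if ns.Score > best.Score {
			best = ns
		}
	}
	return best.Name
}

func main() {
	list := NodeScoreList{
		{Name: "machine1", Score: 7},
		{Name: "machine2", Score: 10},
	}
	fmt.Println(bestNode(list)) // machine2
}
```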
@@ -144,8 +144,8 @@ func PredicateOne(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *sche
 	return true, nil, nil
 }
 
-func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
-	return []schedulerapi.HostPriority{}, nil
+func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (framework.NodeScoreList, error) {
+	return []framework.NodeScore{}, nil
 }
 
 type mockScheduler struct {
@@ -24,7 +24,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 )
 
 // GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair
@@ -64,7 +64,7 @@ func GetPodStartTime(pod *v1.Pod) *metav1.Time {
 
 // GetEarliestPodStartTime returns the earliest start time of all pods that
 // have the highest priority among all victims.
-func GetEarliestPodStartTime(victims *api.Victims) *metav1.Time {
+func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {
 	if len(victims.Pods) == 0 {
 		// should not reach here.
 		klog.Errorf("victims.Pods is empty. Should not reach here.")
@@ -26,7 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 )
 
 // TestSortableList tests SortableList by storing pods in the list and sorting
@@ -209,7 +209,7 @@ func TestGetEarliestPodStartTime(t *testing.T) {
 	pod1 := newPriorityPodWithStartTime("pod1", 1, currentTime.Add(time.Second))
 	pod2 := newPriorityPodWithStartTime("pod2", 2, currentTime.Add(time.Second))
 	pod3 := newPriorityPodWithStartTime("pod3", 2, currentTime)
-	victims := &api.Victims{
+	victims := &extenderv1.Victims{
 		Pods: []*v1.Pod{pod1, pod2, pod3},
 	}
 	startTime := GetEarliestPodStartTime(victims)
@@ -220,7 +220,7 @@ func TestGetEarliestPodStartTime(t *testing.T) {
 	pod1 = newPriorityPodWithStartTime("pod1", 2, currentTime)
 	pod2 = newPriorityPodWithStartTime("pod2", 2, currentTime.Add(time.Second))
 	pod3 = newPriorityPodWithStartTime("pod3", 2, currentTime.Add(2*time.Second))
-	victims = &api.Victims{
+	victims = &extenderv1.Victims{
 		Pods: []*v1.Pod{pod1, pod2, pod3},
 	}
 	startTime = GetEarliestPodStartTime(victims)
@@ -34,6 +34,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
@@ -45,7 +46,7 @@ const (
 )
 
 type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
-type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)
+type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*extenderv1.HostPriorityList, error)
 
 type priorityConfig struct {
 	function priorityFunc
@@ -67,7 +68,7 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 	encoder := json.NewEncoder(w)
 
 	if strings.Contains(req.URL.Path, filter) || strings.Contains(req.URL.Path, prioritize) {
-		var args schedulerapi.ExtenderArgs
+		var args extenderv1.ExtenderArgs
 
 		if err := decoder.Decode(&args); err != nil {
 			http.Error(w, "Decode error", http.StatusBadRequest)
@@ -93,14 +94,14 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 			}
 		}
 	} else if strings.Contains(req.URL.Path, bind) {
-		var args schedulerapi.ExtenderBindingArgs
+		var args extenderv1.ExtenderBindingArgs
 
 		if err := decoder.Decode(&args); err != nil {
 			http.Error(w, "Decode error", http.StatusBadRequest)
 			return
 		}
 
-		resp := &schedulerapi.ExtenderBindingResult{}
+		resp := &extenderv1.ExtenderBindingResult{}
 
 		if err := e.Bind(&args); err != nil {
 			resp.Error = err.Error()
@@ -114,19 +115,19 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 	}
 }
 
-func (e *Extender) filterUsingNodeCache(args *schedulerapi.ExtenderArgs) (*schedulerapi.ExtenderFilterResult, error) {
+func (e *Extender) filterUsingNodeCache(args *extenderv1.ExtenderArgs) (*extenderv1.ExtenderFilterResult, error) {
 	nodeSlice := make([]string, 0)
-	failedNodesMap := schedulerapi.FailedNodesMap{}
+	failedNodesMap := extenderv1.FailedNodesMap{}
 	for _, nodeName := range *args.NodeNames {
 		fits := true
 		for _, predicate := range e.predicates {
 			fit, err := predicate(args.Pod,
 				&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
 			if err != nil {
-				return &schedulerapi.ExtenderFilterResult{
+				return &extenderv1.ExtenderFilterResult{
 					Nodes:       nil,
 					NodeNames:   nil,
-					FailedNodes: schedulerapi.FailedNodesMap{},
+					FailedNodes: extenderv1.FailedNodesMap{},
 					Error:       err.Error(),
 				}, err
 			}
@@ -142,16 +143,16 @@ func (e *Extender) filterUsingNodeCache(args *schedulerapi.ExtenderArgs) (*sched
 		}
 	}
 
-	return &schedulerapi.ExtenderFilterResult{
+	return &extenderv1.ExtenderFilterResult{
 		Nodes:       nil,
 		NodeNames:   &nodeSlice,
 		FailedNodes: failedNodesMap,
 	}, nil
 }
 
-func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.ExtenderFilterResult, error) {
+func (e *Extender) Filter(args *extenderv1.ExtenderArgs) (*extenderv1.ExtenderFilterResult, error) {
 	filtered := []v1.Node{}
-	failedNodesMap := schedulerapi.FailedNodesMap{}
+	failedNodesMap := extenderv1.FailedNodesMap{}
 
 	if e.nodeCacheCapable {
 		return e.filterUsingNodeCache(args)
@@ -162,10 +163,10 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 		for _, predicate := range e.predicates {
 			fit, err := predicate(args.Pod, &node)
 			if err != nil {
-				return &schedulerapi.ExtenderFilterResult{
+				return &extenderv1.ExtenderFilterResult{
 					Nodes:       &v1.NodeList{},
 					NodeNames:   nil,
-					FailedNodes: schedulerapi.FailedNodesMap{},
+					FailedNodes: extenderv1.FailedNodesMap{},
 					Error:       err.Error(),
 				}, err
 			}
@@ -181,15 +182,15 @@ func (e *Extender) Filter(args *schedulerapi.ExtenderArgs) (*schedulerapi.Extend
 		}
 	}
 
-	return &schedulerapi.ExtenderFilterResult{
+	return &extenderv1.ExtenderFilterResult{
 		Nodes:       &v1.NodeList{Items: filtered},
 		NodeNames:   nil,
 		FailedNodes: failedNodesMap,
 	}, nil
 }
 
-func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.HostPriorityList, error) {
-	result := schedulerapi.HostPriorityList{}
+func (e *Extender) Prioritize(args *extenderv1.ExtenderArgs) (*extenderv1.HostPriorityList, error) {
+	result := extenderv1.HostPriorityList{}
 	combinedScores := map[string]int64{}
 	var nodes = &v1.NodeList{Items: []v1.Node{}}
 
@@ -209,19 +210,19 @@ func (e *Extender) Prioritize(args *schedulerapi.ExtenderArgs) (*schedulerapi.Ho
 		priorityFunc := prioritizer.function
 		prioritizedList, err := priorityFunc(args.Pod, nodes)
 		if err != nil {
-			return &schedulerapi.HostPriorityList{}, err
+			return &extenderv1.HostPriorityList{}, err
 		}
 		for _, hostEntry := range *prioritizedList {
 			combinedScores[hostEntry.Host] += hostEntry.Score * weight
 		}
 	}
 	for host, score := range combinedScores {
-		result = append(result, schedulerapi.HostPriority{Host: host, Score: score})
+		result = append(result, extenderv1.HostPriority{Host: host, Score: score})
 	}
 	return &result, nil
 }
 
-func (e *Extender) Bind(binding *schedulerapi.ExtenderBindingArgs) error {
+func (e *Extender) Bind(binding *extenderv1.ExtenderBindingArgs) error {
 	b := &v1.Binding{
 		ObjectMeta: metav1.ObjectMeta{Namespace: binding.PodNamespace, Name: binding.PodName, UID: binding.PodUID},
 		Target: v1.ObjectReference{
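For reference, a standalone sketch of the weighted score combination performed by Prioritize above; HostPriority is a local stand-in mirroring the extender v1 shape (Host plus int64 Score), not the real package.

```go
package main

import "fmt"

// Local stand-in mirroring the extender v1 HostPriority shape used above.
type HostPriority struct {
	Host  string
	Score int64
}

// combine sums each prioritizer's scores per host, scaled by that prioritizer's weight,
// and flattens the result back into a single priority list.
func combine(lists [][]HostPriority, weights []int64) []HostPriority {
	combined := map[string]int64{}
	for i, list := range lists {
		for _, hp := range list {
			combined[hp.Host] += hp.Score * weights[i]
		}
	}
	result := make([]HostPriority, 0, len(combined))
	for host, score := range combined {
		result = append(result, HostPriority{Host: host, Score: score})
	}
	return result
}

func main() {
	prefersMachine2 := []HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 10}}
	prefersMachine3 := []HostPriority{{Host: "machine1", Score: 1}, {Host: "machine3", Score: 10}}
	fmt.Println(combine([][]HostPriority{prefersMachine2, prefersMachine3}, []int64{1, 2}))
}
```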
@@ -247,14 +248,14 @@ func machine2_3_5Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	return false, nil
 }
 
-func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
-	result := schedulerapi.HostPriorityList{}
+func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*extenderv1.HostPriorityList, error) {
+	result := extenderv1.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
 		if node.Name == "machine2" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{
+		result = append(result, extenderv1.HostPriority{
 			Host:  node.Name,
 			Score: int64(score),
 		})
@@ -262,14 +263,14 @@ func machine2Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPri
 	return &result, nil
 }
 
-func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
-	result := schedulerapi.HostPriorityList{}
+func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*extenderv1.HostPriorityList, error) {
+	result := extenderv1.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
 		if node.Name == "machine3" {
 			score = 10
 		}
-		result = append(result, schedulerapi.HostPriority{
+		result = append(result, extenderv1.HostPriority{
 			Host:  node.Name,
 			Score: int64(score),
 		})
@@ -39,10 +39,10 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
 	schedulerplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
+	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -62,12 +62,12 @@ func PredicateTwo(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *sche
 	return true, nil, nil
 }
 
-func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
-	return []schedulerapi.HostPriority{}, nil
+func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerframework.NodeScoreList, error) {
+	return []schedulerframework.NodeScore{}, nil
 }
 
-func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
-	return []schedulerapi.HostPriority{}, nil
+func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerframework.NodeScoreList, error) {
+	return []schedulerframework.NodeScore{}, nil
 }
 
 // TestSchedulerCreationFromConfigMap verifies that scheduler can be created