Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 19:58:17 +00:00)
Enable parallelism in scheduler unit tests
By default, Go runs unit tests in parallel only at the package level; tests within a single package execute sequentially. This change enables the sig/scheduling unit tests to also run in parallel within their packages.

Signed-off-by: Victor Morales <v.morales@samsung.com>
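The mechanism the diff below applies everywhere is Go's t.Parallel(): calling it at the top of a test and inside each t.Run subtest lets those tests run concurrently, and on Go versions before 1.22 the loop variable must be rebound (tt := tt) so every parallel subtest captures its own copy rather than the loop's final value. A minimal, self-contained sketch of the pattern, with an illustrative isEven function that is not taken from the Kubernetes code:

package scratch

import "testing"

// isEven is a stand-in function under test; it is illustrative only.
func isEven(n int) bool { return n%2 == 0 }

func TestIsEven(t *testing.T) {
	t.Parallel() // let this test run alongside other parallel tests in the package

	tests := []struct {
		name string
		in   int
		want bool
	}{
		{name: "zero", in: 0, want: true},
		{name: "odd", in: 3, want: false},
	}
	for _, tt := range tests {
		tt := tt // rebind: before Go 1.22 the loop variable is shared across iterations
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel() // subtests in this table now run concurrently
			if got := isEven(tt.in); got != tt.want {
				t.Errorf("isEven(%d) = %v, want %v", tt.in, got, tt.want)
			}
		})
	}
}

Each hunk in this commit makes exactly that pair of changes: one t.Parallel() at the top of the test function, and a loop-variable rebinding plus t.Parallel() inside each table-driven subtest.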
@@ -48,6 +48,7 @@ import (
 )
 
 func TestNodeAllocatableChanged(t *testing.T) {
+	t.Parallel()
 	newQuantity := func(value int64) resource.Quantity {
 		return *resource.NewQuantity(value, resource.BinarySI)
 	}
@@ -70,7 +71,9 @@ func TestNodeAllocatableChanged(t *testing.T) {
 			NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024), v1.ResourceStorage: newQuantity(1024)},
 		},
 	} {
+		test := test
 		t.Run(test.Name, func(t *testing.T) {
+			t.Parallel()
 			oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.OldAllocatable}}
 			newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.NewAllocatable}}
 			changed := nodeAllocatableChanged(newNode, oldNode)
@@ -82,6 +85,7 @@ func TestNodeAllocatableChanged(t *testing.T) {
 }
 
 func TestNodeLabelsChanged(t *testing.T) {
+	t.Parallel()
 	for _, test := range []struct {
 		Name      string
 		Changed   bool
@@ -102,7 +106,9 @@ func TestNodeLabelsChanged(t *testing.T) {
 			NewLabels: map[string]string{"foo": "bar", "test": "value"},
 		},
 	} {
+		test := test
 		t.Run(test.Name, func(t *testing.T) {
+			t.Parallel()
 			oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.OldLabels}}
 			newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.NewLabels}}
 			changed := nodeLabelsChanged(newNode, oldNode)
@@ -114,6 +120,7 @@ func TestNodeLabelsChanged(t *testing.T) {
 }
 
 func TestNodeTaintsChanged(t *testing.T) {
+	t.Parallel()
 	for _, test := range []struct {
 		Name      string
 		Changed   bool
@@ -133,7 +140,9 @@ func TestNodeTaintsChanged(t *testing.T) {
 			NewTaints: []v1.Taint{{Key: "key", Value: "value2"}},
 		},
 	} {
+		test := test
 		t.Run(test.Name, func(t *testing.T) {
+			t.Parallel()
 			oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.OldTaints}}
 			newNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.NewTaints}}
 			changed := nodeTaintsChanged(newNode, oldNode)
@@ -145,6 +154,7 @@ func TestNodeTaintsChanged(t *testing.T) {
 }
 
 func TestNodeConditionsChanged(t *testing.T) {
+	t.Parallel()
 	nodeConditionType := reflect.TypeOf(v1.NodeCondition{})
 	if nodeConditionType.NumField() != 6 {
 		t.Errorf("NodeCondition type has changed. The nodeConditionsChanged() function must be reevaluated.")
@@ -187,7 +197,9 @@ func TestNodeConditionsChanged(t *testing.T) {
 			NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
 		},
 	} {
+		test := test
 		t.Run(test.Name, func(t *testing.T) {
+			t.Parallel()
 			oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.OldConditions}}
 			newNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.NewConditions}}
 			changed := nodeConditionsChanged(newNode, oldNode)
@@ -199,6 +211,7 @@ func TestNodeConditionsChanged(t *testing.T) {
 }
 
 func TestUpdatePodInCache(t *testing.T) {
+	t.Parallel()
 	ttl := 10 * time.Second
 	nodeName := "node"
 
@@ -219,7 +232,9 @@ func TestUpdatePodInCache(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 			sched := &Scheduler{
@@ -251,6 +266,7 @@ func withPodName(pod *v1.Pod, name string) *v1.Pod {
 }
 
 func TestPreCheckForNode(t *testing.T) {
+	t.Parallel()
 	cpu4 := map[v1.ResourceName]string{v1.ResourceCPU: "4"}
 	cpu8 := map[v1.ResourceName]string{v1.ResourceCPU: "8"}
 	cpu16 := map[v1.ResourceName]string{v1.ResourceCPU: "16"}
@@ -337,7 +353,9 @@ func TestPreCheckForNode(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
 			nodeInfo.SetNode(tt.nodeFn())
 			preCheckFn := preCheckForNode(nodeInfo)
@@ -356,6 +374,7 @@ func TestPreCheckForNode(t *testing.T) {
 
 // test for informers of resources we care about is registered
 func TestAddAllEventHandlers(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name                   string
 		gvkMap                 map[framework.GVK]framework.ActionType
@@ -429,7 +448,9 @@ func TestAddAllEventHandlers(t *testing.T) {
 	localSchemeBuilder.AddToScheme(scheme)
 
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
@@ -461,6 +482,7 @@ func TestAddAllEventHandlers(t *testing.T) {
 }
 
 func TestAdmissionCheck(t *testing.T) {
+	t.Parallel()
 	nodeaffinityError := AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod}
 	nodenameError := AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason}
 	nodeportsError := AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason}
@@ -502,7 +524,9 @@ func TestAdmissionCheck(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
 			nodeInfo.SetNode(tt.node)
 
@@ -41,6 +41,7 @@ import (
 )
 
 func TestSchedulerWithExtenders(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name            string
 		registerPlugins []st.RegisterPluginFunc
@@ -330,6 +331,7 @@ func createNode(name string) *v1.Node {
 }
 
 func TestIsInterested(t *testing.T) {
+	t.Parallel()
 	mem := &HTTPExtender{
 		managedResources: sets.NewString(),
 	}
@@ -372,7 +374,9 @@ func TestIsInterested(t *testing.T) {
 			want: true,
 		},
 	} {
+		tc := tc
 		t.Run(tc.label, func(t *testing.T) {
+			t.Parallel()
 			if got := tc.extender.IsInterested(tc.pod); got != tc.want {
 				t.Fatalf("IsInterested(%v) = %v, wanted %v", tc.pod, got, tc.want)
 			}
@@ -381,6 +385,7 @@ func TestIsInterested(t *testing.T) {
 }
 
 func TestConvertToMetaVictims(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name              string
 		nodeNameToVictims map[string]*extenderv1.Victims
@@ -423,7 +428,9 @@ func TestConvertToMetaVictims(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			if got := convertToMetaVictims(tt.nodeNameToVictims); !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("convertToMetaVictims() = %v, want %v", got, tt.want)
 			}
@@ -432,6 +439,7 @@ func TestConvertToMetaVictims(t *testing.T) {
 }
 
 func TestConvertToVictims(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name                  string
 		httpExtender          *HTTPExtender
@@ -488,7 +496,9 @@ func TestConvertToVictims(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			// nodeInfos instantiations
 			nodeInfoList := make([]*framework.NodeInfo, 0, len(tt.nodeNames))
 			for i, nm := range tt.nodeNames {
@@ -319,6 +319,7 @@ func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, po
 }
 
 func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
+	t.Parallel()
 	nodes := []runtime.Object{
 		st.MakeNode().Name("node1").UID("node1").Obj(),
 		st.MakeNode().Name("node2").UID("node2").Obj(),
@@ -445,6 +446,7 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
 }
 
 func TestSchedulerScheduleOne(t *testing.T) {
+	t.Parallel()
 	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
 	client := clientsetfake.NewSimpleClientset(&testNode)
 	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
@@ -639,6 +641,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
 }
 
 func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
@@ -704,6 +707,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 }
 
 func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
@@ -773,6 +777,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 }
 
 func TestSchedulerFailedSchedulingReasons(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
@@ -855,6 +860,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 }
 
 func TestSchedulerWithVolumeBinding(t *testing.T) {
+	t.Parallel()
 	findErr := fmt.Errorf("find err")
 	assumeErr := fmt.Errorf("assume err")
 	bindErr := fmt.Errorf("bind err")
@@ -1001,6 +1007,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 }
 
 func TestSchedulerBinding(t *testing.T) {
+	t.Parallel()
 	table := []struct {
 		podName      string
 		extenders    []framework.Extender
@@ -1036,7 +1043,9 @@ func TestSchedulerBinding(t *testing.T) {
 	}
 
 	for _, test := range table {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			pod := st.MakePod().Name(test.podName).Obj()
 			defaultBound := false
 			client := clientsetfake.NewSimpleClientset(pod)
@@ -1084,6 +1093,7 @@ func TestSchedulerBinding(t *testing.T) {
 }
 
 func TestUpdatePod(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name                     string
 		currentPodConditions     []v1.PodCondition
@@ -1225,7 +1235,9 @@ func TestUpdatePod(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			actualPatchRequests := 0
 			var actualPatchData string
 			cs := &clientsetfake.Clientset{}
@@ -1263,6 +1275,7 @@ func TestUpdatePod(t *testing.T) {
 }
 
 func TestSelectHost(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name          string
 		list          framework.NodeScoreList
@@ -1310,7 +1323,9 @@ func TestSelectHost(t *testing.T) {
 	}
 
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			// increase the randomness
 			for i := 0; i < 10; i++ {
 				got, err := selectHost(test.list)
@@ -1332,6 +1347,7 @@ func TestSelectHost(t *testing.T) {
 }
 
 func TestFindNodesThatPassExtenders(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name                  string
 		extenders             []st.FakeExtender
@@ -1483,7 +1499,9 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			var extenders []framework.Extender
 			for ii := range tt.extenders {
 				extenders = append(extenders, &tt.extenders[ii])
@@ -1511,6 +1529,7 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
 }
 
 func TestSchedulerSchedulePod(t *testing.T) {
+	t.Parallel()
 	fts := feature.Features{}
 	tests := []struct {
 		name               string
@@ -1976,7 +1995,9 @@ func TestSchedulerSchedulePod(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			cache := internalcache.New(time.Duration(0), wait.NeverStop)
 			for _, pod := range test.pods {
 				cache.AddPod(pod)
@@ -2125,10 +2146,12 @@ func TestFindFitSomeError(t *testing.T) {
 	}
 
 	for _, node := range nodes {
+		node := node
 		if node.Name == pod.Name {
 			continue
 		}
 		t.Run(node.Name, func(t *testing.T) {
+			t.Parallel()
 			status, found := diagnosis.NodeToStatusMap[node.Name]
 			if !found {
 				t.Errorf("failed to find node %v in %v", node.Name, diagnosis.NodeToStatusMap)
@@ -2209,6 +2232,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 //     is the one being scheduled.
 //   - don't get the same score no matter what we schedule.
 func TestZeroRequest(t *testing.T) {
+	t.Parallel()
 	// A pod with no resources. We expect spreading to count it as having the default resources.
 	noResources := v1.PodSpec{
 		Containers: []v1.Container{
@@ -2297,7 +2321,9 @@ func TestZeroRequest(t *testing.T) {
 	}
 
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			client := clientsetfake.NewSimpleClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
@@ -2357,6 +2383,7 @@ func TestZeroRequest(t *testing.T) {
 var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
 
 func TestNumFeasibleNodesToFind(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name                     string
 		percentageOfNodesToScore int32
@@ -2398,7 +2425,9 @@ func TestNumFeasibleNodesToFind(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			sched := &Scheduler{
 				percentageOfNodesToScore: tt.percentageOfNodesToScore,
 			}
@@ -2453,6 +2482,7 @@ func TestFairEvaluationForNodes(t *testing.T) {
 }
 
 func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name                  string
 		pod                   *v1.Pod
@@ -2479,7 +2509,9 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
 	}
 
 	for _, test := range tests {
+		test := test
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			// create three nodes in the cluster.
 			nodes := makeNodeList([]string{"node1", "node2", "node3"})
 			client := clientsetfake.NewSimpleClientset(test.pod)
@@ -49,6 +49,7 @@ import (
 )
 
 func TestSchedulerCreation(t *testing.T) {
+	t.Parallel()
 	invalidRegistry := map[string]frameworkruntime.PluginFactory{
 		defaultbinder.Name: defaultbinder.New,
 	}
@@ -166,7 +167,9 @@ func TestSchedulerCreation(t *testing.T) {
 	}
 
 	for _, tc := range cases {
+		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
 			client := fake.NewSimpleClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
@@ -231,6 +234,7 @@ func TestSchedulerCreation(t *testing.T) {
 }
 
 func TestFailureHandler(t *testing.T) {
+	t.Parallel()
 	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
 	testPodUpdated := testPod.DeepCopy()
 	testPodUpdated.Labels = map[string]string{"foo": ""}
@@ -262,7 +266,9 @@ func TestFailureHandler(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
@@ -314,6 +320,7 @@ func TestFailureHandler(t *testing.T) {
 }
 
 func TestFailureHandler_NodeNotFound(t *testing.T) {
+	t.Parallel()
 	nodeFoo := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
 	nodeBar := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}
 	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
@@ -340,7 +347,9 @@ func TestFailureHandler_NodeNotFound(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
@@ -383,6 +392,7 @@ func TestFailureHandler_NodeNotFound(t *testing.T) {
 }
 
 func TestFailureHandler_PodAlreadyBound(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@@ -476,6 +486,7 @@ func initScheduler(stop <-chan struct{}, cache internalcache.Cache, queue intern
 }
 
 func TestInitPluginsWithIndexers(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		name string
 		// the plugin registration ordering must not matter, being map traversal random
@@ -538,7 +549,9 @@ func TestInitPluginsWithIndexers(t *testing.T) {
 	}

 	for _, tt := range tests {
+		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			fakeInformerFactory := NewInformerFactory(&fake.Clientset{}, 0*time.Second)
 
 			var registerPluginFuncs []st.RegisterPluginFunc
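A usage note: within a package, the number of tests that t.Parallel() allows to run simultaneously is capped by go test's -parallel flag, which defaults to GOMAXPROCS. For example, assuming the scheduler package tree lives at pkg/scheduler in the repository:

	go test ./pkg/scheduler/... -parallel 8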