chore: call close framework when finishing

Signed-off-by: googs1025 <googs1025@gmail.com>
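The diff below makes two mechanical changes across the scheduler's tests, plus one doc-comment fix: every framework constructed in a test is now released with a deferred Close() call, and the deprecated fake-clientset constructor NewSimpleClientset is replaced by NewClientset. Below is a minimal sketch of the cleanup pattern, assuming the in-tree frameworkruntime package; the empty registry and profile are hypothetical stand-ins for the fixtures each real test assembles.

package runtime_test // sketch only; the real tests live alongside the framework

import (
	"context"
	"testing"

	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
)

func TestFrameworkIsClosed(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Hypothetical fixtures: real tests populate the registry with their
	// test plugins and fill in the profile.
	var registry frameworkruntime.Registry
	fw, err := frameworkruntime.NewFramework(ctx, registry, &config.KubeSchedulerProfile{})
	// Deferred before the error check so cleanup runs on every exit path;
	// the nil guard covers NewFramework returning an error without a
	// usable framework.
	defer func() {
		if fw != nil {
			_ = fw.Close()
		}
	}()
	if err != nil {
		t.Fatalf("fail to create framework: %s", err)
	}
	// ... exercise fw ...
}

The nil-guarded form appears in the diff where the construction error path is itself under test (TestNewFrameworkMultiPointExpansion); tests that t.Fatal on a construction error use the shorter defer func() { _ = f.Close() }() form.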
@@ -482,7 +482,7 @@ func TestAddAllEventHandlers(t *testing.T) {
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 
-			informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
+			informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(), 0)
 			schedulingQueue := queue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
 			testSched := Scheduler{
 				StopEverything:  ctx.Done(),

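The same one-line swap recurs in most of the hunks that follow: fake.NewSimpleClientset (the package is aliased clientsetfake in some files) becomes fake.NewClientset, with identical arguments. Both constructors seed the fake clientset with the objects passed in; in recent client-go releases NewSimpleClientset is marked deprecated in favor of NewClientset, which adds field-management (server-side apply) support to the fake object tracker. A minimal self-contained sketch, with the hypothetical helper newFakeClient standing in for the inline call sites:

package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// newFakeClient seeds a fake clientset with initial objects. The result still
// satisfies kubernetes.Interface, so call sites are unchanged by the swap.
func newFakeClient(objs ...runtime.Object) kubernetes.Interface {
	return fake.NewClientset(objs...) // was: fake.NewSimpleClientset(objs...)
}

// newInformerFactory mirrors the usual next step in these tests: an informer
// factory with no resync period, backed by the fake client.
func newInformerFactory(client kubernetes.Interface) informers.SharedInformerFactory {
	return informers.NewSharedInformerFactory(client, 0)
}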
@@ -317,7 +317,7 @@ func TestSchedulerWithExtenders(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			client := clientsetfake.NewSimpleClientset()
+			client := clientsetfake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
 			var extenders []framework.Extender

@@ -704,7 +704,7 @@ type Handle interface {
 
 	SharedInformerFactory() informers.SharedInformerFactory
 
-	// ResourceClaimInfos returns an assume cache of ResourceClaim objects
+	// ResourceClaimCache returns an assume cache of ResourceClaim objects
 	// which gets populated by the shared informer factory and the dynamic resources
 	// plugin.
 	ResourceClaimCache() *assumecache.AssumeCache

@@ -58,7 +58,7 @@ func TestDefaultBinder(t *testing.T) {
 			defer cancel()
 
 			var gotBinding *v1.Binding
-			client := fake.NewSimpleClientset(testPod)
+			client := fake.NewClientset(testPod)
 			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
 				if action.GetSubresource() != "binding" {
 					return false, nil, nil

@@ -340,7 +340,7 @@ func TestPostFilter(t *testing.T) {
 			for _, pod := range tt.pods {
 				podItems = append(podItems, *pod)
 			}
-			cs := clientsetfake.NewSimpleClientset(&v1.PodList{Items: podItems})
+			cs := clientsetfake.NewClientset(&v1.PodList{Items: podItems})
 			informerFactory := informers.NewSharedInformerFactory(cs, 0)
 			podInformer := informerFactory.Core().V1().Pods().Informer()
 			podInformer.GetStore().Add(tt.pod)
@@ -1087,7 +1087,7 @@ func TestDryRunPreemption(t *testing.T) {
 			for _, n := range nodes {
 				objs = append(objs, n)
 			}
-			informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objs...), 0)
+			informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
 			parallelism := parallelize.DefaultParallelism
 			if tt.disableParallelism {
 				// We need disableParallelism because of the non-deterministic nature
@@ -1347,7 +1347,7 @@ func TestSelectBestCandidate(t *testing.T) {
 			for _, pod := range tt.pods {
 				objs = append(objs, pod)
 			}
-			cs := clientsetfake.NewSimpleClientset(objs...)
+			cs := clientsetfake.NewClientset(objs...)
 			informerFactory := informers.NewSharedInformerFactory(cs, 0)
 			snapshot := internalcache.NewSnapshot(tt.pods, nodes)
 			logger, ctx := ktesting.NewTestContext(t)
@@ -1685,7 +1685,7 @@ func TestPreempt(t *testing.T) {
 	labelKeys := []string{"hostname", "zone", "region"}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			client := clientsetfake.NewSimpleClientset()
+			client := clientsetfake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 			podInformer := informerFactory.Core().V1().Pods().Informer()
 			podInformer.GetStore().Add(test.pod)

@@ -89,7 +89,7 @@ func TestPreScoreSkip(t *testing.T) {
 			_, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
-			informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
+			informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(tt.objs...), 0)
 			f, err := frameworkruntime.NewFramework(ctx, nil, nil,
 				frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
 				frameworkruntime.WithInformerFactory(informerFactory))
@@ -576,7 +576,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 			_, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
-			informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
+			informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(tt.objs...), 0)
 			f, err := frameworkruntime.NewFramework(ctx, nil, nil,
 				frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
 				frameworkruntime.WithInformerFactory(informerFactory))

@@ -42,7 +42,7 @@ func SetupPluginWithInformers(
 	objs []runtime.Object,
 ) framework.Plugin {
 	objs = append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, objs...)
-	informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(objs...), 0)
+	informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0)
 	fh, err := frameworkruntime.NewFramework(ctx, nil, nil,
 		frameworkruntime.WithSnapshotSharedLister(sharedLister),
 		frameworkruntime.WithInformerFactory(informerFactory))

@@ -333,7 +333,7 @@ func TestDryRunPreemption(t *testing.T) {
 			for _, n := range tt.nodes {
 				objs = append(objs, n)
 			}
-			informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objs...), 0)
+			informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
 			parallelism := parallelize.DefaultParallelism
 			_, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)
@@ -437,7 +437,7 @@ func TestSelectCandidate(t *testing.T) {
 			for _, pod := range tt.testPods {
 				objs = append(objs, pod)
 			}
-			informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objs...), 0)
+			informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
 			snapshot := internalcache.NewSnapshot(tt.testPods, nodes)
 			_, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)

@@ -904,6 +904,11 @@ func TestNewFrameworkMultiPointExpansion(t *testing.T) {
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 			fw, err := NewFramework(ctx, registry, &config.KubeSchedulerProfile{Plugins: tc.plugins})
+			defer func() {
+				if fw != nil {
+					_ = fw.Close()
+				}
+			}()
 			if err != nil {
 				if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) {
 					t.Fatalf("Unexpected error, got %v, expect: %s", err, tc.wantErr)
@@ -913,7 +918,6 @@ func TestNewFrameworkMultiPointExpansion(t *testing.T) {
 					t.Fatalf("Unexpected error, got %v, expect: %s", err, tc.wantErr)
 				}
 			}
-
 			if tc.wantErr == "" {
 				if diff := cmp.Diff(tc.wantPlugins, fw.ListPlugins()); diff != "" {
 					t.Fatalf("Unexpected eventToPlugin map (-want,+got):%s", diff)
@@ -969,7 +973,9 @@ func TestPreEnqueuePlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
-
+			defer func() {
+				_ = f.Close()
+			}()
 			got := f.PreEnqueuePlugins()
 			if !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("PreEnqueuePlugins(): want %v, but got %v", tt.want, got)
@@ -1092,7 +1098,9 @@ func TestRunPreScorePlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
-
+			defer func() {
+				_ = f.Close()
+			}()
 			state := framework.NewCycleState()
 			status := f.RunPreScorePlugins(ctx, state, nil, nil)
 			if status.Code() != tt.wantStatusCode {
@@ -1486,6 +1494,9 @@ func TestRunScorePlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			state := framework.NewCycleState()
 			state.SkipScorePlugins = tt.skippedPlugins
@@ -1530,6 +1541,9 @@ func TestPreFilterPlugins(t *testing.T) {
 		if err != nil {
 			t.Fatalf("Failed to create framework for testing: %v", err)
 		}
+		defer func() {
+			_ = f.Close()
+		}()
 		state := framework.NewCycleState()
 
 		f.RunPreFilterPlugins(ctx, state, nil)
@@ -1719,6 +1733,9 @@ func TestRunPreFilterPlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			state := framework.NewCycleState()
 			result, status := f.RunPreFilterPlugins(ctx, state, nil)
@@ -1809,6 +1826,9 @@ func TestRunPreFilterExtensionRemovePod(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			state := framework.NewCycleState()
 			state.SkipFilterPlugins = tt.skippedPluginNames
@@ -1893,6 +1913,9 @@ func TestRunPreFilterExtensionAddPod(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			state := framework.NewCycleState()
 			state.SkipFilterPlugins = tt.skippedPluginNames
@@ -2096,6 +2119,9 @@ func TestFilterPlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 			state := framework.NewCycleState()
 			state.SkipFilterPlugins = tt.skippedPlugins
 			gotStatus := f.RunFilterPlugins(ctx, state, pod, nil)
@@ -2220,6 +2246,9 @@ func TestPostFilterPlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 			_, gotStatus := f.RunPostFilterPlugins(ctx, nil, pod, nil)
 			if !reflect.DeepEqual(gotStatus, tt.wantStatus) {
 				t.Errorf("Unexpected status. got: %v, want: %v", gotStatus, tt.wantStatus)
@@ -2371,6 +2400,9 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 			tt.nodeInfo.SetNode(tt.node)
 			gotStatus := f.RunFilterPluginsWithNominatedPods(ctx, framework.NewCycleState(), tt.pod, tt.nodeInfo)
 			if diff := cmp.Diff(gotStatus, tt.wantStatus, cmpOpts...); diff != "" {
@@ -2528,6 +2560,9 @@ func TestPreBindPlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			status := f.RunPreBindPlugins(ctx, nil, pod, "")
 
@@ -2683,6 +2718,9 @@ func TestReservePlugins(t *testing.T) {
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 			f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
+			defer func() {
+				_ = f.Close()
+			}()
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
@@ -2811,6 +2849,9 @@ func TestPermitPlugins(t *testing.T) {
 			f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile,
 				WithWaitingPods(NewWaitingPodsMap()),
 			)
+			defer func() {
+				_ = f.Close()
+			}()
 			if err != nil {
 				t.Fatalf("fail to create framework: %s", err)
 			}
@@ -3000,6 +3041,9 @@ func TestRecordingMetrics(t *testing.T) {
 				cancel()
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			tt.action(f)
 
@@ -3113,6 +3157,9 @@ func TestRunBindPlugins(t *testing.T) {
 				cancel()
 				t.Fatal(err)
 			}
+			defer func() {
+				_ = fwk.Close()
+			}()
 
 			st := fwk.RunBindPlugins(context.Background(), state, pod, "")
 			if st.Code() != tt.wantStatus {
@@ -3171,6 +3218,9 @@ func TestPermitWaitDurationMetric(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			f.RunPermitPlugins(ctx, nil, pod, "")
 			f.WaitOnPermit(ctx, pod)
@@ -3229,6 +3279,9 @@ func TestWaitOnPermit(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 
 			runPermitPluginsStatus := f.RunPermitPlugins(ctx, nil, pod, "")
 			if runPermitPluginsStatus.Code() != framework.Wait {
@@ -3283,6 +3336,9 @@ func TestListPlugins(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to create framework for testing: %v", err)
 			}
+			defer func() {
+				_ = f.Close()
+			}()
 			got := f.ListPlugins()
 			if diff := cmp.Diff(tt.want, got); diff != "" {
 				t.Errorf("unexpected plugins (-want,+got):\n%s", diff)

@@ -2039,7 +2039,7 @@ func TestPriorityQueue_NominatedPodDeleted(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			logger, _ := ktesting.NewTestContext(t)
-			cs := fake.NewSimpleClientset(tt.podInfo.Pod)
+			cs := fake.NewClientset(tt.podInfo.Pod)
 			informerFactory := informers.NewSharedInformerFactory(cs, 0)
 			podLister := informerFactory.Core().V1().Pods().Lister()
 

@@ -38,7 +38,7 @@ func NewTestQueueWithObjects(
 	objs []runtime.Object,
 	opts ...Option,
 ) *PriorityQueue {
-	informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(objs...), 0)
+	informerFactory := informers.NewSharedInformerFactory(fake.NewClientset(objs...), 0)
 	return NewTestQueueWithInformerFactory(ctx, lessFn, informerFactory, opts...)
 }
 

@@ -407,7 +407,7 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
 	// profiles, each with a different node in the filter configuration.
 	objs := append([]runtime.Object{
 		&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
-	client := clientsetfake.NewSimpleClientset(objs...)
+	client := clientsetfake.NewClientset(objs...)
 	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -547,7 +547,7 @@ func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
 	objs := []runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: fakeNamespace}}}
 	objs = append(objs, initialNodes...)
 	objs = append(objs, initialPods...)
-	client := clientsetfake.NewSimpleClientset(objs...)
+	client := clientsetfake.NewClientset(objs...)
 	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
 
 	informerFactory := informers.NewSharedInformerFactory(client, 0)
@@ -645,7 +645,7 @@ func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
 
 func TestSchedulerScheduleOne(t *testing.T) {
 	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
-	client := clientsetfake.NewSimpleClientset(&testNode)
+	client := clientsetfake.NewClientset(&testNode)
 	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
 	errS := errors.New("scheduler")
 	errB := errors.New("binder")
@@ -760,7 +760,7 @@ func TestSchedulerScheduleOne(t *testing.T) {
 					return pod.UID == gotAssumedPod.UID
 				},
 			}
-			client := clientsetfake.NewSimpleClientset(item.sendPod)
+			client := clientsetfake.NewClientset(item.sendPod)
 			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
 				if action.GetSubresource() != "binding" {
 					return false, nil, nil
@@ -1031,7 +1031,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 		tf.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
 	}
 
-	informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objects...), 0)
+	informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objects...), 0)
 	scheduler, _, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, nil, fns...)
 
 	queuedPodStore.Add(podWithTooBigResourceRequests)
@@ -1061,7 +1061,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 	findErr := fmt.Errorf("find err")
 	assumeErr := fmt.Errorf("assume err")
 	bindErr := fmt.Errorf("bind err")
-	client := clientsetfake.NewSimpleClientset()
+	client := clientsetfake.NewClientset()
 
 	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
 
@@ -1253,7 +1253,7 @@ func TestSchedulerBinding(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			pod := st.MakePod().Name(test.podName).Obj()
 			defaultBound := false
-			client := clientsetfake.NewSimpleClientset(pod)
+			client := clientsetfake.NewClientset(pod)
 			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
 				if action.GetSubresource() == "binding" {
 					defaultBound = true
@@ -2466,7 +2466,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
 				cache.AddNode(logger, node)
 			}
 
-			cs := clientsetfake.NewSimpleClientset()
+			cs := clientsetfake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(cs, 0)
 			for _, pvc := range test.pvcs {
 				metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
@@ -2784,7 +2784,7 @@ func TestZeroRequest(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			client := clientsetfake.NewSimpleClientset()
+			client := clientsetfake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
 			snapshot := internalcache.NewSnapshot(test.pods, test.nodes)
@@ -3187,7 +3187,7 @@ func Test_prioritizeNodes(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			client := clientsetfake.NewSimpleClientset()
+			client := clientsetfake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
 			ctx, cancel := context.WithCancel(context.Background())
@@ -3383,7 +3383,7 @@ func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
 
 			// create three nodes in the cluster.
 			nodes := makeNodeList([]string{"node1", "node2", "node3"})
-			client := clientsetfake.NewSimpleClientset(test.pod)
+			client := clientsetfake.NewClientset(test.pod)
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 			cache := internalcache.New(ctx, time.Duration(0))
 			for _, n := range nodes {
@@ -3534,7 +3534,7 @@ func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queue
 // scache: scheduler cache that might contain assumed pods.
 func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
 	bindingChan := make(chan *v1.Binding, 1)
-	client := clientsetfake.NewSimpleClientset()
+	client := clientsetfake.NewClientset()
 	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
 		var b *v1.Binding
 		if action.GetSubresource() == "binding" {
@@ -3552,7 +3552,7 @@ func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clien
 	}
 
 	if informerFactory == nil {
-		informerFactory = informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0)
+		informerFactory = informers.NewSharedInformerFactory(clientsetfake.NewClientset(), 0)
 	}
 	schedulingQueue := internalqueue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
 	waitingPods := frameworkruntime.NewWaitingPodsMap()
@@ -3604,7 +3604,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volu
 	scache := internalcache.New(ctx, 10*time.Minute)
 	scache.AddNode(logger, &testNode)
 	testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
-	client := clientsetfake.NewSimpleClientset(&testNode, &testPVC)
+	client := clientsetfake.NewClientset(&testNode, &testPVC)
 	informerFactory := informers.NewSharedInformerFactory(client, 0)
 	pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
 	pvcInformer.Informer().GetStore().Add(&testPVC)

@@ -180,7 +180,7 @@ func TestSchedulerCreation(t *testing.T) {
 
 	for _, tc := range cases {
 		t.Run(tc.name, func(t *testing.T) {
-			client := fake.NewSimpleClientset()
+			client := fake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
 			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
@@ -277,7 +277,7 @@ func TestFailureHandler(t *testing.T) {
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
 
-			client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
+			client := fake.NewClientset(&v1.PodList{Items: []v1.Pod{*testPod}})
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 			podInformer := informerFactory.Core().V1().Pods()
 			// Need to add/update/delete testPod to the store.
@@ -337,7 +337,7 @@ func TestFailureHandler_PodAlreadyBound(t *testing.T) {
 	nodeFoo := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
 	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Node("foo").Obj()
 
-	client := fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: []v1.Node{nodeFoo}})
+	client := fake.NewClientset(&v1.PodList{Items: []v1.Pod{*testPod}}, &v1.NodeList{Items: []v1.Node{nodeFoo}})
 	informerFactory := informers.NewSharedInformerFactory(client, 0)
 	podInformer := informerFactory.Core().V1().Pods()
 	// Need to add testPod to the store.
@@ -384,7 +384,7 @@ func TestWithPercentageOfNodesToScore(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			client := fake.NewSimpleClientset()
+			client := fake.NewClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
 			_, ctx := ktesting.NewTestContext(t)
@@ -910,7 +910,7 @@ func Test_UnionedGVKs(t *testing.T) {
 func newFramework(ctx context.Context, r frameworkruntime.Registry, profile schedulerapi.KubeSchedulerProfile) (framework.Framework, error) {
 	return frameworkruntime.NewFramework(ctx, r, &profile,
 		frameworkruntime.WithSnapshotSharedLister(internalcache.NewSnapshot(nil, nil)),
-		frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)),
+		frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(fake.NewClientset(), 0)),
	)
 }
 
@@ -994,7 +994,7 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			// Set up scheduler for the 3 nodes.
 			objs := append([]runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
-			fakeClient := fake.NewSimpleClientset(objs...)
+			fakeClient := fake.NewClientset(objs...)
 			informerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
 			eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: fakeClient.EventsV1()})
 			defer eventBroadcaster.Shutdown()

@@ -226,7 +226,7 @@ func TestPatchPodStatus(t *testing.T) {
 	}{
 		{
 			name:   "Should update pod conditions successfully",
-			client: clientsetfake.NewSimpleClientset(),
+			client: clientsetfake.NewClientset(),
 			pod: v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "ns",
@@ -250,7 +250,7 @@ func TestPatchPodStatus(t *testing.T) {
 			// which would fail the 2-way merge patch generation on Pod patches
 			// due to the mergeKey being the name field
 			name:   "Should update pod conditions successfully on a pod Spec with secrets with empty name",
-			client: clientsetfake.NewSimpleClientset(),
+			client: clientsetfake.NewClientset(),
 			pod: v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Namespace: "ns",
@@ -273,7 +273,7 @@ func TestPatchPodStatus(t *testing.T) {
 		{
 			name: "retry patch request when an 'connection refused' error is returned",
 			client: func() *clientsetfake.Clientset {
-				client := clientsetfake.NewSimpleClientset()
+				client := clientsetfake.NewClientset()
 
 				reqcount := 0
 				client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
@@ -314,7 +314,7 @@ func TestPatchPodStatus(t *testing.T) {
 		{
 			name: "only 4 retries at most",
 			client: func() *clientsetfake.Clientset {
-				client := clientsetfake.NewSimpleClientset()
+				client := clientsetfake.NewClientset()
 
 				reqcount := 0
 				client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {

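A closing note on the two retry hunks above: each table entry builds its client inside a function literal so that the reactor's request counter is private to the entry. A hedged sketch of that shape: makeFlakyClient and patchErr are illustrative stand-ins, while the real cases return a connection-refused error to drive the status patcher's retry logic.

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

// makeFlakyClient returns a fake clientset whose first n "patch pods" calls
// fail with patchErr and whose later calls succeed.
func makeFlakyClient(n int, patchErr error) *clientsetfake.Clientset {
	client := clientsetfake.NewClientset()
	reqcount := 0
	client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
		reqcount++
		if reqcount <= n {
			// handled=true short-circuits the default object tracker and
			// surfaces patchErr to the caller.
			return true, nil, patchErr
		}
		return true, &v1.Pod{}, nil
	})
	return client
}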