Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 19:58:17 +00:00)

	Merge pull request #81744 from praseodym/fix-staticcheck-pkg/scheduler
Fix staticcheck failures for pkg/scheduler/...

@@ -293,6 +293,7 @@ func buildHandlerChain(handler http.Handler, authn authenticator.Request, authz
 
 func installMetricHandler(pathRecorderMux *mux.PathRecorderMux) {
 	configz.InstallHandler(pathRecorderMux)
+	//lint:ignore SA1019 See the Metrics Stability Migration KEP
 	defaultMetricsHandler := legacyregistry.Handler().ServeHTTP
 	pathRecorderMux.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
 		if req.Method == "DELETE" {

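For context: staticcheck's SA1019 check flags uses of identifiers whose documentation marks them as deprecated, and a //lint:ignore <check> <reason> directive suppresses the named check for the statement that immediately follows it, which is why the comment above sits directly on top of the legacyregistry.Handler() call. A minimal standalone sketch of the directive's scoping, using a hypothetical OldHandler function rather than anything from this PR:

package main

import "fmt"

// Deprecated: use NewHandler instead. Any call site trips staticcheck SA1019.
func OldHandler() string { return "legacy handler" }

func main() {
	//lint:ignore SA1019 keeping the legacy path until the migration completes
	h := OldHandler()
	fmt.Println(h)
}
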
@@ -2,12 +2,9 @@ cluster/images/etcd-version-monitor
 cluster/images/etcd/migrate
 cmd/kube-controller-manager/app
 cmd/kube-proxy/app
-cmd/kube-scheduler/app
 cmd/linkcheck
 cmd/preferredimports
 hack/make-rules/helpers/go2make/testdata/dir-with-gofiles
-pkg/apis/scheduling/v1alpha1
-pkg/apis/scheduling/v1beta1
 pkg/client/tests
 pkg/controller/daemon
 pkg/controller/deployment

@@ -47,11 +44,6 @@ pkg/registry/core/service/ipallocator
 pkg/registry/core/service/portallocator
 pkg/registry/core/service/storage
 pkg/registry/extensions/controller/storage
-pkg/scheduler
-pkg/scheduler/algorithm/predicates
-pkg/scheduler/algorithm/priorities
-pkg/scheduler/api/v1
-pkg/scheduler/internal/queue
 pkg/util/coverage
 pkg/util/ebtables
 pkg/util/ipconfig

@@ -98,7 +90,6 @@ test/e2e/manifest
 test/e2e/network
 test/e2e/node
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites

@@ -129,13 +120,11 @@ test/integration/kubelet
 test/integration/master
 test/integration/replicationcontroller
 test/integration/scale
-test/integration/scheduler
 test/integration/scheduler_perf
 test/integration/serviceaccount
 test/integration/serving
 test/integration/ttlcontroller
 test/integration/volume
-test/integration/volumescheduling
 test/utils
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion

@@ -19,15 +19,10 @@ package v1alpha1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1alpha1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
 
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
-
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1alpha1.PriorityClass) {

@@ -19,15 +19,10 @@ package v1beta1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
 
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
-
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1beta1.PriorityClass) {

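The two defaults.go hunks above delete addDefaultingFuncs in both API versions, consistent with nothing calling it any more; most of the later removals in this diff (newPairSet, hardSpread, verifyReplicasResult, and similar) are the same class of finding, which staticcheck reports through its unused check (U1000). A minimal sketch, in a hypothetical scratch package, of what that check reacts to:

package scratch

import "fmt"

// Report is exported, so the unused check treats it as reachable.
func Report() {
	fmt.Println("ok")
}

// addDefaults has no callers anywhere in the package; staticcheck's unused
// check (U1000) reports it, and the fix is simply to delete it.
func addDefaults(n int) int {
	return n + 1
}
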
@@ -62,7 +62,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
 	if meta1.podBestEffort != meta2.podBestEffort {
 		return fmt.Errorf("podBestEfforts are not equal")
 	}
-	if meta1.serviceAffinityInUse != meta1.serviceAffinityInUse {
+	if meta1.serviceAffinityInUse != meta2.serviceAffinityInUse {
 		return fmt.Errorf("serviceAffinityInUses are not equal")
 	}
 	if len(meta1.podPorts) != len(meta2.podPorts) {

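The one-character change above fixes a comparison of meta1.serviceAffinityInUse against itself, which can never be true, so meta2 was never actually checked; identical operands on both sides of an operator is what staticcheck's SA4000 check reports. A standalone sketch of the bug class, using a hypothetical meta type rather than the scheduler's:

package main

import "fmt"

type meta struct{ inUse bool }

func equivalent(a, b meta) error {
	// The kind of bug SA4000 reports: both operands are the same expression,
	// so this branch is unreachable and b.inUse is never compared.
	if a.inUse != a.inUse {
		return fmt.Errorf("inUse differs")
	}
	// The intended comparison.
	if a.inUse != b.inUse {
		return fmt.Errorf("inUse differs")
	}
	return nil
}

func main() {
	fmt.Println(equivalent(meta{inUse: true}, meta{inUse: false}))
}
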
@@ -1697,15 +1697,6 @@ var (
 	softSpread = v1.ScheduleAnyway
 )
 
-func newPairSet(kv ...string) topologyPairSet {
-	result := make(topologyPairSet)
-	for i := 0; i < len(kv); i += 2 {
-		pair := topologyPair{key: kv[i], value: kv[i+1]}
-		result[pair] = struct{}{}
-	}
-	return result
-}
-
 // sortCriticalPaths is only served for testing purpose.
 func (c *podSpreadCache) sortCriticalPaths() {
 	for _, paths := range c.tpKeyToCriticalPaths {

@@ -493,7 +493,4 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 	}
 }
 
-var (
-	hardSpread = v1.DoNotSchedule
-	softSpread = v1.ScheduleAnyway
-)
+var softSpread = v1.ScheduleAnyway

@@ -145,7 +145,7 @@ type UtilizationShapePoint struct {
 // ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
 type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
-	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
+	Name apiv1.ResourceName `json:"name"`
 	// Weight of the resource.
 	Weight int64 `json:"weight,omitempty"`
 }

@@ -154,7 +154,7 @@ type ResourceSpec struct {
 // managed by an extender.
 type ExtenderManagedResource struct {
 	// Name is the extended resource name.
-	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
+	Name apiv1.ResourceName `json:"name"`
 	// IgnoredByScheduler indicates whether kube-scheduler should ignore this
 	// resource when applying predicates.
 	IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"`

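The two struct-tag hunks above drop casttype=ResourceName from the json tags. That option appears to be a leftover from Kubernetes' code-generation tags rather than something encoding/json understands (encoding/json only reads the field name plus a small set of options such as omitempty and string), which is the sort of thing staticcheck's struct-tag checking flags. A small sketch, with a hypothetical spec type, of how encoding/json reads the cleaned-up tags:

package main

import (
	"encoding/json"
	"fmt"
)

type ResourceName string

type spec struct {
	// Only the key name is given; encoding/json uses it as the JSON field name.
	Name ResourceName `json:"name"`
	// omitempty drops the field from the output when it holds the zero value.
	Weight int64 `json:"weight,omitempty"`
}

func main() {
	out, err := json.Marshal(spec{Name: "cpu"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"cpu"}
}
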
@@ -35,7 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/util"
 )
 
-var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int32(-100), int32(0), int32(100), int32(1000), int32(10000)
+var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
 var mediumPriority = (lowPriority + highPriority) / 2
 var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1.Pod{
 	ObjectMeta: metav1.ObjectMeta{

@@ -352,6 +352,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 
 	waitPodExpireChan := make(chan struct{})
 	timeout := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
 		for {
 			select {

@@ -361,7 +362,8 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 			}
 			pods, err := scache.List(labels.Everything())
 			if err != nil {
-				t.Fatalf("cache.List failed: %v", err)
+				errChan <- fmt.Errorf("cache.List failed: %v", err)
+				return
 			}
 			if len(pods) == 0 {
 				close(waitPodExpireChan)

@@ -372,6 +374,8 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 	}()
 	// waiting for the assumed pod to expire
 	select {
+	case err := <-errChan:
+		t.Fatal(err)
 	case <-waitPodExpireChan:
 	case <-time.After(wait.ForeverTestTimeout):
 		close(timeout)

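The three hunks above rework TestSchedulerNoPhantomPodAfterExpire so the polling goroutine no longer calls t.Fatalf: testing.T's FailNow (which Fatal and Fatalf use) must run on the goroutine executing the test, which is what staticcheck's SA2002 check reports. The error is instead sent over a channel and the test goroutine fails inside its select. A self-contained sketch of the same pattern, written as a test file with a hypothetical doWork helper:

package example

import (
	"fmt"
	"testing"
	"time"
)

func doWork() error { return nil }

func TestWorkerReportsErrors(t *testing.T) {
	errCh := make(chan error)
	done := make(chan struct{})

	go func() {
		// Report failures over the channel; never call t.Fatal here.
		if err := doWork(); err != nil {
			errCh <- fmt.Errorf("worker failed: %v", err)
			return
		}
		close(done)
	}()

	select {
	case err := <-errCh:
		t.Fatal(err) // back on the test goroutine, so Fatal is allowed
	case <-done:
	case <-time.After(30 * time.Second):
		t.Fatal("timed out waiting for the worker")
	}
}
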
@@ -168,12 +168,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Failing to create a Pod with less than min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Updating a LimitRange")

@@ -192,12 +192,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Creating a Pod with less than former min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Deleting a LimitRange")

@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Creating a Pod with more than former max resources")
 		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 	})
 

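In the LimitRange hunks above, the Create call's first return value was assigned to pod but never read before pod was overwritten by the next test step; staticcheck reports such dead assignments (SA4006), and the fix keeps only the error by discarding the value with the blank identifier. A small standalone sketch of the shape of the change, with a hypothetical createPod helper:

package main

import "fmt"

func createPod(name string) (string, error) {
	return "", fmt.Errorf("pod %q rejected by LimitRange", name)
}

func main() {
	// Only the error matters here, so the first return value is discarded;
	// assigning it to a variable that is never read is what SA4006 flags.
	_, err := createPod("pod-too-small")
	if err != nil {
		fmt.Println("expected error:", err)
	}
}
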
@@ -23,7 +23,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilversion "k8s.io/apimachinery/pkg/util/version"

@@ -759,35 +758,6 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
 }
 
-// verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC
-func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
-	allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
-	scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
-
-	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
-	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
-}
-
-func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
-	selector := labels.SelectorFromSet(labels.Set(labelsMap))
-	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
-	framework.ExpectNoError(err)
-	return allPods
-}
-
-func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
-	// launch a pod to find a node which can launch a pod. We intentionally do
-	// not just take the node list and choose the first of them. Depending on the
-	// cluster and the scheduler it might be that a "normal" pod cannot be
-	// scheduled onto it.
-	ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
-	pod := runPausePod(f, pausePodConfig{
-		Name:   "with-label-" + string(uuid.NewUUID()),
-		Labels: map[string]string{"security": "S1"},
-	})
-	return pod.Spec.NodeName, pod.Name
-}
-
 // GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
 func GetNodeThatCanRunPod(f *framework.Framework) string {
 	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")

@@ -425,6 +425,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 
 		ginkgo.By("Starting pods...")
 		nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
+		framework.ExpectNoError(err)
 		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]

@@ -215,12 +215,12 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
 	}()
 	// List the pods, making sure we observe all the replicas.
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
+	_, err = e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
 	framework.ExpectNoError(err)
 
 	// Wait for all of them to be scheduled
 	ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled.  Selector: %v", replicaCount, name, selector))
-	pods, err = e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
+	pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
 	framework.ExpectNoError(err)
 
 	// Now make sure they're spread across zones

@@ -75,7 +75,6 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 		}
 
 		if strings.Contains(req.URL.Path, filter) {
-			resp := &schedulerapi.ExtenderFilterResult{}
 			resp, err := e.Filter(&args)
 			if err != nil {
 				resp.Error = err.Error()

@@ -511,7 +511,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 
 	pods := []*v1.Pod{}
 	pvcs := []*v1.PersistentVolumeClaim{}
-	pvs := []*v1.PersistentVolume{}
 
 	// Create PVs for the first node
 	for i := 0; i < numPVsFirstNode; i++ {

@@ -519,7 +518,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 
 	// Create 1 PV per Node for the remaining nodes

@@ -528,7 +526,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 
 	// Create pods