Use the generic Set in scheduler

Author: sarab
Date:   2023-03-27 13:16:13 +05:30
parent 8f15859afc
commit 8d18ae6fc2
50 changed files with 348 additions and 346 deletions
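For orientation, a minimal sketch contrasting the old string-specific helpers with the generic set API this commit adopts; it assumes k8s.io/apimachinery at a version that ships the generic sets package (v0.26 or later):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old, string-only helpers this commit migrates away from.
	old := sets.NewString("b", "a")
	fmt.Println(old.List()) // [a b] -- sorted, List is a method on sets.String

	// New generic helpers used throughout the diff below.
	n := sets.New[string]()
	n.Insert("b", "a")
	fmt.Println(sets.List(n))                                 // [a b] -- sorted, List is a free function over sets.Set[string]
	fmt.Println(n.Has("a"))                                   // true
	fmt.Println(sets.List(n.Intersection(sets.New("a", "c")))) // [a]
}
```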

View File

@@ -247,13 +247,13 @@ func (p *Plugins) Names() []string {
p.Permit,
p.QueueSort,
}
n := sets.NewString()
n := sets.New[string]()
for _, e := range extensions {
for _, pg := range e.Enabled {
n.Insert(pg.Name)
}
}
return n.List()
return sets.List(n)
}
// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty,

View File

@@ -110,7 +110,7 @@ type pluginIndex struct {
}
func mergePluginSet(logger klog.Logger, defaultPluginSet, customPluginSet v1.PluginSet) v1.PluginSet {
disabledPlugins := sets.NewString()
disabledPlugins := sets.New[string]()
enabledCustomPlugins := make(map[string]pluginIndex)
// replacedPluginIndex is a set of indexes of plugins that have replaced the default plugins.
replacedPluginIndex := sets.NewInt()

View File

@@ -57,13 +57,13 @@ func pluginsNames(p *configv1.Plugins) []string {
p.PreEnqueue,
p.QueueSort,
}
n := sets.NewString()
n := sets.New[string]()
for _, e := range extensions {
for _, pg := range e.Enabled {
n.Insert(pg.Name)
}
}
return n.List()
return sets.List(n)
}
func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSchedulerProfile) {
@@ -71,7 +71,7 @@ func setDefaults_KubeSchedulerProfile(logger klog.Logger, prof *configv1.KubeSch
prof.Plugins = mergePlugins(logger, getDefaultPlugins(), prof.Plugins)
// Set default plugin configs.
scheme := GetPluginArgConversionScheme()
existingConfigs := sets.NewString()
existingConfigs := sets.New[string]()
for j := range prof.PluginConfig {
existingConfigs.Insert(prof.PluginConfig[j].Name)
args := prof.PluginConfig[j].Args.Object

View File

@@ -150,7 +150,7 @@ type pluginIndex struct {
}
func mergePluginSet(defaultPluginSet, customPluginSet v1beta2.PluginSet) v1beta2.PluginSet {
disabledPlugins := sets.NewString()
disabledPlugins := sets.New[string]()
enabledCustomPlugins := make(map[string]pluginIndex)
// replacedPluginIndex is a set of indexes of plugins that have replaced the default plugins.
replacedPluginIndex := sets.NewInt()

View File

@@ -54,13 +54,13 @@ func pluginsNames(p *v1beta2.Plugins) []string {
p.Permit,
p.QueueSort,
}
n := sets.NewString()
n := sets.New[string]()
for _, e := range extensions {
for _, pg := range e.Enabled {
n.Insert(pg.Name)
}
}
return n.List()
return sets.List(n)
}
func setDefaults_KubeSchedulerProfile(prof *v1beta2.KubeSchedulerProfile) {
@@ -69,7 +69,7 @@ func setDefaults_KubeSchedulerProfile(prof *v1beta2.KubeSchedulerProfile) {
// Set default plugin configs.
scheme := GetPluginArgConversionScheme()
existingConfigs := sets.NewString()
existingConfigs := sets.New[string]()
for j := range prof.PluginConfig {
existingConfigs.Insert(prof.PluginConfig[j].Name)
args := prof.PluginConfig[j].Args.Object

View File

@@ -92,7 +92,7 @@ type pluginIndex struct {
}
func mergePluginSet(defaultPluginSet, customPluginSet v1beta3.PluginSet) v1beta3.PluginSet {
disabledPlugins := sets.NewString()
disabledPlugins := sets.New[string]()
enabledCustomPlugins := make(map[string]pluginIndex)
// replacedPluginIndex is a set of indexes of plugins that have replaced the default plugins.
replacedPluginIndex := sets.NewInt()

View File

@@ -55,13 +55,13 @@ func pluginsNames(p *v1beta3.Plugins) []string {
p.Permit,
p.QueueSort,
}
n := sets.NewString()
n := sets.New[string]()
for _, e := range extensions {
for _, pg := range e.Enabled {
n.Insert(pg.Name)
}
}
return n.List()
return sets.List(n)
}
func setDefaults_KubeSchedulerProfile(prof *v1beta3.KubeSchedulerProfile) {
@@ -69,7 +69,7 @@ func setDefaults_KubeSchedulerProfile(prof *v1beta3.KubeSchedulerProfile) {
prof.Plugins = mergePlugins(getDefaultPlugins(), prof.Plugins)
// Set default plugin configs.
scheme := GetPluginArgConversionScheme()
existingConfigs := sets.NewString()
existingConfigs := sets.New[string]()
for j := range prof.PluginConfig {
existingConfigs.Insert(prof.PluginConfig[j].Name)
args := prof.PluginConfig[j].Args.Object

View File

@@ -227,7 +227,7 @@ func validatePluginConfig(path *field.Path, apiVersion string, profile *config.K
}
}
seenPluginConfig := make(sets.String)
seenPluginConfig := sets.New[string]()
for i := range profile.PluginConfig {
pluginConfigPath := path.Child("pluginConfig").Index(i)
@@ -298,7 +298,7 @@ func validateCommonQueueSort(path *field.Path, profiles []config.KubeSchedulerPr
func validateExtenders(fldPath *field.Path, extenders []config.Extender) []error {
var errs []error
binders := 0
extenderManagedResources := sets.NewString()
extenderManagedResources := sets.New[string]()
for i, extender := range extenders {
path := fldPath.Index(i)
if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 {

View File

@@ -31,7 +31,8 @@ import (
"k8s.io/kubernetes/pkg/scheduler/apis/config"
)
var supportedScoringStrategyTypes = sets.NewString(
// supportedScoringStrategyTypes has to be a set of strings for use with field.Unsupported
var supportedScoringStrategyTypes = sets.New(
string(config.LeastAllocated),
string(config.MostAllocated),
string(config.RequestedToCapacityRatio),
@@ -148,13 +149,13 @@ func validateTopologyKey(p *field.Path, v string) field.ErrorList {
}
func validateWhenUnsatisfiable(p *field.Path, v v1.UnsatisfiableConstraintAction) *field.Error {
supportedScheduleActions := sets.NewString(string(v1.DoNotSchedule), string(v1.ScheduleAnyway))
supportedScheduleActions := sets.New(string(v1.DoNotSchedule), string(v1.ScheduleAnyway))
if len(v) == 0 {
return field.Required(p, "can not be empty")
}
if !supportedScheduleActions.Has(string(v)) {
return field.NotSupported(p, v, supportedScheduleActions.List())
return field.NotSupported(p, v, sets.List(supportedScheduleActions))
}
return nil
}
@@ -221,7 +222,7 @@ func validateResources(resources []config.ResourceSpec, p *field.Path) field.Err
// ValidateNodeResourcesBalancedAllocationArgs validates that NodeResourcesBalancedAllocationArgs are set correctly.
func ValidateNodeResourcesBalancedAllocationArgs(path *field.Path, args *config.NodeResourcesBalancedAllocationArgs) error {
var allErrs field.ErrorList
seenResources := sets.NewString()
seenResources := sets.New[string]()
for i, resource := range args.Resources {
if seenResources.Has(resource.Name) {
allErrs = append(allErrs, field.Duplicate(path.Child("resources").Index(i).Child("name"), resource.Name))
@@ -313,7 +314,7 @@ func ValidateNodeResourcesFitArgs(path *field.Path, args *config.NodeResourcesFi
strategyPath := path.Child("scoringStrategy")
if args.ScoringStrategy != nil {
if !supportedScoringStrategyTypes.Has(string(args.ScoringStrategy.Type)) {
allErrs = append(allErrs, field.NotSupported(strategyPath.Child("type"), args.ScoringStrategy.Type, supportedScoringStrategyTypes.List()))
allErrs = append(allErrs, field.NotSupported(strategyPath.Child("type"), args.ScoringStrategy.Type, sets.List(supportedScoringStrategyTypes)))
}
allErrs = append(allErrs, validateResources(args.ScoringStrategy.Resources, strategyPath.Child("resources"))...)
if args.ScoringStrategy.RequestedToCapacityRatio != nil {
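The validation code above keeps the set as Set[string] (note the string(...) conversions) so that sets.List can hand field.NotSupported a sorted []string. A rough standalone sketch of the same idea, with a hypothetical enum type and plain error formatting in place of the field package:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// strategy stands in for config.ScoringStrategyType; the values are illustrative.
type strategy string

const (
	leastAllocated strategy = "LeastAllocated"
	mostAllocated  strategy = "MostAllocated"
)

// Stored as strings so the supported values can be passed to validation
// helpers (or error messages) as a sorted []string via sets.List.
var supportedStrategies = sets.New(string(leastAllocated), string(mostAllocated))

func validateStrategy(s strategy) error {
	if !supportedStrategies.Has(string(s)) {
		return fmt.Errorf("unsupported strategy %q, supported values: %v", s, sets.List(supportedStrategies))
	}
	return nil
}

func main() {
	fmt.Println(validateStrategy("Foo"))        // unsupported strategy "Foo", supported values: [LeastAllocated MostAllocated]
	fmt.Println(validateStrategy(mostAllocated)) // <nil>
}
```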

View File

@@ -48,7 +48,7 @@ type HTTPExtender struct {
weight int64
client *http.Client
nodeCacheCapable bool
managedResources sets.String
managedResources sets.Set[string]
ignorable bool
}
@@ -96,7 +96,7 @@ func NewHTTPExtender(config *schedulerapi.Extender) (framework.Extender, error)
Transport: transport,
Timeout: config.HTTPTimeout.Duration,
}
managedResources := sets.NewString()
managedResources := sets.New[string]()
for _, r := range config.ManagedResources {
managedResources.Insert(string(r.Name))
}

View File

@@ -329,7 +329,7 @@ func createNode(name string) *v1.Node {
func TestIsInterested(t *testing.T) {
mem := &HTTPExtender{
managedResources: sets.NewString(),
managedResources: sets.New[string](),
}
mem.managedResources.Insert("memory")
@@ -342,7 +342,7 @@ func TestIsInterested(t *testing.T) {
{
label: "Empty managed resources",
extender: &HTTPExtender{
managedResources: sets.NewString(),
managedResources: sets.New[string](),
},
pod: &v1.Pod{},
want: true,

View File

@@ -641,7 +641,7 @@ type Handle interface {
type PreFilterResult struct {
// The set of nodes that should be considered downstream; if nil then
// all nodes are eligible.
NodeNames sets.String
NodeNames sets.Set[string]
}
func (p *PreFilterResult) AllNodes() bool {

View File

@@ -147,41 +147,41 @@ func TestPreFilterResultMerge(t *testing.T) {
}{
"all nil": {},
"nil receiver empty input": {
in: &PreFilterResult{NodeNames: sets.NewString()},
want: &PreFilterResult{NodeNames: sets.NewString()},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"empty receiver nil input": {
receiver: &PreFilterResult{NodeNames: sets.NewString()},
want: &PreFilterResult{NodeNames: sets.NewString()},
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"empty receiver empty input": {
receiver: &PreFilterResult{NodeNames: sets.NewString()},
in: &PreFilterResult{NodeNames: sets.NewString()},
want: &PreFilterResult{NodeNames: sets.NewString()},
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"nil receiver populated input": {
in: &PreFilterResult{NodeNames: sets.NewString("node1")},
want: &PreFilterResult{NodeNames: sets.NewString("node1")},
in: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New("node1")},
},
"empty receiver populated input": {
receiver: &PreFilterResult{NodeNames: sets.NewString()},
in: &PreFilterResult{NodeNames: sets.NewString("node1")},
want: &PreFilterResult{NodeNames: sets.NewString()},
receiver: &PreFilterResult{NodeNames: sets.New[string]()},
in: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"populated receiver nil input": {
receiver: &PreFilterResult{NodeNames: sets.NewString("node1")},
want: &PreFilterResult{NodeNames: sets.NewString("node1")},
receiver: &PreFilterResult{NodeNames: sets.New("node1")},
want: &PreFilterResult{NodeNames: sets.New("node1")},
},
"populated receiver empty input": {
receiver: &PreFilterResult{NodeNames: sets.NewString("node1")},
in: &PreFilterResult{NodeNames: sets.NewString()},
want: &PreFilterResult{NodeNames: sets.NewString()},
receiver: &PreFilterResult{NodeNames: sets.New("node1")},
in: &PreFilterResult{NodeNames: sets.New[string]()},
want: &PreFilterResult{NodeNames: sets.New[string]()},
},
"populated receiver and input": {
receiver: &PreFilterResult{NodeNames: sets.NewString("node1", "node2")},
in: &PreFilterResult{NodeNames: sets.NewString("node2", "node3")},
want: &PreFilterResult{NodeNames: sets.NewString("node2")},
receiver: &PreFilterResult{NodeNames: sets.New("node1", "node2")},
in: &PreFilterResult{NodeNames: sets.New("node2", "node3")},
want: &PreFilterResult{NodeNames: sets.New("node2")},
},
}
for name, test := range tests {

View File

@@ -1676,8 +1676,8 @@ func TestPreempt(t *testing.T) {
podInformer.GetStore().Add(test.pods[i])
}
deletedPodNames := make(sets.String)
patchedPodNames := make(sets.String)
deletedPodNames := sets.New[string]()
patchedPodNames := sets.New[string]()
client.PrependReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
patchedPodNames.Insert(action.(clienttesting.PatchAction).GetName())
return true, nil, nil
@@ -1769,7 +1769,7 @@ func TestPreempt(t *testing.T) {
if len(deletedPodNames) != len(test.expectedPods) {
t.Errorf("expected %v pods, got %v.", len(test.expectedPods), len(deletedPodNames))
}
if diff := cmp.Diff(patchedPodNames.List(), deletedPodNames.List()); diff != "" {
if diff := cmp.Diff(sets.List(patchedPodNames), sets.List(deletedPodNames)); diff != "" {
t.Errorf("unexpected difference in the set of patched and deleted pods: %s", diff)
}
for victimName := range deletedPodNames {
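The assertion above diffs the sorted lists of two sets, which keeps cmp.Diff output deterministic. A self-contained sketch of that comparison style (Set.Equal is the alternative when only a boolean is needed):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	patched := sets.New("p1", "p2")
	deleted := sets.New("p2", "p3")

	// sets.List sorts, so the diff is stable across runs.
	if diff := cmp.Diff(sets.List(patched), sets.List(deleted)); diff != "" {
		fmt.Printf("patched vs deleted (-patched +deleted):\n%s", diff)
	}
	fmt.Println(patched.Equal(deleted)) // false
}
```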

View File

@@ -107,14 +107,14 @@ func (pl *NodeAffinity) PreFilter(ctx context.Context, cycleState *framework.Cyc
// Check if there is affinity to a specific node and return it.
terms := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
var nodeNames sets.String
var nodeNames sets.Set[string]
for _, t := range terms {
var termNodeNames sets.String
var termNodeNames sets.Set[string]
for _, r := range t.MatchFields {
if r.Key == metav1.ObjectNameField && r.Operator == v1.NodeSelectorOpIn {
// The requirements represent ANDed constraints, and so we need to
// find the intersection of nodes.
s := sets.NewString(r.Values...)
s := sets.New(r.Values...)
if termNodeNames == nil {
termNodeNames = s
} else {
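The inner loop above ANDs the per-requirement node lists, using nil to mean "no restriction yet" and an empty set to mean "no node can satisfy the requirements". A standalone sketch of that accumulation pattern with the generic API (the input shape is simplified):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// intersectRequirements ANDs per-requirement node-name lists: nil means no
// requirement has constrained the result yet; an empty set means they conflict.
func intersectRequirements(requirements [][]string) sets.Set[string] {
	var result sets.Set[string]
	for _, values := range requirements {
		s := sets.New(values...)
		if result == nil {
			result = s
		} else {
			result = result.Intersection(s)
		}
	}
	return result
}

func main() {
	fmt.Println(sets.List(intersectRequirements([][]string{{"node1", "node2"}, {"node2", "node3"}}))) // [node2]
	fmt.Println(intersectRequirements(nil) == nil)                                                    // true: unrestricted
}
```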

View File

@@ -491,7 +491,7 @@ func TestNodeAffinity(t *testing.T) {
},
},
nodeName: "node1",
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.NewString("node1")},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
runPreFilter: true,
},
{
@@ -519,7 +519,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node2",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonPod),
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.NewString("node1")},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
runPreFilter: true,
},
{
@@ -595,7 +595,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node2",
labels: map[string]string{"foo": "bar"},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.NewString("node1")},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonPod),
runPreFilter: true,
},
@@ -631,7 +631,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node1",
labels: map[string]string{"foo": "bar"},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.NewString("node1")},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1")},
runPreFilter: true,
},
{
@@ -704,7 +704,7 @@ func TestNodeAffinity(t *testing.T) {
},
},
nodeName: "node2",
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.NewString("node1", "node2")},
wantPreFilterResult: &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")},
runPreFilter: true,
},
{

View File

@@ -82,8 +82,8 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
// Fit is a plugin that checks if a node has sufficient resources.
type Fit struct {
ignoredResources sets.String
ignoredResourceGroups sets.String
ignoredResources sets.Set[string]
ignoredResourceGroups sets.Set[string]
enableInPlacePodVerticalScaling bool
handle framework.Handle
resourceAllocationScorer
@@ -165,8 +165,8 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr
}
return &Fit{
ignoredResources: sets.NewString(args.IgnoredResources...),
ignoredResourceGroups: sets.NewString(args.IgnoredResourceGroups...),
ignoredResources: sets.New(args.IgnoredResources...),
ignoredResourceGroups: sets.New(args.IgnoredResourceGroups...),
enableInPlacePodVerticalScaling: fts.EnableInPlacePodVerticalScaling,
handle: h,
resourceAllocationScorer: *scorePlugin(args),
@@ -286,7 +286,7 @@ func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod), nodeInfo, nil, nil)
}
func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.String) []InsufficientResource {
func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.Set[string]) []InsufficientResource {
insufficientResources := make([]InsufficientResource, 0, 4)
allowedPodNumber := nodeInfo.Allocatable.AllowedPodNumber

View File

@@ -591,9 +591,9 @@ func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
nodeInfoAnnotations = map[string]string{}
}
newAnnotationSet := sets.NewString()
newAnnotationSet := sets.New[string]()
newAnnotationSet.Insert(pluginName)
nas := strings.Join(newAnnotationSet.List(), ",")
nas := strings.Join(sets.List(newAnnotationSet), ",")
nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
csiNode.Annotations = nodeInfoAnnotations

View File

@@ -216,7 +216,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
return nil
}
newVolumes := make(sets.String)
newVolumes := sets.New[string]()
if err := pl.filterVolumes(pod, true /* new pod */, newVolumes); err != nil {
return framework.AsStatus(err)
}
@@ -248,7 +248,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
}
// count unique volumes
existingVolumes := make(sets.String)
existingVolumes := sets.New[string]()
for _, existingPod := range nodeInfo.Pods {
if err := pl.filterVolumes(existingPod.Pod, false /* existing pod */, existingVolumes); err != nil {
return framework.AsStatus(err)
@@ -274,7 +274,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
return nil
}
func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.String) error {
func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.Set[string]) error {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]

View File

@@ -68,13 +68,13 @@ func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
return false
}
var mpaSet sets.String
var mpaSet sets.Set[string]
mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
if len(mpa) == 0 {
mpaSet = sets.NewString()
mpaSet = sets.New[string]()
} else {
tok := strings.Split(mpa, ",")
mpaSet = sets.NewString(tok...)
mpaSet = sets.New(tok...)
}
return mpaSet.Has(pluginName)
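This hunk, together with the strings.Join(sets.List(...)) in the earlier CSI hunk, is a comma-separated round trip through a set. A small sketch of both directions; the annotation values are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

// parsePlugins turns a comma-separated annotation value into a set,
// treating the empty string as the empty set (as the code above does).
func parsePlugins(annotation string) sets.Set[string] {
	if len(annotation) == 0 {
		return sets.New[string]()
	}
	return sets.New(strings.Split(annotation, ",")...)
}

// formatPlugins is the inverse: sets.List sorts, so the output is deterministic.
func formatPlugins(s sets.Set[string]) string {
	return strings.Join(sets.List(s), ",")
}

func main() {
	s := parsePlugins("kubernetes.io/gce-pd,kubernetes.io/aws-ebs")
	fmt.Println(s.Has("kubernetes.io/gce-pd")) // true
	fmt.Println(formatPlugins(s))              // kubernetes.io/aws-ebs,kubernetes.io/gce-pd
}
```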

View File

@@ -36,7 +36,7 @@ const invalidScore = -1
type preScoreState struct {
Constraints []topologySpreadConstraint
// IgnoredNodes is a set of node names which miss some Constraints[*].topologyKey.
IgnoredNodes sets.String
IgnoredNodes sets.Set[string]
// TopologyPairToPodCounts is keyed with topologyPair, and valued with the number of matching pods.
TopologyPairToPodCounts map[topologyPair]*int64
// TopologyNormalizingWeight is the weight we give to the counts per topology.
@@ -126,7 +126,7 @@ func (pl *PodTopologySpread) PreScore(
}
state := &preScoreState{
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: make(map[topologyPair]*int64),
}
// Only require that nodes have all the topology labels if using

View File

@@ -84,7 +84,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -116,7 +116,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -157,7 +157,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString("node-x"),
IgnoredNodes: sets.New("node-x"),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
},
@@ -199,7 +199,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: v1.LabelTopologyZone, value: "mars"}: pointer.Int64(0),
{key: v1.LabelTopologyZone, value: ""}: pointer.Int64(0),
@@ -250,7 +250,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "planet", value: "mars"}: pointer.Int64(0),
},
@@ -314,7 +314,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{"planet", "mars"}: pointer.Int64(0),
},
@@ -346,7 +346,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -380,7 +380,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -414,7 +414,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -448,7 +448,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -481,7 +481,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyHonor,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),
@@ -514,7 +514,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore,
},
},
IgnoredNodes: sets.NewString(),
IgnoredNodes: sets.New[string](),
TopologyPairToPodCounts: map[topologyPair]*int64{
{key: "zone", value: "zone1"}: pointer.Int64(0),
{key: "zone", value: "zone2"}: pointer.Int64(0),

View File

@@ -157,7 +157,7 @@ type SchedulerVolumeBinder interface {
//
// If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made
// and all nodes should be considered.
GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String)
GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string])
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
// node and returns pod's volumes information.
@@ -386,7 +386,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, podVolumeClaims *PodVolumeCla
//
// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes
// should be considered.
func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) {
func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
if len(boundClaims) == 0 {
return
}
@@ -407,13 +407,13 @@ func (b *volumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim)
// on the first found list of eligible nodes for the local PersistentVolume,
// insert to the eligible node set.
if eligibleNodes == nil {
eligibleNodes = sets.NewString(nodeNames...)
eligibleNodes = sets.New(nodeNames...)
} else {
// for subsequent finding of eligible nodes for the local PersistentVolume,
// take the intersection of the nodes with the existing eligible nodes
// for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2,
// then the eligible node list should be empty.
eligibleNodes = eligibleNodes.Intersection(sets.NewString(nodeNames...))
eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...))
}
}
}
@@ -1112,13 +1112,13 @@ func isPluginMigratedToCSIOnNode(pluginName string, csiNode *storagev1.CSINode)
return false
}
var mpaSet sets.String
var mpaSet sets.Set[string]
mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
if len(mpa) == 0 {
mpaSet = sets.NewString()
mpaSet = sets.New[string]()
} else {
tok := strings.Split(mpa, ",")
mpaSet = sets.NewString(tok...)
mpaSet = sets.New(tok...)
}
return mpaSet.Has(pluginName)

View File

@@ -2343,7 +2343,7 @@ func TestGetEligibleNodes(t *testing.T) {
nodes []*v1.Node
// Expected return values
eligibleNodes sets.String
eligibleNodes sets.Set[string]
}
scenarios := map[string]scenarioType{
@@ -2389,7 +2389,7 @@ func TestGetEligibleNodes(t *testing.T) {
node1,
node2,
},
eligibleNodes: sets.NewString("node1"),
eligibleNodes: sets.New("node1"),
},
"multi-local-pv-with-different-nodes": {
pvcs: []*v1.PersistentVolumeClaim{
@@ -2406,7 +2406,7 @@ func TestGetEligibleNodes(t *testing.T) {
node1,
node2,
},
eligibleNodes: sets.NewString(),
eligibleNodes: sets.New[string](),
},
"local-and-non-local-pv": {
pvcs: []*v1.PersistentVolumeClaim{
@@ -2426,7 +2426,7 @@ func TestGetEligibleNodes(t *testing.T) {
node1,
node2,
},
eligibleNodes: sets.NewString("node1"),
eligibleNodes: sets.New("node1"),
},
}
@@ -2449,7 +2449,7 @@ func TestGetEligibleNodes(t *testing.T) {
fmt.Println("foo")
}
if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.String) bool {
if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.Set[string]) bool {
return reflect.DeepEqual(a, b)
})); compDiff != "" {
t.Errorf("Unexpected eligible nodes (-want +got):\n%s", compDiff)

View File

@@ -55,7 +55,7 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(pod *v1.Pod) (podVolumeClaims *Pod
}
// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes.
func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.String) {
func (b *FakeVolumeBinder) GetEligibleNodes(boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
return nil
}

View File

@@ -148,7 +148,7 @@ func TestVolumeBinding(t *testing.T) {
}).PersistentVolume,
},
wantPreFilterResult: &framework.PreFilterResult{
NodeNames: sets.NewString("node-a"),
NodeNames: sets.New("node-a"),
},
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{

View File

@@ -60,7 +60,7 @@ const (
type pvTopology struct {
pvName string
key string
values sets.String
values sets.Set[string]
}
// the state is initialized in PreFilter phase. because we save the pointer in
@@ -160,7 +160,7 @@ func (pl *VolumeZone) getPVbyPod(ctx context.Context, pod *v1.Pod) ([]pvTopology
podPVTopologies = append(podPVTopologies, pvTopology{
pvName: pv.Name,
key: key,
values: volumeVSet,
values: sets.Set[string](volumeVSet),
})
}
}

View File

@@ -86,7 +86,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
tests := []struct {
name string
nodesStatuses framework.NodeToStatusMap
expected sets.String // set of expected node names.
expected sets.Set[string] // set of expected node names.
}{
{
name: "No node should be attempted",
@@ -96,7 +96,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodaffinity.ErrReasonAffinityRulesNotMatch),
},
expected: sets.NewString(),
expected: sets.New[string](),
},
{
name: "ErrReasonAntiAffinityRulesNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod anti-affinity",
@@ -105,7 +105,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnschedulable),
},
expected: sets.NewString("node1", "node4"),
expected: sets.New("node1", "node4"),
},
{
name: "ErrReasonAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrReasonAntiAffinityRulesNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod anti-affinity",
@@ -113,7 +113,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodaffinity.ErrReasonAffinityRulesNotMatch),
"node2": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAntiAffinityRulesNotMatch),
},
expected: sets.NewString("node2", "node3", "node4"),
expected: sets.New("node2", "node3", "node4"),
},
{
name: "Mix of failed predicates works fine",
@@ -121,14 +121,14 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
"node2": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
},
expected: sets.NewString("node2", "node3", "node4"),
expected: sets.New("node2", "node3", "node4"),
},
{
name: "Node condition errors should be considered unresolvable",
nodesStatuses: framework.NodeToStatusMap{
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
},
expected: sets.NewString("node2", "node3", "node4"),
expected: sets.New("node2", "node3", "node4"),
},
{
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
@@ -137,7 +137,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumebinding.ErrReasonNodeConflict)),
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumebinding.ErrReasonBindConflict)),
},
expected: sets.NewString("node4"),
expected: sets.New("node4"),
},
{
name: "ErrReasonConstraintsNotMatch should be tried as it indicates that the pod is unschedulable due to topology spread constraints",
@@ -146,7 +146,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"node3": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
},
expected: sets.NewString("node1", "node3", "node4"),
expected: sets.New("node1", "node3", "node4"),
},
{
name: "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
@@ -155,7 +155,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node3": framework.NewStatus(framework.Unschedulable, ""),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
},
expected: sets.NewString("node1", "node3"),
expected: sets.New("node1", "node3"),
},
{
name: "ErrReasonNodeLabelNotMatch should not be tried as it indicates that the pod is unschedulable due to node doesn't have the required label",
@@ -164,7 +164,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node3": framework.NewStatus(framework.Unschedulable, ""),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
},
expected: sets.NewString("node1", "node3"),
expected: sets.New("node1", "node3"),
},
}

View File

@@ -132,7 +132,7 @@ type frameworkOptions struct {
podNominator framework.PodNominator
extenders []framework.Extender
captureProfile CaptureProfile
clusterEventMap map[framework.ClusterEvent]sets.String
clusterEventMap map[framework.ClusterEvent]sets.Set[string]
parallelizer parallelize.Parallelizer
}
@@ -216,7 +216,7 @@ func WithCaptureProfile(c CaptureProfile) Option {
}
// WithClusterEventMap sets clusterEventMap for the scheduling frameworkImpl.
func WithClusterEventMap(m map[framework.ClusterEvent]sets.String) Option {
func WithClusterEventMap(m map[framework.ClusterEvent]sets.Set[string]) Option {
return func(o *frameworkOptions) {
o.clusterEventMap = m
}
@@ -233,7 +233,7 @@ func WithMetricsRecorder(r *metrics.MetricAsyncRecorder) Option {
func defaultFrameworkOptions(stopCh <-chan struct{}) frameworkOptions {
return frameworkOptions{
metricsRecorder: metrics.NewMetricsAsyncRecorder(1000, time.Second, stopCh),
clusterEventMap: make(map[framework.ClusterEvent]sets.String),
clusterEventMap: make(map[framework.ClusterEvent]sets.Set[string]),
parallelizer: parallelize.NewParallelizer(parallelize.DefaultParallelism),
}
}
@@ -441,7 +441,7 @@ func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerPro
enabledSet.insert(plugin.Name)
}
disabledSet := sets.NewString()
disabledSet := sets.New[string]()
for _, disabledPlugin := range e.plugins.Disabled {
disabledSet.Insert(disabledPlugin.Name)
}
@@ -516,7 +516,7 @@ func (f *frameworkImpl) expandMultiPointPlugins(profile *config.KubeSchedulerPro
return nil
}
func fillEventToPluginMap(p framework.Plugin, eventToPlugins map[framework.ClusterEvent]sets.String) {
func fillEventToPluginMap(p framework.Plugin, eventToPlugins map[framework.ClusterEvent]sets.Set[string]) {
ext, ok := p.(framework.EnqueueExtensions)
if !ok {
// If interface EnqueueExtensions is not implemented, register the default events
@@ -537,10 +537,10 @@ func fillEventToPluginMap(p framework.Plugin, eventToPlugins map[framework.Clust
registerClusterEvents(p.Name(), eventToPlugins, events)
}
func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEvent]sets.String, evts []framework.ClusterEvent) {
func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEvent]sets.Set[string], evts []framework.ClusterEvent) {
for _, evt := range evts {
if eventToPlugins[evt] == nil {
eventToPlugins[evt] = sets.NewString(name)
eventToPlugins[evt] = sets.New(name)
} else {
eventToPlugins[evt].Insert(name)
}
@@ -550,7 +550,7 @@ func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEven
func updatePluginList(pluginList interface{}, pluginSet config.PluginSet, pluginsMap map[string]framework.Plugin) error {
plugins := reflect.ValueOf(pluginList).Elem()
pluginType := plugins.Type().Elem()
set := sets.NewString()
set := sets.New[string]()
for _, ep := range pluginSet.Enabled {
pg, ok := pluginsMap[ep.Name]
if !ok {
@@ -1362,8 +1362,8 @@ func (f *frameworkImpl) SharedInformerFactory() informers.SharedInformerFactory
return f.informerFactory
}
func (f *frameworkImpl) pluginsNeeded(plugins *config.Plugins) sets.String {
pgSet := sets.String{}
func (f *frameworkImpl) pluginsNeeded(plugins *config.Plugins) sets.Set[string] {
pgSet := sets.Set[string]{}
if plugins == nil {
return pgSet
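registerClusterEvents above uses the usual map-of-sets idiom: create the set on the first insert for a key, otherwise Insert into the existing one. A stripped-down sketch with string keys standing in for framework.ClusterEvent:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// register records which plugin names care about which events.
func register(pluginName string, eventToPlugins map[string]sets.Set[string], events []string) {
	for _, evt := range events {
		if eventToPlugins[evt] == nil {
			eventToPlugins[evt] = sets.New(pluginName)
		} else {
			eventToPlugins[evt].Insert(pluginName)
		}
	}
}

func main() {
	m := make(map[string]sets.Set[string])
	register("fakeNode", m, []string{"Node/Delete"})
	register("fakePod", m, []string{"Node/Delete", "Pod/Add"})
	fmt.Println(sets.List(m["Node/Delete"])) // [fakeNode fakePod]
}
```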

View File

@@ -906,74 +906,74 @@ func TestNewFrameworkFillEventToPluginMap(t *testing.T) {
tests := []struct {
name string
plugins []framework.Plugin
want map[framework.ClusterEvent]sets.String
want map[framework.ClusterEvent]sets.Set[string]
}{
{
name: "no-op plugin",
plugins: []framework.Plugin{&fakeNoopPlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
want: map[framework.ClusterEvent]sets.Set[string]{
{Resource: framework.Pod, ActionType: framework.All}: sets.New("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.All}: sets.New("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.New("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.New("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.New("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.New("fakeNoop", bindPlugin, queueSortPlugin),
},
},
{
name: "node plugin",
plugins: []framework.Plugin{&fakeNodePlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakeNode", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
want: map[framework.ClusterEvent]sets.Set[string]{
{Resource: framework.Pod, ActionType: framework.All}: sets.New("fakeNode", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.Delete}: sets.New("fakeNode"),
{Resource: framework.Node, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete}: sets.New("fakeNode"),
{Resource: framework.CSINode, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
},
},
{
name: "pod plugin",
plugins: []framework.Plugin{&fakePodPlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakePod", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.Add | framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
want: map[framework.ClusterEvent]sets.Set[string]{
{Resource: framework.Pod, ActionType: framework.All}: sets.New("fakePod", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.Add | framework.Delete}: sets.New("fakePod"),
{Resource: framework.Node, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.Delete}: sets.New("fakePod"),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
},
},
{
name: "node and pod plugin",
plugins: []framework.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.Node, ActionType: framework.Add | framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakeNode", "fakePod", bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
want: map[framework.ClusterEvent]sets.Set[string]{
{Resource: framework.Node, ActionType: framework.Delete}: sets.New("fakeNode"),
{Resource: framework.Node, ActionType: framework.Add | framework.Delete}: sets.New("fakePod"),
{Resource: framework.Pod, ActionType: framework.All}: sets.New("fakeNode", "fakePod", bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete}: sets.New("fakeNode"),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.Delete}: sets.New("fakePod"),
{Resource: framework.Node, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
},
},
{
name: "no-op runtime plugin",
plugins: []framework.Plugin{&fakeNoopRuntimePlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
want: map[framework.ClusterEvent]sets.Set[string]{
{Resource: framework.Pod, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.New(bindPlugin, queueSortPlugin),
},
},
}
@@ -991,7 +991,7 @@ func TestNewFrameworkFillEventToPluginMap(t *testing.T) {
cfgPls.Filter.Enabled = append(cfgPls.Filter.Enabled, config.Plugin{Name: pl.Name()})
}
got := make(map[framework.ClusterEvent]sets.String)
got := make(map[framework.ClusterEvent]sets.Set[string])
profile := config.KubeSchedulerProfile{Plugins: cfgPls}
stopCh := make(chan struct{})
defer close(stopCh)

View File

@@ -108,7 +108,7 @@ type QueuedPodInfo struct {
// latency for a pod.
InitialAttemptTimestamp time.Time
// If a Pod failed in a scheduling cycle, record the plugin names it failed by.
UnschedulablePlugins sets.String
UnschedulablePlugins sets.Set[string]
// Whether the Pod is scheduling gated (by PreEnqueuePlugins) or not.
Gated bool
}
@@ -197,7 +197,7 @@ func (pi *PodInfo) Update(pod *v1.Pod) error {
// AffinityTerm is a processed version of v1.PodAffinityTerm.
type AffinityTerm struct {
Namespaces sets.String
Namespaces sets.Set[string]
Selector labels.Selector
TopologyKey string
NamespaceSelector labels.Selector
@@ -220,7 +220,7 @@ type WeightedAffinityTerm struct {
// Diagnosis records the details to diagnose a scheduling failure.
type Diagnosis struct {
NodeToStatusMap NodeToStatusMap
UnschedulablePlugins sets.String
UnschedulablePlugins sets.Set[string]
// PreFilterMsg records the messages returned from PreFilter plugins.
PreFilterMsg string
// PostFilterMsg records the messages returned from PostFilter plugins.
@@ -364,8 +364,8 @@ func getPodAntiAffinityTerms(affinity *v1.Affinity) (terms []v1.PodAffinityTerm)
// returns a set of names according to the namespaces indicated in podAffinityTerm.
// If namespaces is empty it considers the given pod's namespace.
func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
names := sets.String{}
func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.Set[string] {
names := sets.Set[string]{}
if len(podAffinityTerm.Namespaces) == 0 && podAffinityTerm.NamespaceSelector == nil {
names.Insert(pod.Namespace)
} else {

View File

@@ -1339,26 +1339,26 @@ func TestGetNamespacesFromPodAffinityTerm(t *testing.T) {
tests := []struct {
name string
term *v1.PodAffinityTerm
want sets.String
want sets.Set[string]
}{
{
name: "podAffinityTerm_namespace_empty",
term: &v1.PodAffinityTerm{},
want: sets.String{metav1.NamespaceDefault: sets.Empty{}},
want: sets.Set[string]{metav1.NamespaceDefault: sets.Empty{}},
},
{
name: "podAffinityTerm_namespace_not_empty",
term: &v1.PodAffinityTerm{
Namespaces: []string{metav1.NamespacePublic, metav1.NamespaceSystem},
},
want: sets.NewString(metav1.NamespacePublic, metav1.NamespaceSystem),
want: sets.New(metav1.NamespacePublic, metav1.NamespaceSystem),
},
{
name: "podAffinityTerm_namespace_selector_not_nil",
term: &v1.PodAffinityTerm{
NamespaceSelector: &metav1.LabelSelector{},
},
want: sets.String{},
want: sets.Set[string]{},
},
}
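These hunks use three equivalent ways to build a Set[string], which works because the generic Set is just a map type: the sets.New constructor, the zero-value composite literal, and an explicit map literal with sets.Empty{} values. A quick sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	a := sets.New[string]()                        // constructor
	b := sets.Set[string]{}                        // empty composite literal; Insert works on it
	c := sets.Set[string]{"default": sets.Empty{}} // explicit map literal, as in the test above

	a.Insert("default")
	b.Insert("default")
	fmt.Println(a.Equal(b), b.Equal(c)) // true true
}
```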

View File

@@ -61,7 +61,7 @@ type cacheImpl struct {
mu sync.RWMutex
// a set of assumed pod keys.
// The key could further be used to get an entry in podStates.
assumedPods sets.String
assumedPods sets.Set[string]
// a map from pod key to podState.
podStates map[string]*podState
nodes map[string]*nodeInfoListItem
@@ -86,7 +86,7 @@ type imageState struct {
// Size of the image
size int64
// A set of node names for nodes having this image present
nodes sets.String
nodes sets.Set[string]
}
// createImageStateSummary returns a summarizing snapshot of the given image's state.
@@ -105,7 +105,7 @@ func newCache(ttl, period time.Duration, stop <-chan struct{}) *cacheImpl {
nodes: make(map[string]*nodeInfoListItem),
nodeTree: newNodeTree(nil),
assumedPods: make(sets.String),
assumedPods: sets.New[string](),
podStates: make(map[string]*podState),
imageStates: make(map[string]*imageState),
}
@@ -293,7 +293,7 @@ func (cache *cacheImpl) UpdateSnapshot(nodeSnapshot *Snapshot) error {
func (cache *cacheImpl) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
snapshot.havePodsWithAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
snapshot.usedPVCSet = sets.NewString()
snapshot.usedPVCSet = sets.New[string]()
if updateAll {
// Take a snapshot of the nodes order in the tree
snapshot.nodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
@@ -693,7 +693,7 @@ func (cache *cacheImpl) addNodeImageStates(node *v1.Node, nodeInfo *framework.No
if !ok {
state = &imageState{
size: image.SizeBytes,
nodes: sets.NewString(node.Name),
nodes: sets.New(node.Name),
}
cache.imageStates[name] = state
} else {

View File

@@ -1427,19 +1427,19 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
operations []operation
expected []*v1.Node
expectedHavePodsWithAffinity int
expectedUsedPVCSet sets.String
expectedUsedPVCSet sets.Set[string]
}{
{
name: "Empty cache",
operations: []operation{},
expected: []*v1.Node{},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Single node",
operations: []operation{addNode(1)},
expected: []*v1.Node{nodes[1]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add node, remove it, add it again",
@@ -1447,7 +1447,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(1), updateSnapshot(), removeNode(1), addNode(1),
},
expected: []*v1.Node{nodes[1]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add node and remove it in the same cycle, add it again",
@@ -1455,7 +1455,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(1), updateSnapshot(), addNode(2), removeNode(1),
},
expected: []*v1.Node{nodes[2]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add a few nodes, and snapshot in the middle",
@@ -1464,7 +1464,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
updateSnapshot(), addNode(3),
},
expected: []*v1.Node{nodes[3], nodes[2], nodes[1], nodes[0]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add a few nodes, and snapshot in the end",
@@ -1472,7 +1472,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addNode(2), addNode(5), addNode(6),
},
expected: []*v1.Node{nodes[6], nodes[5], nodes[2], nodes[0]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Update some nodes",
@@ -1480,7 +1480,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addNode(1), addNode(5), updateSnapshot(), updateNode(1),
},
expected: []*v1.Node{nodes[1], nodes[5], nodes[0]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add a few nodes, and remove all of them",
@@ -1489,7 +1489,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
removeNode(0), removeNode(2), removeNode(5), removeNode(6),
},
expected: []*v1.Node{},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add a few nodes, and remove some of them",
@@ -1498,7 +1498,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
removeNode(0), removeNode(6),
},
expected: []*v1.Node{nodes[5], nodes[2]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add a few nodes, remove all of them, and add more",
@@ -1508,7 +1508,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(7), addNode(9),
},
expected: []*v1.Node{nodes[9], nodes[7]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Update nodes in particular order",
@@ -1517,7 +1517,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(1),
},
expected: []*v1.Node{nodes[1], nodes[8], nodes[2]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add some nodes and some pods",
@@ -1526,7 +1526,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addPod(8), addPod(2),
},
expected: []*v1.Node{nodes[2], nodes[8], nodes[0]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Updating a pod moves its node to the head",
@@ -1534,7 +1534,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addPod(0), addNode(2), addNode(4), updatePod(0),
},
expected: []*v1.Node{nodes[0], nodes[4], nodes[2]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add pod before its node",
@@ -1542,7 +1542,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addPod(1), updatePod(1), addNode(1),
},
expected: []*v1.Node{nodes[1], nodes[0]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Remove node before its pods",
@@ -1552,7 +1552,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
updatePod(1), updatePod(11), removePod(1), removePod(11),
},
expected: []*v1.Node{nodes[0]},
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add Pods with affinity",
@@ -1561,7 +1561,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
},
expected: []*v1.Node{nodes[1], nodes[0]},
expectedHavePodsWithAffinity: 1,
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add Pods with PVC",
@@ -1569,7 +1569,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addPodWithPVC(0), updateSnapshot(), addNode(1),
},
expected: []*v1.Node{nodes[1], nodes[0]},
expectedUsedPVCSet: sets.NewString("test-ns/test-pvc0"),
expectedUsedPVCSet: sets.New("test-ns/test-pvc0"),
},
{
name: "Add multiple nodes with pods with affinity",
@@ -1578,7 +1578,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
},
expected: []*v1.Node{nodes[1], nodes[0]},
expectedHavePodsWithAffinity: 2,
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add multiple nodes with pods with PVC",
@@ -1586,7 +1586,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addPodWithPVC(0), updateSnapshot(), addNode(1), addPodWithPVC(1), updateSnapshot(),
},
expected: []*v1.Node{nodes[1], nodes[0]},
expectedUsedPVCSet: sets.NewString("test-ns/test-pvc0", "test-ns/test-pvc1"),
expectedUsedPVCSet: sets.New("test-ns/test-pvc0", "test-ns/test-pvc1"),
},
{
name: "Add then Remove pods with affinity",
@@ -1595,7 +1595,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
},
expected: []*v1.Node{nodes[0], nodes[1]},
expectedHavePodsWithAffinity: 0,
expectedUsedPVCSet: sets.NewString(),
expectedUsedPVCSet: sets.New[string](),
},
{
name: "Add then Remove pod with PVC",
@@ -1603,7 +1603,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addPodWithPVC(0), updateSnapshot(), removePodWithPVC(0), addPodWithPVC(2), updateSnapshot(),
},
expected: []*v1.Node{nodes[0]},
expectedUsedPVCSet: sets.NewString("test-ns/test-pvc2"),
expectedUsedPVCSet: sets.New("test-ns/test-pvc2"),
},
{
name: "Add then Remove pod with PVC and add same pod again",
@@ -1611,7 +1611,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
addNode(0), addPodWithPVC(0), updateSnapshot(), removePodWithPVC(0), addPodWithPVC(0), updateSnapshot(),
},
expected: []*v1.Node{nodes[0]},
expectedUsedPVCSet: sets.NewString("test-ns/test-pvc0"),
expectedUsedPVCSet: sets.New("test-ns/test-pvc0"),
},
{
name: "Add and Remove multiple pods with PVC with same ref count length different content",
@@ -1620,7 +1620,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
removePodWithPVC(0), removePodWithPVC(1), addPodWithPVC(2), addPodWithPVC(3), updateSnapshot(),
},
expected: []*v1.Node{nodes[1], nodes[0]},
expectedUsedPVCSet: sets.NewString("test-ns/test-pvc2", "test-ns/test-pvc3"),
expectedUsedPVCSet: sets.New("test-ns/test-pvc2", "test-ns/test-pvc3"),
},
{
name: "Add and Remove multiple pods with PVC",
@@ -1631,7 +1631,7 @@ func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
removePodWithPVC(0), removePodWithPVC(3), removePodWithPVC(4), updateSnapshot(),
},
expected: []*v1.Node{nodes[0], nodes[1]},
expectedUsedPVCSet: sets.NewString("test-ns/test-pvc1", "test-ns/test-pvc2"),
expectedUsedPVCSet: sets.New("test-ns/test-pvc1", "test-ns/test-pvc2"),
},
}
@@ -1703,7 +1703,7 @@ func compareCacheWithNodeInfoSnapshot(t *testing.T, cache *cacheImpl, snapshot *
expectedNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
expectedHavePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
expectedUsedPVCSet := sets.NewString()
expectedUsedPVCSet := sets.New[string]()
nodesList, err := cache.nodeTree.list()
if err != nil {
t.Fatal(err)

View File

@@ -117,6 +117,6 @@ type Cache interface {
// Dump is a dump of the cache state.
type Dump struct {
AssumedPods sets.String
AssumedPods sets.Set[string]
Nodes map[string]*framework.NodeInfo
}
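
As an illustration of the Dump change above, here is a minimal standalone sketch of a struct field typed as the generic sets.Set[string]; the dump type, the pod keys, and the main wrapper are invented for this example, and only k8s.io/apimachinery/pkg/util/sets is assumed:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// dump mirrors only the shape of the field above: a set-valued field
// declared as the generic sets.Set[string] instead of sets.String.
type dump struct {
	AssumedPods sets.Set[string]
}

func main() {
	// An empty set has nothing to infer the type parameter from,
	// so the element type is spelled out explicitly.
	d := dump{AssumedPods: sets.New[string]()}
	d.AssumedPods.Insert("ns1/pod-a", "ns2/pod-b")

	fmt.Println(d.AssumedPods.Has("ns1/pod-a")) // true
	fmt.Println(sets.List(d.AssumedPods))       // sorted: [ns1/pod-a ns2/pod-b]
}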

View File

@@ -38,7 +38,7 @@ type Snapshot struct {
havePodsWithRequiredAntiAffinityNodeInfoList []*framework.NodeInfo
// usedPVCSet contains a set of PVC names that have one or more scheduled pods using them,
// keyed in the format "namespace/name".
usedPVCSet sets.String
usedPVCSet sets.Set[string]
generation int64
}
@@ -48,7 +48,7 @@ var _ framework.SharedLister = &Snapshot{}
func NewEmptySnapshot() *Snapshot {
return &Snapshot{
nodeInfoMap: make(map[string]*framework.NodeInfo),
usedPVCSet: sets.NewString(),
usedPVCSet: sets.New[string](),
}
}
@@ -103,8 +103,8 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*framework.N
return nodeNameToInfo
}
func createUsedPVCSet(pods []*v1.Pod) sets.String {
usedPVCSet := sets.NewString()
func createUsedPVCSet(pods []*v1.Pod) sets.Set[string] {
usedPVCSet := sets.New[string]()
for _, pod := range pods {
if pod.Spec.NodeName == "" {
continue
@@ -123,7 +123,7 @@ func createUsedPVCSet(pods []*v1.Pod) sets.String {
}
// getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*framework.ImageStateSummary {
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.Set[string]) map[string]*framework.ImageStateSummary {
imageStates := make(map[string]*framework.ImageStateSummary)
for _, image := range node.Status.Images {
@@ -138,13 +138,13 @@ func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String)
}
// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
imageExistenceMap := make(map[string]sets.String)
func createImageExistenceMap(nodes []*v1.Node) map[string]sets.Set[string] {
imageExistenceMap := make(map[string]sets.Set[string])
for _, node := range nodes {
for _, image := range node.Status.Images {
for _, name := range image.Names {
if _, ok := imageExistenceMap[name]; !ok {
imageExistenceMap[name] = sets.NewString(node.Name)
imageExistenceMap[name] = sets.New(node.Name)
} else {
imageExistenceMap[name].Insert(node.Name)
}
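
The hunks above all follow the same two rules: sets.New infers its type parameter from its arguments, and an argument-less call is spelled sets.New[string](). A standalone sketch of the image-existence pattern, with made-up image and node names and only the generic sets package assumed:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// buildImageExistenceMap records which nodes carry which image names,
// following the same map-of-sets pattern as the snapshot code; the input
// shape and all names here are hypothetical.
func buildImageExistenceMap(imagesByNode map[string][]string) map[string]sets.Set[string] {
	m := make(map[string]sets.Set[string])
	for node, images := range imagesByNode {
		for _, image := range images {
			if _, ok := m[image]; !ok {
				m[image] = sets.New(node) // element type inferred from the argument
			} else {
				m[image].Insert(node)
			}
		}
	}
	return m
}

func main() {
	m := buildImageExistenceMap(map[string][]string{
		"node-0": {"gcr.io/10:v1"},
		"node-1": {"gcr.io/10:v1", "gcr.io/200:v1"},
	})
	fmt.Println(sets.List(m["gcr.io/10:v1"]))  // [node-0 node-1]
	fmt.Println(sets.List(m["gcr.io/200:v1"])) // [node-1]
}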

View File

@@ -35,7 +35,7 @@ const mb int64 = 1024 * 1024
func TestGetNodeImageStates(t *testing.T) {
tests := []struct {
node *v1.Node
imageExistenceMap map[string]sets.String
imageExistenceMap map[string]sets.Set[string]
expected map[string]*framework.ImageStateSummary
}{
{
@@ -58,9 +58,9 @@ func TestGetNodeImageStates(t *testing.T) {
},
},
},
imageExistenceMap: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-0"),
imageExistenceMap: map[string]sets.Set[string]{
"gcr.io/10:v1": sets.New("node-0", "node-1"),
"gcr.io/200:v1": sets.New("node-0"),
},
expected: map[string]*framework.ImageStateSummary{
"gcr.io/10:v1": {
@@ -78,9 +78,9 @@ func TestGetNodeImageStates(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "node-0"},
Status: v1.NodeStatus{},
},
imageExistenceMap: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-1"),
"gcr.io/200:v1": sets.NewString(),
imageExistenceMap: map[string]sets.Set[string]{
"gcr.io/10:v1": sets.New("node-1"),
"gcr.io/200:v1": sets.New[string](),
},
expected: map[string]*framework.ImageStateSummary{},
},
@@ -99,7 +99,7 @@ func TestGetNodeImageStates(t *testing.T) {
func TestCreateImageExistenceMap(t *testing.T) {
tests := []struct {
nodes []*v1.Node
expected map[string]sets.String
expected map[string]sets.Set[string]
}{
{
nodes: []*v1.Node{
@@ -136,9 +136,9 @@ func TestCreateImageExistenceMap(t *testing.T) {
},
},
},
expected: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-1"),
expected: map[string]sets.Set[string]{
"gcr.io/10:v1": sets.New("node-0", "node-1"),
"gcr.io/200:v1": sets.New("node-1"),
},
},
{
@@ -167,9 +167,9 @@ func TestCreateImageExistenceMap(t *testing.T) {
},
},
},
expected: map[string]sets.String{
"gcr.io/10:v1": sets.NewString("node-1"),
"gcr.io/200:v1": sets.NewString("node-1"),
expected: map[string]sets.Set[string]{
"gcr.io/10:v1": sets.New("node-1"),
"gcr.io/200:v1": sets.New("node-1"),
},
},
}
@@ -188,12 +188,12 @@ func TestCreateUsedPVCSet(t *testing.T) {
tests := []struct {
name string
pods []*v1.Pod
expected sets.String
expected sets.Set[string]
}{
{
name: "empty pods list",
pods: []*v1.Pod{},
expected: sets.NewString(),
expected: sets.New[string](),
},
{
name: "pods not scheduled",
@@ -201,7 +201,7 @@ func TestCreateUsedPVCSet(t *testing.T) {
st.MakePod().Name("foo").Namespace("foo").Obj(),
st.MakePod().Name("bar").Namespace("bar").Obj(),
},
expected: sets.NewString(),
expected: sets.New[string](),
},
{
name: "scheduled pods that do not use any PVC",
@@ -209,7 +209,7 @@ func TestCreateUsedPVCSet(t *testing.T) {
st.MakePod().Name("foo").Namespace("foo").Node("node-1").Obj(),
st.MakePod().Name("bar").Namespace("bar").Node("node-2").Obj(),
},
expected: sets.NewString(),
expected: sets.New[string](),
},
{
name: "scheduled pods that use PVC",
@@ -217,7 +217,7 @@ func TestCreateUsedPVCSet(t *testing.T) {
st.MakePod().Name("foo").Namespace("foo").Node("node-1").PVC("pvc1").Obj(),
st.MakePod().Name("bar").Namespace("bar").Node("node-2").PVC("pvc2").Obj(),
},
expected: sets.NewString("foo/pvc1", "bar/pvc2"),
expected: sets.New("foo/pvc1", "bar/pvc2"),
},
}
@@ -252,7 +252,7 @@ func TestNewSnapshot(t *testing.T) {
expectedNumNodes int
expectedPodsWithAffinity int
expectedPodsWithAntiAffinity int
expectedUsedPVCSet sets.String
expectedUsedPVCSet sets.Set[string]
}{
{
name: "no pods no nodes",
@@ -302,7 +302,7 @@ func TestNewSnapshot(t *testing.T) {
},
},
expectedNumNodes: 3,
expectedUsedPVCSet: sets.NewString("foo/pvc0", "bar/pvc1", "baz/pvc2"),
expectedUsedPVCSet: sets.New("foo/pvc0", "bar/pvc1", "baz/pvc2"),
},
{
name: "multiple nodes, pod with affinity",
@@ -330,7 +330,7 @@ func TestNewSnapshot(t *testing.T) {
Pod: podsWithAffitiny[0],
RequiredAffinityTerms: []framework.AffinityTerm{
{
Namespaces: sets.NewString("ns"),
Namespaces: sets.New("ns"),
Selector: labels.SelectorFromSet(map[string]string{"baz": "qux"}),
TopologyKey: "baz",
NamespaceSelector: labels.Nothing(),
@@ -360,7 +360,7 @@ func TestNewSnapshot(t *testing.T) {
Pod: podsWithAffitiny[1],
RequiredAffinityTerms: []framework.AffinityTerm{
{
Namespaces: sets.NewString("ns"),
Namespaces: sets.New("ns"),
Selector: labels.SelectorFromSet(map[string]string{"key": "value"}),
TopologyKey: "key",
NamespaceSelector: labels.Nothing(),
@@ -371,7 +371,7 @@ func TestNewSnapshot(t *testing.T) {
Pod: podWithAntiAffitiny,
RequiredAntiAffinityTerms: []framework.AffinityTerm{
{
Namespaces: sets.NewString("ns"),
Namespaces: sets.New("ns"),
Selector: labels.SelectorFromSet(map[string]string{"another": "label"}),
TopologyKey: "another",
NamespaceSelector: labels.Nothing(),

View File

@@ -174,7 +174,7 @@ type PriorityQueue struct {
// when we received a move request.
moveRequestCycle int64
clusterEventMap map[framework.ClusterEvent]sets.String
clusterEventMap map[framework.ClusterEvent]sets.Set[string]
// preEnqueuePluginMap is keyed with profile name, valued with registered preEnqueue plugins.
preEnqueuePluginMap map[string][]framework.PreEnqueuePlugin
@@ -197,7 +197,7 @@ type priorityQueueOptions struct {
podLister listersv1.PodLister
metricsRecorder metrics.MetricAsyncRecorder
pluginMetricsSamplePercent int
clusterEventMap map[framework.ClusterEvent]sets.String
clusterEventMap map[framework.ClusterEvent]sets.Set[string]
preEnqueuePluginMap map[string][]framework.PreEnqueuePlugin
}
@@ -233,7 +233,7 @@ func WithPodLister(pl listersv1.PodLister) Option {
}
// WithClusterEventMap sets clusterEventMap for PriorityQueue.
func WithClusterEventMap(m map[framework.ClusterEvent]sets.String) Option {
func WithClusterEventMap(m map[framework.ClusterEvent]sets.Set[string]) Option {
return func(o *priorityQueueOptions) {
o.clusterEventMap = m
}
@@ -283,7 +283,7 @@ func newQueuedPodInfoForLookup(pod *v1.Pod, plugins ...string) *framework.Queued
// and so we avoid creating a full PodInfo, which is expensive to instantiate frequently.
return &framework.QueuedPodInfo{
PodInfo: &framework.PodInfo{Pod: pod},
UnschedulablePlugins: sets.NewString(plugins...),
UnschedulablePlugins: sets.New(plugins...),
}
}
@@ -903,7 +903,7 @@ func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod, plugins ...string) *framew
PodInfo: podInfo,
Timestamp: now,
InitialAttemptTimestamp: now,
UnschedulablePlugins: sets.NewString(plugins...),
UnschedulablePlugins: sets.New(plugins...),
}
}
@@ -1170,7 +1170,7 @@ func (p *PriorityQueue) podMatchesEvent(podInfo *framework.QueuedPodInfo, cluste
return false
}
func intersect(x, y sets.String) bool {
func intersect(x, y sets.Set[string]) bool {
if len(x) > len(y) {
x, y = y, x
}
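
Since the generic Set[T] is just a map keyed by T, helpers such as intersect can now be written once for any comparable element type. A sketch of that idea (intersects and its test data are invented here, not scheduler code), alongside the equivalent check via the built-in Intersection method:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// intersects reports whether two sets share at least one element,
// iterating over the smaller one; this is a generic sketch of the idea,
// not the scheduler's helper.
func intersects[T comparable](x, y sets.Set[T]) bool {
	if len(x) > len(y) {
		x, y = y, x
	}
	for v := range x {
		if y.Has(v) {
			return true
		}
	}
	return false
}

func main() {
	a := sets.New("fooPlugin", "barPlugin")
	b := sets.New("barPlugin")

	fmt.Println(intersects(a, b))            // true
	fmt.Println(a.Intersection(b).Len() > 0) // same answer via the built-in method
}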

View File

@@ -643,13 +643,13 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
b.StopTimer()
c := testingclock.NewFakeClock(time.Now())
m := make(map[framework.ClusterEvent]sets.String)
m := make(map[framework.ClusterEvent]sets.Set[string])
// - All plugins registered for events[0], which is NodeAdd.
// - 1/2 of plugins registered for events[1]
// - 1/3 of plugins registered for events[2]
// - ...
for j := 0; j < len(events); j++ {
m[events[j]] = sets.NewString()
m[events[j]] = sets.New[string]()
for k := 0; k < len(plugins); k++ {
if (k+1)%(j+1) == 0 {
m[events[j]].Insert(plugins[k])
@@ -702,8 +702,8 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
c := testingclock.NewFakeClock(time.Now())
m := map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fooPlugin"),
m := map[framework.ClusterEvent]sets.Set[string]{
{Resource: framework.Node, ActionType: framework.Add}: sets.New("fooPlugin"),
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -764,7 +764,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
labelPod := st.MakePod().Name("lbp").Namespace(affinityPod.Namespace).Label("service", "securityscan").Node("node1").Obj()
c := testingclock.NewFakeClock(time.Now())
m := map[framework.ClusterEvent]sets.String{AssignedPodAdd: sets.NewString("fakePlugin")}
m := map[framework.ClusterEvent]sets.Set[string]{AssignedPodAdd: sets.New("fakePlugin")}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
q := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
@@ -1034,35 +1034,35 @@ func TestUnschedulablePodsMap(t *testing.T) {
name: "create, update, delete subset of pods",
podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.New[string]()},
},
podsToUpdate: []*v1.Pod{updatedPods[0]},
expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, updatedPods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, updatedPods[0]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.New[string]()},
},
podsToDelete: []*v1.Pod{pods[0], pods[1]},
expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.New[string]()},
},
},
{
name: "create, update, delete all",
podsToAdd: []*v1.Pod{pods[0], pods[3]},
expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, pods[3]), UnschedulablePlugins: sets.New[string]()},
},
podsToUpdate: []*v1.Pod{updatedPods[3]},
expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, updatedPods[3]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[0]): {PodInfo: mustNewTestPodInfo(t, pods[0]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[3]): {PodInfo: mustNewTestPodInfo(t, updatedPods[3]), UnschedulablePlugins: sets.New[string]()},
},
podsToDelete: []*v1.Pod{pods[0], pods[3]},
expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{},
@@ -1071,17 +1071,17 @@ func TestUnschedulablePodsMap(t *testing.T) {
name: "delete non-existing and existing pods",
podsToAdd: []*v1.Pod{pods[1], pods[2]},
expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, pods[1]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.New[string]()},
},
podsToUpdate: []*v1.Pod{updatedPods[1]},
expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, updatedPods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, updatedPods[1]), UnschedulablePlugins: sets.New[string]()},
util.GetPodFullName(pods[2]): {PodInfo: mustNewTestPodInfo(t, pods[2]), UnschedulablePlugins: sets.New[string]()},
},
podsToDelete: []*v1.Pod{pods[2], pods[3]},
expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, updatedPods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: mustNewTestPodInfo(t, updatedPods[1]), UnschedulablePlugins: sets.New[string]()},
},
},
}
@@ -1305,8 +1305,8 @@ func TestHighPriorityBackoff(t *testing.T) {
// activeQ after one minute if it is in unschedulablePods.
func TestHighPriorityFlushUnschedulablePodsLeftover(t *testing.T) {
c := testingclock.NewFakeClock(time.Now())
m := map[framework.ClusterEvent]sets.String{
NodeAdd: sets.NewString("fakePlugin"),
m := map[framework.ClusterEvent]sets.Set[string]{
NodeAdd: sets.New("fakePlugin"),
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -2091,15 +2091,15 @@ func TestPodMatchesEvent(t *testing.T) {
name string
podInfo *framework.QueuedPodInfo
event framework.ClusterEvent
clusterEventMap map[framework.ClusterEvent]sets.String
clusterEventMap map[framework.ClusterEvent]sets.Set[string]
want bool
}{
{
name: "event not registered",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj()),
event: EmptyEvent,
clusterEventMap: map[framework.ClusterEvent]sets.String{
NodeAllEvent: sets.NewString("foo"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
NodeAllEvent: sets.New("foo"),
},
want: false,
},
@@ -2107,8 +2107,8 @@ func TestPodMatchesEvent(t *testing.T) {
name: "pod's failed plugin matches but event does not match",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "bar"),
event: AssignedPodAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
NodeAllEvent: sets.NewString("foo", "bar"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
NodeAllEvent: sets.New("foo", "bar"),
},
want: false,
},
@@ -2116,8 +2116,8 @@ func TestPodMatchesEvent(t *testing.T) {
name: "wildcard event wins regardless of event matching",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "bar"),
event: WildCardEvent,
clusterEventMap: map[framework.ClusterEvent]sets.String{
NodeAllEvent: sets.NewString("foo"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
NodeAllEvent: sets.New("foo"),
},
want: true,
},
@@ -2125,8 +2125,8 @@ func TestPodMatchesEvent(t *testing.T) {
name: "pod's failed plugin and event both match",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "bar"),
event: NodeTaintChange,
clusterEventMap: map[framework.ClusterEvent]sets.String{
NodeAllEvent: sets.NewString("foo", "bar"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
NodeAllEvent: sets.New("foo", "bar"),
},
want: true,
},
@@ -2134,9 +2134,9 @@ func TestPodMatchesEvent(t *testing.T) {
name: "pod's failed plugin registers fine-grained event",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "bar"),
event: NodeTaintChange,
clusterEventMap: map[framework.ClusterEvent]sets.String{
NodeAllEvent: sets.NewString("foo"),
NodeTaintChange: sets.NewString("bar"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
NodeAllEvent: sets.New("foo"),
NodeTaintChange: sets.New("bar"),
},
want: true,
},
@@ -2144,8 +2144,8 @@ func TestPodMatchesEvent(t *testing.T) {
name: "if pod failed by multiple plugins, a single match gets a final match",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "foo", "bar"),
event: NodeAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
NodeAllEvent: sets.NewString("bar"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
NodeAllEvent: sets.New("bar"),
},
want: true,
},
@@ -2153,8 +2153,8 @@ func TestPodMatchesEvent(t *testing.T) {
name: "plugin returns WildCardEvent and plugin name matches",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "foo"),
event: PvAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
WildCardEvent: sets.NewString("foo"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
WildCardEvent: sets.New("foo"),
},
want: true,
},
@@ -2162,8 +2162,8 @@ func TestPodMatchesEvent(t *testing.T) {
name: "plugin returns WildCardEvent but plugin name not match",
podInfo: newQueuedPodInfoForLookup(st.MakePod().Name("p").Obj(), "foo"),
event: PvAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
WildCardEvent: sets.NewString("bar"),
clusterEventMap: map[framework.ClusterEvent]sets.Set[string]{
WildCardEvent: sets.New("bar"),
},
want: false,
},
@@ -2266,7 +2266,7 @@ func makeQueuedPodInfos(num int, namePrefix, label string, timestamp time.Time)
PodInfo: mustNewPodInfo(
st.MakePod().Name(fmt.Sprintf("%v-%d", namePrefix, i)).Namespace(fmt.Sprintf("ns%d", i)).Label(label, "").UID(fmt.Sprintf("tp-%d", i)).Obj()),
Timestamp: timestamp,
UnschedulablePlugins: sets.NewString(),
UnschedulablePlugins: sets.New[string](),
}
pInfos = append(pInfos, p)
}
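
newQueuedPodInfo and newQueuedPodInfoForLookup above lean on sets.New being variadic: sets.New(plugins...) infers the string element type from the []string argument even when the slice is empty, while a call with no arguments at all must spell out sets.New[string](). A small sketch, independent of the scheduler types:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// A []string argument drives type inference even when the slice is empty.
	var plugins []string
	empty := sets.New(plugins...)
	fmt.Println(empty.Len()) // 0

	// With no arguments at all the element type must be written out.
	alsoEmpty := sets.New[string]()
	fmt.Println(empty.Equal(alsoEmpty)) // true

	// Literal arguments also drive inference.
	failed := sets.New("NodePorts", "NodeResourcesFit")
	fmt.Println(sets.List(failed)) // sorted: [NodePorts NodeResourcesFit]
}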

View File

@@ -387,7 +387,7 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework
func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.Diagnosis, error) {
diagnosis := framework.Diagnosis{
NodeToStatusMap: make(framework.NodeToStatusMap),
UnschedulablePlugins: sets.NewString(),
UnschedulablePlugins: sets.New[string](),
}
allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List()

View File

@@ -737,7 +737,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
NodeToStatusMap: framework.NodeToStatusMap{
node.Name: framework.NewStatus(framework.Unschedulable, nodeports.ErrReason).WithFailedPlugin(nodeports.Name),
},
UnschedulablePlugins: sets.NewString(nodeports.Name),
UnschedulablePlugins: sets.New(nodeports.Name),
},
}
if !reflect.DeepEqual(expectErr, err) {
@@ -843,7 +843,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
NumAllNodes: len(nodes),
Diagnosis: framework.Diagnosis{
NodeToStatusMap: failedNodeStatues,
UnschedulablePlugins: sets.NewString(noderesources.Name),
UnschedulablePlugins: sets.New(noderesources.Name),
},
}
if len(fmt.Sprint(expectErr)) > 150 {
@@ -1272,7 +1272,7 @@ func TestSelectHost(t *testing.T) {
tests := []struct {
name string
list []framework.NodePluginScores
possibleHosts sets.String
possibleHosts sets.Set[string]
expectsErr bool
}{
{
@@ -1281,7 +1281,7 @@ func TestSelectHost(t *testing.T) {
{Name: "node1.1", TotalScore: 1},
{Name: "node2.1", TotalScore: 2},
},
possibleHosts: sets.NewString("node2.1"),
possibleHosts: sets.New("node2.1"),
expectsErr: false,
},
{
@@ -1292,7 +1292,7 @@ func TestSelectHost(t *testing.T) {
{Name: "node1.3", TotalScore: 2},
{Name: "node2.1", TotalScore: 2},
},
possibleHosts: sets.NewString("node1.2", "node1.3", "node2.1"),
possibleHosts: sets.New("node1.2", "node1.3", "node2.1"),
expectsErr: false,
},
{
@@ -1304,13 +1304,13 @@ func TestSelectHost(t *testing.T) {
{Name: "node3.1", TotalScore: 1},
{Name: "node1.3", TotalScore: 3},
},
possibleHosts: sets.NewString("node1.1", "node1.2", "node1.3"),
possibleHosts: sets.New("node1.1", "node1.2", "node1.3"),
expectsErr: false,
},
{
name: "empty priority list",
list: []framework.NodePluginScores{},
possibleHosts: sets.NewString(),
possibleHosts: sets.New[string](),
expectsErr: true,
},
}
@@ -1525,7 +1525,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
pvcs []v1.PersistentVolumeClaim
pod *v1.Pod
pods []*v1.Pod
wantNodes sets.String
wantNodes sets.Set[string]
wantEvaluatedNodes *int32
wErr error
}{
@@ -1546,7 +1546,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
"node1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
"node2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
},
UnschedulablePlugins: sets.NewString("FalseFilter"),
UnschedulablePlugins: sets.New("FalseFilter"),
},
},
},
@@ -1558,7 +1558,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
wantNodes: sets.NewString("node1", "node2"),
wantNodes: sets.New("node1", "node2"),
name: "test 2",
wErr: nil,
},
@@ -1571,7 +1571,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("node2").UID("node2").Obj(),
wantNodes: sets.NewString("node2"),
wantNodes: sets.New("node2"),
name: "test 3",
wErr: nil,
},
@@ -1584,7 +1584,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"3", "2", "1"},
pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
wantNodes: sets.NewString("3"),
wantNodes: sets.New("3"),
name: "test 4",
wErr: nil,
},
@@ -1597,7 +1597,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"3", "2", "1"},
pod: st.MakePod().Name("2").UID("2").Obj(),
wantNodes: sets.NewString("2"),
wantNodes: sets.New("2"),
name: "test 5",
wErr: nil,
},
@@ -1611,7 +1611,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"3", "2", "1"},
pod: st.MakePod().Name("2").UID("2").Obj(),
wantNodes: sets.NewString("1"),
wantNodes: sets.New("1"),
name: "test 6",
wErr: nil,
},
@@ -1635,7 +1635,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
"2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
"1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("FalseFilter"),
},
UnschedulablePlugins: sets.NewString("FalseFilter"),
UnschedulablePlugins: sets.New("FalseFilter"),
},
},
},
@@ -1661,7 +1661,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
"1": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"),
"2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("NoPodsFilter"),
},
UnschedulablePlugins: sets.NewString("MatchFilter", "NoPodsFilter"),
UnschedulablePlugins: sets.New("MatchFilter", "NoPodsFilter"),
},
},
},
@@ -1681,7 +1681,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
},
pod: st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
wantNodes: sets.NewString("node1", "node2"),
wantNodes: sets.New("node1", "node2"),
name: "existing PVC",
wErr: nil,
},
@@ -1702,7 +1702,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
Diagnosis: framework.Diagnosis{
NodeToStatusMap: framework.NodeToStatusMap{},
PreFilterMsg: `persistentvolumeclaim "unknownPVC" not found`,
UnschedulablePlugins: sets.NewString(volumebinding.Name),
UnschedulablePlugins: sets.New(volumebinding.Name),
},
},
},
@@ -1724,7 +1724,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
Diagnosis: framework.Diagnosis{
NodeToStatusMap: framework.NodeToStatusMap{},
PreFilterMsg: `persistentvolumeclaim "existingPVC" is being deleted`,
UnschedulablePlugins: sets.NewString(volumebinding.Name),
UnschedulablePlugins: sets.New(volumebinding.Name),
},
},
},
@@ -1765,7 +1765,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
pods: []*v1.Pod{
st.MakePod().Name("pod1").UID("pod1").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
},
wantNodes: sets.NewString("node2"),
wantNodes: sets.New("node2"),
wErr: nil,
},
{
@@ -1794,7 +1794,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
st.MakePod().Name("pod1b").UID("pod1b").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
st.MakePod().Name("pod2").UID("pod2").Label("foo", "").Node("node2").Phase(v1.PodRunning).Obj(),
},
wantNodes: sets.NewString("node2", "node3"),
wantNodes: sets.New("node2", "node3"),
wErr: nil,
},
{
@@ -1818,7 +1818,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
NodeToStatusMap: framework.NodeToStatusMap{
"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter").WithFailedPlugin("FakeFilter"),
},
UnschedulablePlugins: sets.NewString("FakeFilter"),
UnschedulablePlugins: sets.New("FakeFilter"),
},
},
},
@@ -1843,7 +1843,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
NodeToStatusMap: framework.NodeToStatusMap{
"3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter").WithFailedPlugin("FakeFilter"),
},
UnschedulablePlugins: sets.NewString("FakeFilter"),
UnschedulablePlugins: sets.New("FakeFilter"),
},
},
},
@@ -1882,7 +1882,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
Diagnosis: framework.Diagnosis{
NodeToStatusMap: framework.NodeToStatusMap{},
PreFilterMsg: "injected unschedulable status",
UnschedulablePlugins: sets.NewString("FakePreFilter"),
UnschedulablePlugins: sets.New("FakePreFilter"),
},
},
},
@@ -1911,17 +1911,17 @@ func TestSchedulerSchedulePod(t *testing.T) {
),
st.RegisterPreFilterPlugin(
"FakePreFilter2",
st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.NewString("node2")}, nil),
st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
),
st.RegisterPreFilterPlugin(
"FakePreFilter3",
st.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.NewString("node1", "node2")}, nil),
st.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
nodes: []string{"node1", "node2", "node3"},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: sets.NewString("node2"),
wantNodes: sets.New("node2"),
wantEvaluatedNodes: pointer.Int32(1),
},
{
@@ -1934,11 +1934,11 @@ func TestSchedulerSchedulePod(t *testing.T) {
),
st.RegisterPreFilterPlugin(
"FakePreFilter2",
st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.NewString("node2")}, nil),
st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
),
st.RegisterPreFilterPlugin(
"FakePreFilter3",
st.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.NewString("node1")}, nil),
st.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1")}, nil),
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
@@ -1949,7 +1949,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
NumAllNodes: 3,
Diagnosis: framework.Diagnosis{
NodeToStatusMap: framework.NodeToStatusMap{},
UnschedulablePlugins: sets.String{},
UnschedulablePlugins: sets.Set[string]{},
PreFilterMsg: "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously",
},
},
@@ -1964,7 +1964,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
),
st.RegisterPreFilterPlugin(
"FakePreFilter2",
st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.NewString()}, nil),
st.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New[string]()}, nil),
),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
},
@@ -1975,7 +1975,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
NumAllNodes: 1,
Diagnosis: framework.Diagnosis{
NodeToStatusMap: framework.NodeToStatusMap{},
UnschedulablePlugins: sets.String{},
UnschedulablePlugins: sets.Set[string]{},
PreFilterMsg: "node(s) didn't satisfy plugin FakePreFilter2",
},
},
@@ -2013,7 +2013,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"node1", "node2", "node3"},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: sets.NewString("node2", "node3"),
wantNodes: sets.New("node2", "node3"),
wantEvaluatedNodes: pointer.Int32(3),
},
{
@@ -2029,7 +2029,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
},
nodes: []string{"node1", "node2"},
pod: st.MakePod().Name("ignore").UID("ignore").Obj(),
wantNodes: sets.NewString("node1", "node2"),
wantNodes: sets.New("node1", "node2"),
},
}
for _, test := range tests {
@@ -2138,7 +2138,7 @@ func TestFindFitAllError(t *testing.T) {
"2": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"),
"3": framework.NewStatus(framework.Unschedulable, st.ErrReasonFake).WithFailedPlugin("MatchFilter"),
},
UnschedulablePlugins: sets.NewString("MatchFilter"),
UnschedulablePlugins: sets.New("MatchFilter"),
}
if diff := cmp.Diff(diagnosis, expected); diff != "" {
t.Errorf("Unexpected diagnosis: (-want, +got): %s", diff)
@@ -2177,7 +2177,7 @@ func TestFindFitSomeError(t *testing.T) {
t.Errorf("unexpected failed status map: %v", diagnosis.NodeToStatusMap)
}
if diff := cmp.Diff(sets.NewString("MatchFilter"), diagnosis.UnschedulablePlugins); diff != "" {
if diff := cmp.Diff(sets.New("MatchFilter"), diagnosis.UnschedulablePlugins); diff != "" {
t.Errorf("Unexpected unschedulablePlugins: (-want, +got): %s", diagnosis.UnschedulablePlugins)
}
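
The test above compares sets with cmp.Diff, which handles the generic Set[string] directly (using its Equal method where one is defined); Equal is also available for a plain boolean check. A sketch assuming only go-cmp and the generic sets package, with the plugin name reused purely as sample data:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	want := sets.New("MatchFilter")
	got := sets.New[string]().Insert("MatchFilter") // Insert returns the set, so this chains

	// cmp.Diff compares the two sets; an empty diff means they match.
	fmt.Println(cmp.Diff(want, got) == "") // true
	// Equal is the set-native way to express the same check.
	fmt.Println(want.Equal(got)) // true
}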

View File

@@ -282,7 +282,7 @@ func New(client clientset.Interface,
nodeLister := informerFactory.Core().V1().Nodes().Lister()
snapshot := internalcache.NewEmptySnapshot()
clusterEventMap := make(map[framework.ClusterEvent]sets.String)
clusterEventMap := make(map[framework.ClusterEvent]sets.Set[string])
metricsRecorder := metrics.NewMetricsAsyncRecorder(1000, time.Second, stopCh)
profiles, err := profile.NewMap(options.profiles, registry, recorderFactory, stopCh,
@@ -435,7 +435,7 @@ func buildExtenders(extenders []schedulerapi.Extender, profiles []schedulerapi.K
type FailureHandlerFn func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time)
func unionedGVKs(m map[framework.ClusterEvent]sets.String) map[framework.GVK]framework.ActionType {
func unionedGVKs(m map[framework.ClusterEvent]sets.Set[string]) map[framework.GVK]framework.ActionType {
gvkMap := make(map[framework.GVK]framework.ActionType)
for evt := range m {
if _, ok := gvkMap[evt.Resource]; ok {

View File

@@ -319,20 +319,20 @@ func TestFailureHandler_NodeNotFound(t *testing.T) {
nodes []v1.Node
nodeNameToDelete string
injectErr error
expectNodeNames sets.String
expectNodeNames sets.Set[string]
}{
{
name: "node is deleted during a scheduling cycle",
nodes: []v1.Node{*nodeFoo, *nodeBar},
nodeNameToDelete: "foo",
injectErr: apierrors.NewNotFound(v1.Resource("node"), nodeFoo.Name),
expectNodeNames: sets.NewString("bar"),
expectNodeNames: sets.New("bar"),
},
{
name: "node is not deleted but NodeNotFound is received incorrectly",
nodes: []v1.Node{*nodeFoo, *nodeBar},
injectErr: apierrors.NewNotFound(v1.Resource("node"), nodeFoo.Name),
expectNodeNames: sets.NewString("foo", "bar"),
expectNodeNames: sets.New("foo", "bar"),
},
}
@@ -368,7 +368,7 @@ func TestFailureHandler_NodeNotFound(t *testing.T) {
s.FailureHandler(ctx, fwk, testPodInfo, framework.NewStatus(framework.Unschedulable).WithError(tt.injectErr), nil, time.Now())
gotNodes := schedulerCache.Dump().Nodes
gotNodeNames := sets.NewString()
gotNodeNames := sets.New[string]()
for _, nodeInfo := range gotNodes {
gotNodeNames.Insert(nodeInfo.Node().Name)
}

View File

@@ -536,7 +536,7 @@ func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, e
}
// GetSchedulableClusterZones returns the values of the zone label collected from all schedulable nodes.
func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.Set[string], error) {
// GetReadySchedulableNodes already filters out tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
@@ -544,7 +544,7 @@ func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (set
}
// collect values of zone label from all nodes
zones := sets.NewString()
zones := sets.New[string]()
for _, node := range nodes.Items {
if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
zones.Insert(zone)

View File

@@ -41,7 +41,7 @@ func SIGDescribe(text string, body func()) bool {
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their number.
func WaitForStableCluster(c clientset.Interface, workerNodes sets.String) int {
func WaitForStableCluster(c clientset.Interface, workerNodes sets.Set[string]) int {
startTime := time.Now()
// Wait for all pods to be scheduled.
allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, workerNodes)
@@ -61,7 +61,7 @@ func WaitForStableCluster(c clientset.Interface, workerNodes sets.String) int {
}
// getScheduledAndUnscheduledPods lists scheduled and not scheduled pods in all namespaces, with succeeded and failed pods filtered out.
func getScheduledAndUnscheduledPods(c clientset.Interface, workerNodes sets.String) (scheduledPods, notScheduledPods []v1.Pod) {
func getScheduledAndUnscheduledPods(c clientset.Interface, workerNodes sets.Set[string]) (scheduledPods, notScheduledPods []v1.Pod) {
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for stable cluster", metav1.NamespaceAll))

View File

@@ -58,7 +58,7 @@ const (
var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")
// variable populated in BeforeEach, never modified afterwards
var workerNodes = sets.String{}
var workerNodes = sets.Set[string]{}
type pausePodConfig struct {
Name string
@@ -1154,7 +1154,7 @@ func createHostPortPodOnNode(ctx context.Context, f *framework.Framework, podNam
}
// GetPodsScheduled returns the currently scheduled and not scheduled Pods on worker nodes.
func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
func GetPodsScheduled(workerNodes sets.Set[string], pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if pod.Spec.NodeName != "" && workerNodes.Has(pod.Spec.NodeName) {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)

View File

@@ -514,7 +514,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
runPausePod(ctx, f, mediumPodCfg)
ginkgo.By("Verify there are 3 Pods left in this namespace")
wantPods := sets.NewString("high", "medium", "low")
wantPods := sets.New("high", "medium", "low")
// Wait until the number of pods stabilizes. Note that `medium` pod can get scheduled once the
// second low priority pod is marked as terminating.

View File

@@ -46,7 +46,7 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
var zoneCount int
var err error
var zoneNames sets.String
var zoneNames sets.Set[string]
ginkgo.BeforeEach(func(ctx context.Context) {
cs := f.ClientSet
@@ -79,7 +79,7 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
// SpreadServiceOrFail checks that the pods comprising a service
// get spread evenly across available zones
func SpreadServiceOrFail(ctx context.Context, f *framework.Framework, replicaCount int, zoneNames sets.String, image string) {
func SpreadServiceOrFail(ctx context.Context, f *framework.Framework, replicaCount int, zoneNames sets.Set[string], image string) {
// First create the service
serviceName := "test-service"
serviceSpec := &v1.Service{
@@ -128,7 +128,7 @@ func SpreadServiceOrFail(ctx context.Context, f *framework.Framework, replicaCou
framework.ExpectNoError(err)
// Now make sure they're spread across zones
checkZoneSpreading(ctx, f.ClientSet, pods, zoneNames.List())
checkZoneSpreading(ctx, f.ClientSet, pods, sets.List(zoneNames))
}
// Find the name of the zone in which a Node is running
@@ -182,7 +182,7 @@ func checkZoneSpreading(ctx context.Context, c clientset.Interface, pods *v1.Pod
// SpreadRCOrFail checks that the pods comprising a replication
// controller get spread evenly across available zones
func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount int32, zoneNames sets.String, image string, args []string) {
func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount int32, zoneNames sets.Set[string], image string, args []string) {
name := "ubelite-spread-rc-" + string(uuid.NewUUID())
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, &v1.ReplicationController{
@@ -231,5 +231,5 @@ func SpreadRCOrFail(ctx context.Context, f *framework.Framework, replicaCount in
framework.ExpectNoError(err)
// Now make sure they're spread across zones
checkZoneSpreading(ctx, f.ClientSet, pods, zoneNames.List())
checkZoneSpreading(ctx, f.ClientSet, pods, sets.List(zoneNames))
}
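
The two call sites above change from zoneNames.List() to sets.List(zoneNames) because the generic Set has no sorted List method; sorting lives in the package-level sets.List, and the UnsortedList method returns the elements in unspecified order. A minimal sketch with invented zone names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	zoneNames := sets.New("zone-b", "zone-a", "zone-c")

	// Sorted output comes from the package-level helper.
	fmt.Println(sets.List(zoneNames)) // [zone-a zone-b zone-c]
	// UnsortedList returns the same elements in unspecified order.
	fmt.Println(len(zoneNames.UnsortedList())) // 3
}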

View File

@@ -25,6 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -82,7 +83,7 @@ func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCoun
zones, err := e2enode.GetSchedulableClusterZones(ctx, c)
framework.ExpectNoError(err)
zonelist := zones.List()
zonelist := sets.List(zones)
ginkgo.By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
for i := range configs {

View File

@@ -126,7 +126,7 @@ func getScheduledPods(podInformer coreinformers.PodInformer, namespaces ...strin
return nil, err
}
s := sets.NewString(namespaces...)
s := sets.New(namespaces...)
scheduled := make([]*v1.Pod, 0, len(pods))
for i := range pods {
pod := pods[i]