Merge pull request #71872 from yuexiao-wang/scheduler-nodeinfo

[scheduler cleanup phase 2]: Rename `pkg/scheduler/cache` to `pkg/scheduler/nodeinfo`
Kubernetes Prow Robot · 2018-12-12 08:08:33 -08:00 · committed by GitHub
98 changed files with 596 additions and 596 deletions
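The change is mechanical: every importer switches from the old `pkg/scheduler/cache` path (aliased `schedulercache`) to `pkg/scheduler/nodeinfo` (aliased `schedulernodeinfo`); no call site changes behavior. A minimal sketch of a caller after the rename, assuming the Kubernetes tree at this commit (the node name is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	// Previously: schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	// Construct a NodeInfo (optionally seeded with pods) and attach the node,
	// exactly as the callers in this diff do after the rename.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(node)
	fmt.Println(nodeInfo.Node().Name)
}
```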

@@ -71,7 +71,7 @@
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator",
"k8s.io/kubernetes/pkg/scheduler/algorithm",
"k8s.io/kubernetes/pkg/scheduler/api",
-"k8s.io/kubernetes/pkg/scheduler/cache",
+"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/scheduler/internal/cache",
"k8s.io/kubernetes/pkg/scheduler/util",
"k8s.io/kubernetes/pkg/security/apparmor",

@@ -278,7 +278,7 @@
"k8s.io/kubernetes/pkg/registry/core/secret",
"k8s.io/kubernetes/pkg/scheduler/algorithm",
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
-"k8s.io/kubernetes/pkg/scheduler/cache",
+"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/securitycontext",
"k8s.io/kubernetes/pkg/serviceaccount",
"k8s.io/kubernetes/pkg/util/goroutinemap",

@@ -22,7 +22,7 @@ go_library(
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",

@@ -56,7 +56,7 @@ import (
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/metrics"
)
@@ -1287,13 +1287,13 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
return dsc.updateDaemonSetStatus(ds, hash, true)
}
-func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
+func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulernodeinfo.NodeInfo, error) {
objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
if err != nil {
return nil, nil, err
}
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node)
for _, obj := range objects {
@@ -1428,7 +1428,7 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
// - PodFitsHost: checks pod's NodeName against node
// - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
// - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration
-func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason
fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
if err != nil {
@@ -1458,7 +1458,7 @@ func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
// Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
// and PodToleratesNodeTaints predicate
-func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason
// If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match.
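Only the NodeInfo type name changes in the hunks above, but they show the predicate-composition pattern this controller uses: each predicate returns (fit, failure reasons, error) and the caller accumulates the reasons rather than stopping at the first failure. A hedged sketch of that pattern against the renamed package (the helper name `runNodeFitnessChecks` is illustrative, not part of this PR):

```go
package daemon // illustrative placement; mirrors checkNodeFitness above

import (
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// runNodeFitnessChecks runs the same three checks the comment above lists
// (host name, node selector/affinity, taint toleration) and collects every
// failure reason so callers can report all of them at once.
func runNodeFitnessChecks(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	var failures []algorithm.PredicateFailureReason
	checks := []algorithm.FitPredicate{
		predicates.PodFitsHost,
		predicates.PodMatchNodeSelector,
		predicates.PodToleratesNodeTaints,
	}
	for _, check := range checks {
		fit, reasons, err := check(pod, meta, nodeInfo)
		if err != nil {
			return false, nil, err
		}
		if !fit {
			failures = append(failures, reasons...)
		}
	}
	return len(failures) == 0, failures, nil
}
```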

@@ -121,7 +121,7 @@
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util",
"k8s.io/kubernetes/pkg/scheduler/api",
-"k8s.io/kubernetes/pkg/scheduler/cache",
+"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/scheduler/internal/cache",
"k8s.io/kubernetes/pkg/scheduler/util",
"k8s.io/kubernetes/pkg/scheduler/volumebinder",

@@ -208,7 +208,7 @@ go_test(
"//pkg/kubelet/util/sliceutils:go_default_library",
"//pkg/kubelet/volumemanager:go_default_library",
"//pkg/scheduler/api:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/version:go_default_library",

@@ -35,7 +35,7 @@ go_library(
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/status:go_default_library",
"//pkg/kubelet/util/pluginwatcher:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",

@@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"fmt"
"strconv"
@@ -90,7 +90,7 @@ type ContainerManager interface {
// Otherwise, it updates allocatableResource in nodeInfo if necessary,
// to make sure it is at least equal to the pod's requested capacity for
// any registered device plugin resource
-UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error
+UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error
InternalContainerLifecycle() InternalContainerLifecycle

@@ -55,7 +55,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/oom"
@@ -628,7 +628,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
return opts, nil
}
-func (cm *containerManagerImpl) UpdatePluginResources(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
return cm.deviceManager.Allocate(node, attrs)
}

@@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
type containerManagerStub struct{}
@@ -94,7 +94,7 @@ func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Containe
return &kubecontainer.RunContainerOptions{}, nil
}
-func (cm *containerManagerStub) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerStub) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error {
return nil
}

@@ -39,7 +39,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/mount"
)
@@ -156,7 +156,7 @@ func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Containe
return &kubecontainer.RunContainerOptions{}, nil
}
-func (cm *containerManagerImpl) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error {
+func (cm *containerManagerImpl) UpdatePluginResources(*schedulernodeinfo.NodeInfo, *lifecycle.PodAdmitAttributes) error {
return nil
}

@@ -25,7 +25,7 @@ go_library(
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/util/pluginwatcher:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -47,7 +47,7 @@ go_test(
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/util/pluginwatcher:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

@@ -41,7 +41,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// ActivePodsFunc is a function that returns a list of pods to reconcile.
@@ -313,7 +313,7 @@ func (m *ManagerImpl) isVersionCompatibleWithPlugin(versions []string) bool {
// Allocate is the call that you can use to allocate a set of devices
// from the registered device plugins.
-func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (m *ManagerImpl) Allocate(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
pod := attrs.Pod
devicesToReuse := make(map[string]sets.String)
for _, container := range pod.Spec.InitContainers {
@@ -769,8 +769,8 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
// and if necessary, updates allocatableResource in nodeInfo to at least equal to
// the allocated capacity. This allows pods that have already been scheduled on
// the node to pass GeneralPredicates admission checking even upon device plugin failure.
-func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulercache.NodeInfo) {
-var newAllocatableResource *schedulercache.Resource
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulernodeinfo.NodeInfo) {
+var newAllocatableResource *schedulernodeinfo.Resource
allocatableResource := node.AllocatableResource()
if allocatableResource.ScalarResources == nil {
allocatableResource.ScalarResources = make(map[v1.ResourceName]int64)
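The sanitizeNodeAllocatable comment above describes the invariant being maintained: after a device-plugin allocation, the NodeInfo's allocatable amount for that scalar resource must be at least what has already been handed out, so previously scheduled pods keep passing GeneralPredicates. A rough sketch of that adjustment, assuming the AllocatableResource getter visible above plus a SetAllocatableResource setter on NodeInfo (the setter, resource name, and count are assumptions for illustration):

```go
package devicemanagersketch // hypothetical package, for illustration only

import (
	"k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// ensureAllocatableAtLeast bumps nodeInfo's allocatable for one scalar
// (device-plugin) resource so it is no smaller than what is already allocated.
func ensureAllocatableAtLeast(node *schedulernodeinfo.NodeInfo, name v1.ResourceName, allocated int64) {
	allocatable := node.AllocatableResource() // copy of the node's Resource
	if allocatable.ScalarResources == nil {
		allocatable.ScalarResources = make(map[v1.ResourceName]int64)
	}
	if allocatable.ScalarResources[name] < allocated {
		allocatable.ScalarResources[name] = allocated
		node.SetAllocatableResource(&allocatable) // assumed setter; write the adjusted copy back
	}
}
```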

@@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// ManagerStub provides a simple stub implementation for the Device Manager.
@@ -44,7 +44,7 @@ func (h *ManagerStub) Stop() error {
}
// Allocate simply returns nil.
-func (h *ManagerStub) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+func (h *ManagerStub) Allocate(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
return nil
}

@@ -37,7 +37,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
const (
@@ -635,13 +635,13 @@ func getTestManager(tmpDir string, activePods ActivePodsFunc, testRes []TestReso
return testManager, nil
}
-func getTestNodeInfo(allocatable v1.ResourceList) *schedulercache.NodeInfo {
+func getTestNodeInfo(allocatable v1.ResourceList) *schedulernodeinfo.NodeInfo {
cachedNode := &v1.Node{
Status: v1.NodeStatus{
Allocatable: allocatable,
},
}
-nodeInfo := &schedulercache.NodeInfo{}
+nodeInfo := &schedulernodeinfo.NodeInfo{}
nodeInfo.SetNode(cachedNode)
return nodeInfo
}
@@ -875,7 +875,7 @@ func TestSanitizeNodeAllocatable(t *testing.T) {
},
},
}
-nodeInfo := &schedulercache.NodeInfo{}
+nodeInfo := &schedulernodeinfo.NodeInfo{}
nodeInfo.SetNode(cachedNode)
testManager.sanitizeNodeAllocatable(nodeInfo)

@@ -25,7 +25,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
watcher "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// Manager manages all the Device Plugins running on a node.
@@ -41,7 +41,7 @@ type Manager interface {
// variables, mount points and device files). The node object is provided
// for the device manager to update the node capacity to reflect the
// currently available devices.
-Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
+Allocate(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error
// Stop stops the manager.
Stop() error

@@ -66,7 +66,7 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/awsebs"
@@ -659,7 +659,7 @@ func TestHandlePluginResources(t *testing.T) {
}
kl.nodeInfo = testNodeInfo{nodes: nodes}
-updatePluginResourcesFunc := func(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
+updatePluginResourcesFunc := func(node *schedulernodeinfo.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
// Maps from resourceName to the value we use to set node.allocatableResource[resourceName].
// A resource with invalid value (< 0) causes the function to return an error
// to emulate resource Allocation failure.

@@ -23,7 +23,7 @@ go_library(
"//pkg/kubelet/util/format:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -42,7 +42,7 @@ go_test(
deps = [
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",

@@ -26,12 +26,12 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
type getNodeAnyWayFuncType func() (*v1.Node, error)
-type pluginResourceUpdateFuncType func(*schedulercache.NodeInfo, *PodAdmitAttributes) error
+type pluginResourceUpdateFuncType func(*schedulernodeinfo.NodeInfo, *PodAdmitAttributes) error
// AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod.
// This allows for the graceful handling of pod admission failure.
@@ -67,7 +67,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
admitPod := attrs.Pod
pods := attrs.OtherPods
-nodeInfo := schedulercache.NewNodeInfo(pods...)
+nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
nodeInfo.SetNode(node)
// ensure the node has enough plugin resources for that required in pods
if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
@@ -155,7 +155,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
}
}
-func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) *v1.Pod {
+func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *v1.Pod {
podCopy := pod.DeepCopy()
for i, c := range pod.Spec.Containers {
// We only handle requests in Requests but not Limits because the
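The Admit hunk above shows the kubelet's admission flow: build a NodeInfo from the pods already admitted, attach the node, let the plugin-resource hook adjust allocatable, then run the predicates. A compact sketch of that sequence under the renamed package (the package, function, and hook type below are illustrative stand-ins, not code from this PR):

```go
package lifecyclesketch // hypothetical package, for illustration only

import (
	"k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// pluginResourceUpdateFunc loosely mirrors the pluginResourceUpdateFuncType
// in the hunk above, minus the kubelet-internal PodAdmitAttributes argument.
type pluginResourceUpdateFunc func(*schedulernodeinfo.NodeInfo) error

// buildNodeInfoForAdmission reproduces the Admit sequence shown above: seed a
// NodeInfo with the already-admitted pods, attach the node, and give the
// device-plugin hook a chance to raise allocatable before predicates run.
func buildNodeInfoForAdmission(node *v1.Node, otherPods []*v1.Pod, update pluginResourceUpdateFunc) (*schedulernodeinfo.NodeInfo, error) {
	nodeInfo := schedulernodeinfo.NewNodeInfo(otherPods...)
	nodeInfo.SetNode(node)
	if update != nil {
		if err := update(nodeInfo); err != nil {
			return nil, err
		}
	}
	return nodeInfo, nil
}
```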

@@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
var (
@@ -81,7 +81,7 @@ func TestRemoveMissingExtendedResources(t *testing.T) {
),
},
} {
-nodeInfo := schedulercache.NewNodeInfo()
+nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(test.node)
pod := removeMissingExtendedResources(test.pod, nodeInfo)
if !reflect.DeepEqual(pod, test.expectedPod) {

@@ -51,11 +51,11 @@ go_test(
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/cache/fake:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -91,12 +91,12 @@ filegroup(
"//pkg/scheduler/algorithmprovider:all-srcs",
"//pkg/scheduler/api:all-srcs",
"//pkg/scheduler/apis/config:all-srcs",
-"//pkg/scheduler/cache:all-srcs",
"//pkg/scheduler/core:all-srcs",
"//pkg/scheduler/factory:all-srcs",
"//pkg/scheduler/internal/cache:all-srcs",
"//pkg/scheduler/internal/queue:all-srcs",
"//pkg/scheduler/metrics:all-srcs",
+"//pkg/scheduler/nodeinfo:all-srcs",
"//pkg/scheduler/plugins:all-srcs",
"//pkg/scheduler/testing:all-srcs",
"//pkg/scheduler/util:all-srcs",

@@ -16,8 +16,8 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm",
deps = [
"//pkg/scheduler/api:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
@@ -30,7 +30,7 @@ go_test(
srcs = ["types_test.go"],
embed = [":go_default_library"],
deps = [
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],

@@ -25,7 +25,7 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//pkg/volume/util:go_default_library",
@@ -61,7 +61,7 @@ go_test(
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/api:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",

@@ -24,7 +24,7 @@ import (
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -45,7 +45,7 @@ func NewCSIMaxVolumeLimitPredicate(
}
func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
-pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// if feature gate is disable we return
if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {

@@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -748,7 +748,7 @@ func TestVolumeCountConflicts(t *testing.T) {
for _, test := range tests {
os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
-fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulercache.NewNodeInfo(test.existingPods...))
+fits, reasons, err := pred(test.newPod, PredicateMetadata(test.newPod, nil), schedulernodeinfo.NewNodeInfo(test.existingPods...))
if err != nil {
t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
@@ -895,8 +895,8 @@ func TestMaxVolumeFuncM4(t *testing.T) {
}
}
-func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulercache.NodeInfo {
-nodeInfo := schedulercache.NewNodeInfo(pods...)
+func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, filter string) *schedulernodeinfo.NodeInfo {
+nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
Status: v1.NodeStatus{

@@ -30,7 +30,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -68,7 +68,7 @@ type topologyPairsMaps struct {
type predicateMetadata struct {
pod *v1.Pod
podBestEffort bool
-podRequest *schedulercache.Resource
+podRequest *schedulernodeinfo.Resource
podPorts []*v1.ContainerPort
topologyPairsAntiAffinityPodsMap *topologyPairsMaps
@@ -126,7 +126,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Predic
}
// GetMetadata returns the predicateMetadata used which will be used by various predicates.
-func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata {
+func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
// If we cannot compute metadata, just return nil
if pod == nil {
return nil
@@ -230,7 +230,7 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod) error {
// AddPod changes predicateMetadata assuming that `newPod` is added to the
// system.
-func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error {
+func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error {
addedPodFullName := schedutil.GetPodFullName(addedPod)
if addedPodFullName == schedutil.GetPodFullName(meta.pod) {
return fmt.Errorf("addedPod and meta.pod must not be the same")
@@ -359,7 +359,7 @@ func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTerm
// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node:
// (1) Whether it has PodAntiAffinity
// (2) Whether any AffinityTerm matches the incoming pod
-func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) {
+func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (*topologyPairsMaps, error) {
allNodeNames := make([]string, 0, len(nodeInfoMap))
for name := range nodeInfoMap {
allNodeNames = append(allNodeNames, name)
@@ -407,7 +407,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s
// It returns a topologyPairsMaps that are checked later by the affinity
// predicate. With this topologyPairsMaps available, the affinity predicate does not
// need to check all the pods in the cluster.
-func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) {
+func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) {
affinity := pod.Spec.Affinity
if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) {
return newTopologyPairsMaps(), newTopologyPairsMaps(), nil

@@ -24,7 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)
@@ -354,8 +354,8 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
allPodLister := schedulertesting.FakePodLister(append(test.existingPods, test.addedPod))
// getMeta creates predicate meta data given the list of pods.
-getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulercache.NodeInfo) {
-nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(lister, test.nodes)
+getMeta := func(lister schedulertesting.FakePodLister) (*predicateMetadata, map[string]*schedulernodeinfo.NodeInfo) {
+nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(lister, test.nodes)
// nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo.
nodeList := []v1.Node{}
for _, n := range test.nodes {
@@ -407,7 +407,7 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) {
},
},
podBestEffort: true,
-podRequest: &schedulercache.Resource{
+podRequest: &schedulernodeinfo.Resource{
MilliCPU: 1000,
Memory: 300,
AllowedPodNumber: 4,
@@ -775,7 +775,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)
+nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes)
gotAffinityPodsMaps, gotAntiAffinityPodsMaps, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, nodeInfoMap)
if (err != nil) != tt.wantErr {
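The tests above rely on CreateNodeNameToInfoMap to turn plain pod and node slices into the node-name → NodeInfo map that GetMetadata and the predicates consume. A small usage sketch against the renamed package (the pod and node contents are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "node-b"}},
	}
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "p1"}, Spec: v1.PodSpec{NodeName: "node-a"}},
	}
	// Same call the tests above use, just through the new import path.
	nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(pods, nodes)
	for name, info := range nodeInfoMap {
		fmt.Printf("%s has %d pod(s)\n", name, len(info.Pods()))
	}
}
```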

@@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -274,7 +274,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
// - ISCSI forbids if any two pods share at least same IQN, LUN and Target
// TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func NoDiskConflict(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
for _, v := range pod.Spec.Volumes {
for _, ev := range nodeInfo.Pods() {
if isVolumeConflict(v, ev) {
@@ -447,7 +447,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
return nil
}
-func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
@@ -584,7 +584,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
return c.predicate
}
-func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
@@ -680,7 +680,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
return true, nil, nil
}
-// GetResourceRequest returns a *schedulercache.Resource that covers the largest
+// GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
@@ -704,8 +704,8 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
-func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
-result := &schedulercache.Resource{}
+func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource {
+result := &schedulernodeinfo.Resource{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests)
}
@@ -725,7 +725,7 @@ func podName(pod *v1.Pod) string {
// PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
// predicate failure reasons if the node has insufficient resources to run the pod.
-func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -740,7 +740,7 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
// No extended resources should be ignored by default.
ignoredExtendedResources := sets.NewString()
-var podRequest *schedulercache.Resource
+var podRequest *schedulernodeinfo.Resource
if predicateMeta, ok := meta.(*predicateMetadata); ok {
podRequest = predicateMeta.podRequest
if predicateMeta.ignoredExtendedResources != nil {
@@ -850,7 +850,7 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
}
// PodMatchNodeSelector checks if a pod node selector matches the node label.
-func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -862,7 +862,7 @@ func PodMatchNodeSelector(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInf
}
// PodFitsHost checks if a pod spec node name matches the current node.
-func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHost(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
if len(pod.Spec.NodeName) == 0 {
return true, nil, nil
}
@@ -904,7 +904,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -989,7 +989,7 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
//
// WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
// For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
-func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var services []*v1.Service
var pods []*v1.Pod
if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
@@ -1028,7 +1028,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi
}
// PodFitsHostPorts checks if a node has free ports for the requested pod ports.
-func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var wantPorts []*v1.ContainerPort
if predicateMeta, ok := meta.(*predicateMetadata); ok {
wantPorts = predicateMeta.podPorts
@@ -1068,7 +1068,7 @@ func haveOverlap(a1, a2 []string) bool {
// GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. noncriticalPredicates are the predicates
// that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need
-func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason
fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo)
if err != nil {
@@ -1090,7 +1090,7 @@ func GeneralPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *
}
// noncriticalPredicates are the predicates that only non-critical pods need
-func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason
fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
if err != nil {
@@ -1104,7 +1104,7 @@ func noncriticalPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeIn
}
// EssentialPredicates are the predicates that all pods, including critical pods, need
-func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func EssentialPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason
fit, reasons, err := PodFitsHost(pod, meta, nodeInfo)
if err != nil {
@@ -1152,7 +1152,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algor
// InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
// First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the
// predicate failure reasons if the pod cannot be scheduled on the specified node.
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -1186,7 +1186,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
// targetPod matches all the terms and their topologies, 2) whether targetPod
// matches all the terms label selector and namespaces (AKA term properties),
// 3) any error.
-func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
+func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) {
if len(terms) == 0 {
return false, false, fmt.Errorf("terms array is empty")
}
@@ -1290,7 +1290,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
// Checks if scheduling the pod onto this node would break any anti-affinity
// terms indicated by the existing pods.
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil")
@@ -1333,7 +1333,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches
// topology of all the "terms" for the given "pod".
-func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool {
+func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
@@ -1350,7 +1350,7 @@ func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPa
// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
// topology of any "term" for the given "pod".
-func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool {
+func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulernodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node() node := nodeInfo.Node()
for _, term := range terms { for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok { if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
@@ -1365,7 +1365,7 @@ func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPai
// Checks if scheduling the pod onto this node would break any term of this pod. // Checks if scheduling the pod onto this node would break any term of this pod.
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo,
affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) { affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
@@ -1466,7 +1466,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
} }
// CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec. // CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
if nodeInfo == nil || nodeInfo.Node() == nil { if nodeInfo == nil || nodeInfo.Node() == nil {
return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
} }
@@ -1486,7 +1486,7 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada
} }
// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints // PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
if nodeInfo == nil || nodeInfo.Node() == nil { if nodeInfo == nil || nodeInfo.Node() == nil {
return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
} }
@@ -1498,13 +1498,13 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI
} }
// PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints // PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints
func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
return t.Effect == v1.TaintEffectNoExecute return t.Effect == v1.TaintEffectNoExecute
}) })
} }
func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) { func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) {
taints, err := nodeInfo.Taints() taints, err := nodeInfo.Taints()
if err != nil { if err != nil {
return false, nil, err return false, nil, err
@@ -1523,7 +1523,7 @@ func isPodBestEffort(pod *v1.Pod) bool {
// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition. // reporting memory pressure condition.
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var podBestEffort bool var podBestEffort bool
if predicateMeta, ok := meta.(*predicateMetadata); ok { if predicateMeta, ok := meta.(*predicateMetadata); ok {
podBestEffort = predicateMeta.podBestEffort podBestEffort = predicateMeta.podBestEffort
@@ -1545,7 +1545,7 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetad
// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition. // reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// check if node is under disk pressure // check if node is under disk pressure
if nodeInfo.DiskPressureCondition() == v1.ConditionTrue { if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
@@ -1555,7 +1555,7 @@ func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadat
// CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node // CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
// reporting pid pressure condition. // reporting pid pressure condition.
func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// check if node is under pid pressure // check if node is under pid pressure
if nodeInfo.PIDPressureCondition() == v1.ConditionTrue { if nodeInfo.PIDPressureCondition() == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil return false, []algorithm.PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
@@ -1565,7 +1565,7 @@ func CheckNodePIDPressurePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata
// CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting out of disk, // CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting out of disk,
// network unavailable and not ready condition. Only node conditions are accounted in this predicate. // network unavailable and not ready condition. Only node conditions are accounted in this predicate.
func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
reasons := []algorithm.PredicateFailureReason{} reasons := []algorithm.PredicateFailureReason{}
if nodeInfo == nil || nodeInfo.Node() == nil { if nodeInfo == nil || nodeInfo.Node() == nil {
@@ -1617,7 +1617,7 @@ func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitP
return c.predicate return c.predicate
} }
func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
return true, nil, nil return true, nil, nil
} }
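Editor's note for reviewers tracking the rename through this file: every hunk above is a mechanical substitution of the old schedulercache alias with schedulernodeinfo in the *NodeInfo parameter types; predicate semantics are untouched. Below is a minimal, hypothetical sketch (not part of this diff) of a caller exercising one of the updated predicates through the renamed package. The node, taint, and pod values are invented for illustration, and importing these k8s.io/kubernetes internal packages from outside the main repo is assumed to be workable for the sake of the example.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	// Build a NodeInfo the same way the tests below do: NewNodeInfo, then SetNode.
	nodeInfo := schedulernodeinfo.NewNodeInfo()
	node := &v1.Node{
		// Illustrative node with a single NoSchedule taint; values are made up.
		ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}},
		},
	}
	if err := nodeInfo.SetNode(node); err != nil {
		panic(err)
	}

	// A pod with no matching toleration is rejected by PodToleratesNodeTaints.
	// The metadata argument can be nil here because this predicate does not use it.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}
	fits, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
	fmt.Println(fits, reasons, err) // expected: false, a taint/toleration mismatch reason, nil
}

The only thing that changes for such a caller is the import path and alias; the predicate signatures and return values shown in the hunks above are otherwise identical before and after this PR.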

View File

@@ -36,7 +36,7 @@ import (
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
) )
@@ -72,7 +72,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
} }
} }
func newResourcePod(usage ...schedulercache.Resource) *v1.Pod { func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
containers := []v1.Container{} containers := []v1.Container{}
for _, req := range usage { for _, req := range usage {
containers = append(containers, v1.Container{ containers = append(containers, v1.Container{
@@ -86,12 +86,12 @@ func newResourcePod(usage ...schedulercache.Resource) *v1.Pod {
} }
} }
func newResourceInitPod(pod *v1.Pod, usage ...schedulercache.Resource) *v1.Pod { func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Pod {
pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
return pod return pod
} }
func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata { func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}} pm := PredicateMetadataFactory{schedulertesting.FakePodLister{p}}
return pm.GetMetadata(p, nodeInfo) return pm.GetMetadata(p, nodeInfo)
} }
@@ -99,7 +99,7 @@ func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo)
func TestPodFitsResources(t *testing.T) { func TestPodFitsResources(t *testing.T) {
enoughPodsTests := []struct { enoughPodsTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
reasons []algorithm.PredicateFailureReason reasons []algorithm.PredicateFailureReason
@@ -107,15 +107,15 @@ func TestPodFitsResources(t *testing.T) {
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
fits: true, fits: true,
name: "no resources requested always fits", name: "no resources requested always fits",
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
fits: false, fits: false,
name: "too many resources fails", name: "too many resources fails",
reasons: []algorithm.PredicateFailureReason{ reasons: []algorithm.PredicateFailureReason{
@@ -124,234 +124,234 @@ func TestPodFitsResources(t *testing.T) {
}, },
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to init container cpu", name: "too many resources fails due to init container cpu",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}, schedulercache.Resource{MilliCPU: 2, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to highest init container cpu", name: "too many resources fails due to highest init container cpu",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to init container memory", name: "too many resources fails due to init container memory",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}, schedulercache.Resource{MilliCPU: 1, Memory: 2}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
fits: false, fits: false,
name: "too many resources fails due to highest init container memory", name: "too many resources fails due to highest init container memory",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
fits: true, fits: true,
name: "init container fits because it's the max, not sum, of containers and init containers", name: "init container fits because it's the max, not sum, of containers and init containers",
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}, schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
fits: true, fits: true,
name: "multiple init containers fit because it's the max, not sum, of containers and init containers", name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
fits: true, fits: true,
name: "both resources fit", name: "both resources fit",
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 5})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
fits: false, fits: false,
name: "one resource memory fits", name: "one resource memory fits",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)},
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 2}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false, fits: false,
name: "one resource cpu fits", name: "one resource cpu fits",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)},
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: true, fits: true,
name: "equal edge case", name: "equal edge case",
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 4, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 4, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: true, fits: true,
name: "equal edge case for init container", name: "equal edge case for init container",
}, },
{ {
pod: newResourcePod(schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), pod: newResourcePod(schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})), nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
fits: true, fits: true,
name: "extended resource fits", name: "extended resource fits",
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})), nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
fits: true, fits: true,
name: "extended resource fits for init container", name: "extended resource fits for init container",
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false, fits: false,
name: "extended resource capacity enforced", name: "extended resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
fits: false, fits: false,
name: "extended resource capacity enforced for init container", name: "extended resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false, fits: false,
name: "extended resource allocatable enforced", name: "extended resource allocatable enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
fits: false, fits: false,
name: "extended resource allocatable enforced for init container", name: "extended resource allocatable enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 1, 5, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false, fits: false,
name: "extended resource allocatable enforced for multiple containers", name: "extended resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: true, fits: true,
name: "extended resource allocatable admits multiple init containers", name: "extended resource allocatable admits multiple init containers",
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
fits: false, fits: false,
name: "extended resource allocatable enforced for multiple init containers", name: "extended resource allocatable enforced for multiple init containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceA, 6, 2, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "extended resource allocatable enforced for unknown resource", name: "extended resource allocatable enforced for unknown resource",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "extended resource allocatable enforced for unknown resource for init container", name: "extended resource allocatable enforced for unknown resource for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(extendedResourceB, 1, 0, 0)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "kubernetes.io resource capacity enforced", name: "kubernetes.io resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: false, fits: false,
name: "kubernetes.io resource capacity enforced for init container", name: "kubernetes.io resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
fits: false, fits: false,
name: "hugepages resource capacity enforced", name: "hugepages resource capacity enforced",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
fits: false, fits: false,
name: "hugepages resource capacity enforced for init container", name: "hugepages resource capacity enforced for init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
fits: false, fits: false,
name: "hugepages resource allocatable enforced for multiple containers", name: "hugepages resource allocatable enforced for multiple containers",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulercache.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
fits: true, fits: true,
ignoredExtendedResources: sets.NewString(string(extendedResourceB)), ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
name: "skip checking ignored extended resource", name: "skip checking ignored extended resource",
@@ -379,39 +379,39 @@ func TestPodFitsResources(t *testing.T) {
notEnoughPodsTests := []struct { notEnoughPodsTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
reasons []algorithm.PredicateFailureReason reasons []algorithm.PredicateFailureReason
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
fits: false, fits: false,
name: "even without specified resources predicate fails when there's no space for additional pod", name: "even without specified resources predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
fits: false, fits: false,
name: "even if both resources fit predicate fails when there's no space for additional pod", name: "even if both resources fit predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false, fits: false,
name: "even for equal edge case predicate fails when there's no space for additional pod", name: "even for equal edge case predicate fails when there's no space for additional pod",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
fits: false, fits: false,
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container", name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)},
@@ -436,15 +436,15 @@ func TestPodFitsResources(t *testing.T) {
storagePodsTests := []struct { storagePodsTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
reasons []algorithm.PredicateFailureReason reasons []algorithm.PredicateFailureReason
}{ }{
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
fits: false, fits: false,
name: "due to container scratch disk", name: "due to container scratch disk",
reasons: []algorithm.PredicateFailureReason{ reasons: []algorithm.PredicateFailureReason{
@@ -452,16 +452,16 @@ func TestPodFitsResources(t *testing.T) {
}, },
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 10})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 10})),
fits: true, fits: true,
name: "pod fit", name: "pod fit",
}, },
{ {
pod: newResourcePod(schedulercache.Resource{EphemeralStorage: 25}), pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 25}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
fits: false, fits: false,
name: "storage ephemeral local storage request exceeds allocatable", name: "storage ephemeral local storage request exceeds allocatable",
reasons: []algorithm.PredicateFailureReason{ reasons: []algorithm.PredicateFailureReason{
@@ -469,9 +469,9 @@ func TestPodFitsResources(t *testing.T) {
}, },
}, },
{ {
pod: newResourcePod(schedulercache.Resource{EphemeralStorage: 10}), pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
fits: true, fits: true,
name: "pod fits", name: "pod fits",
}, },
@@ -542,7 +542,7 @@ func TestPodFitsHost(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := PodFitsHost(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
if err != nil { if err != nil {
@@ -585,96 +585,96 @@ func newPod(host string, hostPortInfos ...string) *v1.Pod {
func TestPodFitsHostPorts(t *testing.T) { func TestPodFitsHostPorts(t *testing.T) {
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulercache.NewNodeInfo(), nodeInfo: schedulernodeinfo.NewNodeInfo(),
fits: true, fits: true,
name: "nothing running", name: "nothing running",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/9090")), newPod("m1", "UDP/127.0.0.1/9090")),
fits: true, fits: true,
name: "other port", name: "other port",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")), newPod("m1", "UDP/127.0.0.1/8080")),
fits: false, fits: false,
name: "same udp port", name: "same udp port",
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8080"), pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")), newPod("m1", "TCP/127.0.0.1/8080")),
fits: false, fits: false,
name: "same tcp port", name: "same tcp port",
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8080"), pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.2/8080")), newPod("m1", "TCP/127.0.0.2/8080")),
fits: true, fits: true,
name: "different host ip", name: "different host ip",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")), newPod("m1", "TCP/127.0.0.1/8080")),
fits: true, fits: true,
name: "different protocol", name: "different protocol",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")), newPod("m1", "UDP/127.0.0.1/8080")),
fits: false, fits: false,
name: "second udp port conflict", name: "second udp port conflict",
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"), pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")), newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
fits: false, fits: false,
name: "first tcp port conflict", name: "first tcp port conflict",
}, },
{ {
pod: newPod("m1", "TCP/0.0.0.0/8001"), pod: newPod("m1", "TCP/0.0.0.0/8001"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")), newPod("m1", "TCP/127.0.0.1/8001")),
fits: false, fits: false,
name: "first tcp port conflict due to 0.0.0.0 hostIP", name: "first tcp port conflict due to 0.0.0.0 hostIP",
}, },
{ {
pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"), pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")), newPod("m1", "TCP/127.0.0.1/8001")),
fits: false, fits: false,
name: "TCP hostPort conflict due to 0.0.0.0 hostIP", name: "TCP hostPort conflict due to 0.0.0.0 hostIP",
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8001"), pod: newPod("m1", "TCP/127.0.0.1/8001"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")), newPod("m1", "TCP/0.0.0.0/8001")),
fits: false, fits: false,
name: "second tcp port conflict to 0.0.0.0 hostIP", name: "second tcp port conflict to 0.0.0.0 hostIP",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8001"), pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")), newPod("m1", "TCP/0.0.0.0/8001")),
fits: true, fits: true,
name: "second different protocol", name: "second different protocol",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8001"), pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")), newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
fits: false, fits: false,
name: "UDP hostPort conflict due to 0.0.0.0 hostIP", name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
@@ -723,14 +723,14 @@ func TestGCEDiskConflicts(t *testing.T) {
} }
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
isOk bool isOk bool
name string name string
}{ }{
{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"},
{&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"},
{&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
@@ -778,14 +778,14 @@ func TestAWSDiskConflicts(t *testing.T) {
} }
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
isOk bool isOk bool
name string name string
}{ }{
{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"},
{&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"},
{&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
@@ -839,14 +839,14 @@ func TestRBDDiskConflicts(t *testing.T) {
} }
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
isOk bool isOk bool
name string name string
}{ }{
{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"},
{&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"},
{&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
@@ -900,14 +900,14 @@ func TestISCSIDiskConflicts(t *testing.T) {
} }
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
isOk bool isOk bool
name string name string
}{ }{
{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing"},
{&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, {&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"},
{&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, {&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
{&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, {&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
} }
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
@@ -1611,7 +1611,7 @@ func TestPodFitsSelector(t *testing.T) {
Name: test.nodeName, Name: test.nodeName,
Labels: test.labels, Labels: test.labels,
}} }}
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
@@ -1679,7 +1679,7 @@ func TestNodeLabelPresence(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}} node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
labelChecker := NodeLabelChecker{test.labels, test.presence} labelChecker := NodeLabelChecker{test.labels, test.presence}
@@ -1828,9 +1828,9 @@ func TestServiceAffinity(t *testing.T) {
testIt := func(skipPrecompute bool) { testIt := func(skipPrecompute bool) {
t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) { t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
nodes := []v1.Node{node1, node2, node3, node4, node5} nodes := []v1.Node{node1, node2, node3, node4, node5}
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo} nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
// Reimplementing the logic that the scheduler implements: Any time it makes a predicate, it registers any precomputations. // Reimplementing the logic that the scheduler implements: Any time it makes a predicate, it registers any precomputations.
predicate, precompute := NewServiceAffinityPredicate(schedulertesting.FakePodLister(test.pods), schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels) predicate, precompute := NewServiceAffinityPredicate(schedulertesting.FakePodLister(test.pods), schedulertesting.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels)
// Register a precomputation or Rewrite the precomputation to a no-op, depending on the state we want to test. // Register a precomputation or Rewrite the precomputation to a no-op, depending on the state we want to test.
@@ -1880,7 +1880,7 @@ func newPodWithPort(hostPorts ...int) *v1.Pod {
func TestRunGeneralPredicates(t *testing.T) { func TestRunGeneralPredicates(t *testing.T) {
resourceTests := []struct { resourceTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
node *v1.Node node *v1.Node
fits bool fits bool
name string name string
@@ -1889,8 +1889,8 @@ func TestRunGeneralPredicates(t *testing.T) {
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
node: &v1.Node{ node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -1900,9 +1900,9 @@ func TestRunGeneralPredicates(t *testing.T) {
name: "no resources/port/host requested always fits", name: "no resources/port/host requested always fits",
}, },
{ {
pod: newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 10}), pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 10}),
nodeInfo: schedulercache.NewNodeInfo( nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
node: &v1.Node{ node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -1921,7 +1921,7 @@ func TestRunGeneralPredicates(t *testing.T) {
NodeName: "machine2", NodeName: "machine2",
}, },
}, },
nodeInfo: schedulercache.NewNodeInfo(), nodeInfo: schedulernodeinfo.NewNodeInfo(),
node: &v1.Node{ node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -1933,7 +1933,7 @@ func TestRunGeneralPredicates(t *testing.T) {
}, },
{ {
pod: newPodWithPort(123), pod: newPodWithPort(123),
nodeInfo: schedulercache.NewNodeInfo(newPodWithPort(123)), nodeInfo: schedulernodeinfo.NewNodeInfo(newPodWithPort(123)),
node: &v1.Node{ node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "machine1"}, ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)}, Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -2908,9 +2908,9 @@ func TestInterPodAffinity(t *testing.T) {
info: FakeNodeInfo(*node), info: FakeNodeInfo(*node),
podLister: schedulertesting.FakePodLister(test.pods), podLister: schedulertesting.FakePodLister(test.pods),
} }
nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo} nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) fits, reasons, _ := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) { if !fits && !reflect.DeepEqual(reasons, test.expectFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons) t.Errorf("unexpected failure reasons: %v, want: %v", reasons, test.expectFailureReasons)
@@ -4003,7 +4003,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
for indexTest, test := range tests { for indexTest, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeListInfo := FakeNodeListInfo(test.nodes) nodeListInfo := FakeNodeListInfo(test.nodes)
nodeInfoMap := make(map[string]*schedulercache.NodeInfo) nodeInfoMap := make(map[string]*schedulernodeinfo.NodeInfo)
for i, node := range test.nodes { for i, node := range test.nodes {
var podsOnNode []*v1.Pod var podsOnNode []*v1.Pod
for _, pod := range test.pods { for _, pod := range test.pods {
@@ -4012,7 +4012,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
} }
} }
nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
nodeInfo.SetNode(&test.nodes[i]) nodeInfo.SetNode(&test.nodes[i])
nodeInfoMap[node.Name] = nodeInfo nodeInfoMap[node.Name] = nodeInfo
} }
@@ -4034,9 +4034,9 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
} }
affinity := test.pod.Spec.Affinity affinity := test.pod.Spec.Affinity
if affinity != nil && affinity.NodeAffinity != nil { if affinity != nil && affinity.NodeAffinity != nil {
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo} nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo}
fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) fits2, reasons, err := PodMatchNodeSelector(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
@@ -4242,7 +4242,7 @@ func TestPodToleratesTaints(t *testing.T) {
for _, test := range podTolerateTaintsTests { for _, test := range podTolerateTaintsTests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&test.node) nodeInfo.SetNode(&test.node)
fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) fits, reasons, err := PodToleratesNodeTaints(test.pod, PredicateMetadata(test.pod, nil), nodeInfo)
if err != nil { if err != nil {
@@ -4258,8 +4258,8 @@ func TestPodToleratesTaints(t *testing.T) {
} }
} }
func makeEmptyNodeInfo(node *v1.Node) *schedulercache.NodeInfo { func makeEmptyNodeInfo(node *v1.Node) *schedulernodeinfo.NodeInfo {
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node) nodeInfo.SetNode(node)
return nodeInfo return nodeInfo
} }
@@ -4323,7 +4323,7 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
}{ }{
@@ -4409,7 +4409,7 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
}{ }{
@@ -4471,7 +4471,7 @@ func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) {
} }
tests := []struct { tests := []struct {
nodeInfo *schedulercache.NodeInfo nodeInfo *schedulernodeinfo.NodeInfo
fits bool fits bool
name string name string
}{ }{
@@ -4708,7 +4708,7 @@ func TestVolumeZonePredicate(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fit := NewVolumeZonePredicate(pvInfo, pvcInfo, nil) fit := NewVolumeZonePredicate(pvInfo, pvcInfo, nil)
node := &schedulercache.NodeInfo{} node := &schedulernodeinfo.NodeInfo{}
node.SetNode(test.Node) node.SetNode(test.Node)
fits, reasons, err := fit(test.Pod, nil, node) fits, reasons, err := fit(test.Pod, nil, node)
@@ -4802,7 +4802,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fit := NewVolumeZonePredicate(pvInfo, pvcInfo, nil) fit := NewVolumeZonePredicate(pvInfo, pvcInfo, nil)
node := &schedulercache.NodeInfo{} node := &schedulernodeinfo.NodeInfo{}
node.SetNode(test.Node) node.SetNode(test.Node)
fits, reasons, err := fit(test.Pod, nil, node) fits, reasons, err := fit(test.Pod, nil, node)
@@ -4920,7 +4920,7 @@ func TestVolumeZonePredicateWithVolumeBinding(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
fit := NewVolumeZonePredicate(pvInfo, pvcInfo, classInfo) fit := NewVolumeZonePredicate(pvInfo, pvcInfo, classInfo)
node := &schedulercache.NodeInfo{} node := &schedulernodeinfo.NodeInfo{}
node.SetNode(test.Node) node.SetNode(test.Node)
fits, _, err := fit(test.Pod, nil, node) fits, _, err := fit(test.Pod, nil, node)
@@ -5028,7 +5028,7 @@ func TestCheckNodeUnschedulablePredicate(t *testing.T) {
} }
for _, test := range testCases { for _, test := range testCases {
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
fit, _, err := CheckNodeUnschedulablePredicate(test.pod, nil, nodeInfo) fit, _, err := CheckNodeUnschedulablePredicate(test.pod, nil, nodeInfo)
if err != nil { if err != nil {

View File

@@ -19,7 +19,7 @@ package predicates
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// FindLabelsInSet gets as many key/value pairs as possible out of a label set. // FindLabelsInSet gets as many key/value pairs as possible out of a label set.
@@ -68,7 +68,7 @@ func CreateSelectorFromLabels(aL map[string]string) labels.Selector {
// portsConflict checks whether existingPorts and wantPorts conflict with each other // portsConflict checks whether existingPorts and wantPorts conflict with each other
// return true if we have a conflict // return true if we have a conflict
func portsConflict(existingPorts schedulercache.HostPortInfo, wantPorts []*v1.ContainerPort) bool { func portsConflict(existingPorts schedulernodeinfo.HostPortInfo, wantPorts []*v1.ContainerPort) bool {
for _, cp := range wantPorts { for _, cp := range wantPorts {
if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) { if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) {
return true return true
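The hunk above ends with the core of portsConflict: it reports a conflict as soon as any wanted host port is already claimed on the node. A minimal, self-contained Go sketch of that idea (hypothetical portKey type and hasConflict helper, not the scheduler's HostPortInfo API):

package main

import "fmt"

// portKey identifies a host port binding by IP, protocol and port number.
type portKey struct {
	ip       string
	protocol string
	port     int32
}

// hasConflict reports whether any wanted port is already present in existing.
// A real implementation would also treat 0.0.0.0 as conflicting with any IP.
func hasConflict(existing map[portKey]bool, wanted []portKey) bool {
	for _, w := range wanted {
		if existing[w] {
			return true
		}
	}
	return false
}

func main() {
	existing := map[portKey]bool{{ip: "0.0.0.0", protocol: "TCP", port: 8080}: true}
	wanted := []portKey{{ip: "0.0.0.0", protocol: "TCP", port: 8080}}
	fmt.Println(hasConflict(existing, wanted)) // true: 8080/TCP on 0.0.0.0 is already taken
}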

View File

@@ -34,7 +34,7 @@ go_library(
"//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/node:go_default_library", "//pkg/util/node:go_default_library",
"//pkg/util/parsers:go_default_library", "//pkg/util/parsers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -71,7 +71,7 @@ go_test(
"//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/testing:go_default_library",
"//pkg/util/parsers:go_default_library", "//pkg/util/parsers:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library",

View File

@@ -22,7 +22,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
var ( var (
@@ -38,7 +38,7 @@ var (
BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap
) )
func balancedResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { func balancedResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU) cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU)
memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory) memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory)
// This is to find a node which has the most balanced CPU, memory and volume usage. // This is to find a node which has the most balanced CPU, memory and volume usage.
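The renamed balancedResourceScorer compares the CPU and memory utilization fractions computed above. As an illustration only (the difference-based formula below is an assumption for the sketch, not the scheduler's exact math), "balanced" means the fractions are close to each other:

package main

import (
	"fmt"
	"math"
)

// balancedScore favors nodes whose CPU and memory utilization fractions are
// close to each other: a perfectly balanced node scores maxPriority, a node
// where one resource is full and the other empty scores 0.
func balancedScore(reqCPU, capCPU, reqMem, capMem, maxPriority int64) int64 {
	cpuFraction := float64(reqCPU) / float64(capCPU)
	memFraction := float64(reqMem) / float64(capMem)
	diff := math.Abs(cpuFraction - memFraction)
	return int64(math.Round((1 - diff) * float64(maxPriority)))
}

func main() {
	fmt.Println(balancedScore(500, 1000, 4<<30, 8<<30, 10))  // 10: both resources at 50%
	fmt.Println(balancedScore(900, 1000, 1<<30, 10<<30, 10)) // 2: 90% CPU vs 10% memory
}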

View File

@@ -27,7 +27,7 @@ import (
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// getExistingVolumeCountForNode gets the current number of volumes on node. // getExistingVolumeCountForNode gets the current number of volumes on node.
@@ -401,7 +401,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
if len(test.pod.Spec.Volumes) > 0 { if len(test.pod.Spec.Volumes) > 0 {
maxVolumes := 5 maxVolumes := 5
for _, info := range nodeNameToInfo { for _, info := range nodeNameToInfo {

View File

@@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/parsers" "k8s.io/kubernetes/pkg/util/parsers"
) )
@@ -39,7 +39,7 @@ const (
// based on the total size of those images. // based on the total size of those images.
// - If none of the images are present, this node will be given the lowest priority. // - If none of the images are present, this node will be given the lowest priority.
// - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority. // - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority.
func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found") return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -74,7 +74,7 @@ func calculatePriority(sumScores int64) int {
// sumImageScores returns the sum of image scores of all the containers that are already on the node. // sumImageScores returns the sum of image scores of all the containers that are already on the node.
// Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate // Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate
// the final score. Note that the init containers are not considered, as it's rare for users to deploy huge init containers. // the final score. Note that the init containers are not considered, as it's rare for users to deploy huge init containers.
func sumImageScores(nodeInfo *schedulercache.NodeInfo, containers []v1.Container, totalNumNodes int) int64 { func sumImageScores(nodeInfo *schedulernodeinfo.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
var sum int64 var sum int64
imageStates := nodeInfo.ImageStates() imageStates := nodeInfo.ImageStates()
@@ -91,7 +91,7 @@ func sumImageScores(nodeInfo *schedulercache.NodeInfo, containers []v1.Container
// The size of the image is used as the base score, scaled by a factor which considers how many nodes the image has "spread" to. // The size of the image is used as the base score, scaled by a factor which considers how many nodes the image has "spread" to.
// This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or // This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or
// a few nodes due to image locality. // a few nodes due to image locality.
func scaledImageScore(imageState *schedulercache.ImageStateSummary, totalNumNodes int) int64 { func scaledImageScore(imageState *schedulernodeinfo.ImageStateSummary, totalNumNodes int) int64 {
spread := float64(imageState.NumNodes) / float64(totalNumNodes) spread := float64(imageState.NumNodes) / float64(totalNumNodes)
return int64(float64(imageState.Size) * spread) return int64(float64(imageState.Size) * spread)
} }
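Since the hunk shows the full body of scaledImageScore, its spread heuristic can be demonstrated in isolation: the image's size is weighted by the fraction of nodes that already hold it, which dampens the pull toward a single "hot" node. A self-contained sketch with a hypothetical imageState struct:

package main

import "fmt"

// imageState summarizes one container image across the cluster.
type imageState struct {
	sizeBytes int64 // image size on disk
	numNodes  int   // how many nodes already have this image
}

// scaledScore weights the image size by how widely it has spread across the
// cluster, mirroring the scaledImageScore formula in the hunk above.
func scaledScore(s imageState, totalNodes int) int64 {
	spread := float64(s.numNodes) / float64(totalNodes)
	return int64(float64(s.sizeBytes) * spread)
}

func main() {
	img := imageState{sizeBytes: 500 << 20, numNodes: 2} // 500 MiB image present on 2 of 10 nodes
	fmt.Println(scaledScore(img, 10))                    // 104857600 (500 MiB * 0.2)
}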

View File

@@ -26,7 +26,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/util/parsers" "k8s.io/kubernetes/pkg/util/parsers"
) )
@@ -164,7 +164,7 @@ func TestImageLocalityPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
list, err := priorityFunction(ImageLocalityPriorityMap, nil, &priorityMetadata{totalNumNodes: len(test.nodes)})(test.pod, nodeNameToInfo, test.nodes) list, err := priorityFunction(ImageLocalityPriorityMap, nil, &priorityMetadata{totalNumNodes: len(test.nodes)})(test.pod, nodeNameToInfo, test.nodes)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/klog" "k8s.io/klog"
) )
@@ -116,7 +116,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm
// that node; the node(s) with the highest sum are the most preferred. // that node; the node(s) with the highest sum are the most preferred.
// Symmetry needs to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, // Symmetry needs to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
// symmetry needs to be considered for hard requirements from podAffinity // symmetry needs to be considered for hard requirements from podAffinity
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
affinity := pod.Spec.Affinity affinity := pod.Spec.Affinity
hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil
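The comment above describes preferred inter-pod affinity scoring as a weighted sum per node. The rough standalone sketch below (simplified weightedTerm type, exact key/value matching only) illustrates that accumulation; it is not the scheduler's processTerms logic:

package main

import "fmt"

// weightedTerm is a simplified stand-in for a WeightedPodAffinityTerm: a label
// an existing pod must carry and the weight added when it matches.
type weightedTerm struct {
	key, value string
	weight     int64
}

// scoreNodes adds each term's weight to every node that already runs a pod
// carrying the matching label. Higher sums mean more preferred nodes.
func scoreNodes(podsByNode map[string][]map[string]string, terms []weightedTerm) map[string]int64 {
	scores := make(map[string]int64)
	for node, pods := range podsByNode {
		for _, labels := range pods {
			for _, t := range terms {
				if labels[t.key] == t.value {
					scores[node] += t.weight
				}
			}
		}
	}
	return scores
}

func main() {
	pods := map[string][]map[string]string{
		"node-a": {{"app": "cache"}},
		"node-b": {{"app": "db"}},
	}
	terms := []weightedTerm{{key: "app", value: "cache", weight: 10}}
	fmt.Println(scoreNodes(pods, terms)) // map[node-a:10]
}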

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
) )
@@ -510,7 +510,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
interPodAffinity := InterPodAffinity{ interPodAffinity := InterPodAffinity{
info: FakeNodeListInfo(test.nodes), info: FakeNodeListInfo(test.nodes),
nodeLister: schedulertesting.FakeNodeLister(test.nodes), nodeLister: schedulertesting.FakeNodeLister(test.nodes),
@@ -600,7 +600,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
ipa := InterPodAffinity{ ipa := InterPodAffinity{
info: FakeNodeListInfo(test.nodes), info: FakeNodeListInfo(test.nodes),
nodeLister: schedulertesting.FakeNodeLister(test.nodes), nodeLister: schedulertesting.FakeNodeLister(test.nodes),

View File

@@ -18,7 +18,7 @@ package priorities
import ( import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
var ( var (
@@ -33,7 +33,7 @@ var (
LeastRequestedPriorityMap = leastResourcePriority.PriorityMap LeastRequestedPriorityMap = leastResourcePriority.PriorityMap
) )
func leastResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { func leastResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
return (leastRequestedScore(requested.MilliCPU, allocable.MilliCPU) + return (leastRequestedScore(requested.MilliCPU, allocable.MilliCPU) +
leastRequestedScore(requested.Memory, allocable.Memory)) / 2 leastRequestedScore(requested.Memory, allocable.Memory)) / 2
} }
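leastResourceScorer averages a per-resource score over CPU and memory, as shown. The inner leastRequestedScore body is not part of this hunk, so the "unused capacity scaled to a max priority of 10" formula below is stated as an assumption for illustration:

package main

import "fmt"

const maxPriority = 10

// leastRequested gives more points the more capacity is left unrequested:
// an idle resource scores maxPriority, a fully requested one scores 0.
func leastRequested(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * maxPriority / capacity
}

// leastResourceScore averages the CPU and memory scores, as in the hunk above.
func leastResourceScore(reqCPU, capCPU, reqMem, capMem int64) int64 {
	return (leastRequested(reqCPU, capCPU) + leastRequested(reqMem, capMem)) / 2
}

func main() {
	// 2 of 8 CPUs and 4Gi of 16Gi memory requested -> (7 + 7) / 2 = 7.
	fmt.Println(leastResourceScore(2000, 8000, 4<<30, 16<<30))
}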

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestLeastRequested(t *testing.T) { func TestLeastRequested(t *testing.T) {
@@ -253,7 +253,7 @@ func TestLeastRequested(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// PriorityMetadataFactory is a factory to produce PriorityMetadata. // PriorityMetadataFactory is a factory to produce PriorityMetadata.
@@ -45,7 +45,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle
// priorityMetadata is a type that is passed as metadata for priority functions // priorityMetadata is a type that is passed as metadata for priority functions
type priorityMetadata struct { type priorityMetadata struct {
nonZeroRequest *schedulercache.Resource nonZeroRequest *schedulernodeinfo.Resource
podTolerations []v1.Toleration podTolerations []v1.Toleration
affinity *v1.Affinity affinity *v1.Affinity
podSelectors []labels.Selector podSelectors []labels.Selector
@@ -55,7 +55,7 @@ type priorityMetadata struct {
} }
// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil. // PriorityMetadata is a PriorityMetadataProducer. Node info can be nil.
func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} {
// If we cannot compute metadata, just return nil // If we cannot compute metadata, just return nil
if pod == nil { if pod == nil {
return nil return nil

View File

@@ -25,16 +25,16 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
) )
func TestPriorityMetadata(t *testing.T) { func TestPriorityMetadata(t *testing.T) {
nonZeroReqs := &schedulercache.Resource{} nonZeroReqs := &schedulernodeinfo.Resource{}
nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCPURequest nonZeroReqs.MilliCPU = priorityutil.DefaultMilliCPURequest
nonZeroReqs.Memory = priorityutil.DefaultMemoryRequest nonZeroReqs.Memory = priorityutil.DefaultMemoryRequest
specifiedReqs := &schedulercache.Resource{} specifiedReqs := &schedulernodeinfo.Resource{}
specifiedReqs.MilliCPU = 200 specifiedReqs.MilliCPU = 200
specifiedReqs.Memory = 2000 specifiedReqs.Memory = 2000

View File

@@ -18,7 +18,7 @@ package priorities
import ( import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
var ( var (
@@ -31,7 +31,7 @@ var (
MostRequestedPriorityMap = mostResourcePriority.PriorityMap MostRequestedPriorityMap = mostResourcePriority.PriorityMap
) )
func mostResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { func mostResourceScorer(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
return (mostRequestedScore(requested.MilliCPU, allocable.MilliCPU) + return (mostRequestedScore(requested.MilliCPU, allocable.MilliCPU) +
mostRequestedScore(requested.Memory, allocable.Memory)) / 2 mostRequestedScore(requested.Memory, allocable.Memory)) / 2
} }
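mostResourceScorer is the bin-packing counterpart: the same averaging over CPU and memory, but favoring already-utilized nodes. Only the averaging appears in the hunk, so the per-resource formula in this sketch is an assumption:

package main

import "fmt"

const maxPriority = 10

// mostRequested is the mirror image of least-requested: the more of a
// resource is already requested, the higher the score.
func mostRequested(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return requested * maxPriority / capacity
}

func main() {
	reqCPU, capCPU := int64(6000), int64(8000)
	reqMem, capMem := int64(12<<30), int64(16<<30)
	// (7 + 7) / 2 = 7: a well-packed node scores high.
	fmt.Println((mostRequested(reqCPU, capCPU) + mostRequested(reqMem, capMem)) / 2)
}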

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestMostRequested(t *testing.T) { func TestMostRequested(t *testing.T) {
@@ -210,7 +210,7 @@ func TestMostRequested(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences // CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences
@@ -31,7 +31,7 @@ import (
// it will get an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms // it will get an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
// the node satisfies and the greater the weight of each satisfied preferredSchedulingTerm, the higher // the node satisfies and the greater the weight of each satisfied preferredSchedulingTerm, the higher
// score the node gets. // score the node gets.
func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found") return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
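The comment above says each satisfied preferredSchedulingTerm adds its weight to the node's score. A standalone sketch of that summation with a simplified preferredTerm type (exact label match only, no full selector semantics):

package main

import "fmt"

// preferredTerm is a simplified preferredSchedulingTerm: a required node label
// value and the weight granted when the node carries it.
type preferredTerm struct {
	key, value string
	weight     int32
}

// nodeAffinityScore sums the weights of all preferred terms the node's labels
// satisfy; more satisfied terms and heavier weights mean a higher score.
func nodeAffinityScore(nodeLabels map[string]string, terms []preferredTerm) int32 {
	var score int32
	for _, t := range terms {
		if nodeLabels[t.key] == t.value {
			score += t.weight
		}
	}
	return score
}

func main() {
	labels := map[string]string{"disktype": "ssd", "zone": "us-east-1a"}
	terms := []preferredTerm{
		{key: "disktype", value: "ssd", weight: 80},
		{key: "zone", value: "us-west-2b", weight: 20},
	}
	fmt.Println(nodeAffinityScore(labels, terms)) // 80: only the disktype term matches
}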

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestNodeAffinityPriority(t *testing.T) { func TestNodeAffinityPriority(t *testing.T) {
@@ -167,7 +167,7 @@ func TestNodeAffinityPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil) nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil)
list, err := nap(test.pod, nodeNameToInfo, test.nodes) list, err := nap(test.pod, nodeNameToInfo, test.nodes)
if err != nil { if err != nil {

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// NodeLabelPrioritizer contains information to calculate node label priority. // NodeLabelPrioritizer contains information to calculate node label priority.
@@ -44,7 +44,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun
// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value. // CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value. // If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label. // If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found") return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
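CalculateNodeLabelPriorityMap, per its comment, scores on label presence or absence alone, regardless of value. A tiny standalone sketch of that rule (hypothetical nodeLabelScore helper):

package main

import "fmt"

const maxPriority = 10

// nodeLabelScore scores a node on the mere presence (or absence) of a label,
// regardless of its value, mirroring the behaviour described in the comment.
func nodeLabelScore(nodeLabels map[string]string, label string, presence bool) int {
	_, exists := nodeLabels[label]
	if exists == presence {
		return maxPriority
	}
	return 0
}

func main() {
	labels := map[string]string{"dedicated": "gpu"}
	fmt.Println(nodeLabelScore(labels, "dedicated", true))  // 10: label present, presence wanted
	fmt.Println(nodeLabelScore(labels, "dedicated", false)) // 0: label present, absence wanted
}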

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestNewNodeLabelPriority(t *testing.T) { func TestNewNodeLabelPriority(t *testing.T) {
@@ -108,7 +108,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
labelPrioritizer := &NodeLabelPrioritizer{ labelPrioritizer := &NodeLabelPrioritizer{
label: test.label, label: test.label,
presence: test.presence, presence: test.presence,

View File

@@ -23,12 +23,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// CalculateNodePreferAvoidPodsPriorityMap prioritizes nodes according to the node annotation // CalculateNodePreferAvoidPodsPriorityMap prioritizes nodes according to the node annotation
// "scheduler.alpha.kubernetes.io/preferAvoidPods". // "scheduler.alpha.kubernetes.io/preferAvoidPods".
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found") return schedulerapi.HostPriority{}, fmt.Errorf("node not found")

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestNodePreferAvoidPriority(t *testing.T) { func TestNodePreferAvoidPriority(t *testing.T) {
@@ -142,7 +142,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@@ -20,7 +20,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// NormalizeReduce generates a PriorityReduceFunction that can normalize the result // NormalizeReduce generates a PriorityReduceFunction that can normalize the result
@@ -30,7 +30,7 @@ func NormalizeReduce(maxPriority int, reverse bool) algorithm.PriorityReduceFunc
return func( return func(
_ *v1.Pod, _ *v1.Pod,
_ interface{}, _ interface{},
_ map[string]*schedulercache.NodeInfo, _ map[string]*schedulernodeinfo.NodeInfo,
result schedulerapi.HostPriorityList) error { result schedulerapi.HostPriorityList) error {
var maxCount int var maxCount int
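NormalizeReduce, as described, rescales a host priority list so the best raw score maps to the configured maximum, optionally reversed. The sketch below mirrors that description on a plain int slice rather than the real HostPriorityList:

package main

import "fmt"

const maxPriority = 10

// normalize rescales raw scores so the largest becomes maxPriority; with
// reverse set, high raw scores map to low final scores. This mirrors the
// described behaviour, not the exact reduce function in the hunk.
func normalize(scores []int, reverse bool) []int {
	maxCount := 0
	for _, s := range scores {
		if s > maxCount {
			maxCount = s
		}
	}
	out := make([]int, len(scores))
	for i, s := range scores {
		if maxCount > 0 {
			out[i] = maxPriority * s / maxCount
		}
		if reverse {
			out[i] = maxPriority - out[i]
		}
	}
	return out
}

func main() {
	fmt.Println(normalize([]int{3, 6, 12}, false)) // [2 5 10]
	fmt.Println(normalize([]int{3, 6, 12}, true))  // [8 5 0]
}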

View File

@@ -20,7 +20,7 @@ import (
"fmt" "fmt"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// FunctionShape represents shape of scoring function. // FunctionShape represents shape of scoring function.
@@ -98,7 +98,7 @@ func RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape Fun
return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape)} return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape)}
} }
func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape) func(*schedulercache.Resource, *schedulercache.Resource, bool, int, int) int64 { func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape) func(*schedulernodeinfo.Resource, *schedulernodeinfo.Resource, bool, int, int) int64 {
rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape) rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape)
resourceScoringFunction := func(requested, capacity int64) int64 { resourceScoringFunction := func(requested, capacity int64) int64 {
@@ -109,7 +109,7 @@ func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionSh
return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity) return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity)
} }
return func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 { return func(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
cpuScore := resourceScoringFunction(requested.MilliCPU, allocable.MilliCPU) cpuScore := resourceScoringFunction(requested.MilliCPU, allocable.MilliCPU)
memoryScore := resourceScoringFunction(requested.Memory, allocable.Memory) memoryScore := resourceScoringFunction(requested.Memory, allocable.Memory)
return (cpuScore + memoryScore) / 2 return (cpuScore + memoryScore) / 2
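The scorer built above turns utilization into a score via a "broken linear" shape and then averages CPU and memory. A standalone sketch of such a piecewise-linear scoring function (hypothetical point type; the shape values are illustrative, not defaults from this diff):

package main

import "fmt"

// point is one (utilization, score) vertex of a piecewise linear shape,
// standing in for the FunctionShape mentioned in the hunk above.
type point struct{ utilization, score int64 }

// brokenLinear interpolates linearly between the shape's points; outside the
// range it clamps to the first/last score.
func brokenLinear(shape []point, x int64) int64 {
	if x <= shape[0].utilization {
		return shape[0].score
	}
	for i := 1; i < len(shape); i++ {
		p0, p1 := shape[i-1], shape[i]
		if x <= p1.utilization {
			return p0.score + (p1.score-p0.score)*(x-p0.utilization)/(p1.utilization-p0.utilization)
		}
	}
	return shape[len(shape)-1].score
}

func main() {
	// Favor packed nodes: score 0 at 0% utilization, 10 at 100%.
	shape := []point{{0, 0}, {100, 10}}
	requested, capacity := int64(3000), int64(4000)
	utilization := 100 - (capacity-requested)*100/capacity // 75%
	fmt.Println(brokenLinear(shape, utilization))          // 7
}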

View File

@@ -25,7 +25,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestCreatingFunctionShapeErrorsIfEmptyPoints(t *testing.T) { func TestCreatingFunctionShapeErrorsIfEmptyPoints(t *testing.T) {
@@ -229,7 +229,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
newPod := buildResourcesPod("", test.requested) newPod := buildResourcesPod("", test.requested)
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(scheduledPods, nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(scheduledPods, nodes)
list, err := priorityFunction(RequestedToCapacityRatioResourceAllocationPriorityDefault().PriorityMap, nil, nil)(newPod, nodeNameToInfo, nodes) list, err := priorityFunction(RequestedToCapacityRatioResourceAllocationPriorityDefault().PriorityMap, nil, nil)(newPod, nodeNameToInfo, nodes)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@@ -25,13 +25,13 @@ import (
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// ResourceAllocationPriority contains information to calculate resource allocation priority. // ResourceAllocationPriority contains information to calculate resource allocation priority.
type ResourceAllocationPriority struct { type ResourceAllocationPriority struct {
Name string Name string
scorer func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 scorer func(requested, allocable *schedulernodeinfo.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
} }
// PriorityMap prioritizes nodes according to the resource allocations on the node. // PriorityMap prioritizes nodes according to the resource allocations on the node.
@@ -39,14 +39,14 @@ type ResourceAllocationPriority struct {
func (r *ResourceAllocationPriority) PriorityMap( func (r *ResourceAllocationPriority) PriorityMap(
pod *v1.Pod, pod *v1.Pod,
meta interface{}, meta interface{},
nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found") return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
} }
allocatable := nodeInfo.AllocatableResource() allocatable := nodeInfo.AllocatableResource()
var requested schedulercache.Resource var requested schedulernodeinfo.Resource
if priorityMeta, ok := meta.(*priorityMetadata); ok { if priorityMeta, ok := meta.(*priorityMetadata); ok {
requested = *priorityMeta.nonZeroRequest requested = *priorityMeta.nonZeroRequest
} else { } else {
@@ -91,8 +91,8 @@ func (r *ResourceAllocationPriority) PriorityMap(
}, nil }, nil
} }
func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource { func getNonZeroRequests(pod *v1.Pod) *schedulernodeinfo.Resource {
result := &schedulercache.Resource{} result := &schedulernodeinfo.Resource{}
for i := range pod.Spec.Containers { for i := range pod.Spec.Containers {
container := &pod.Spec.Containers[i] container := &pod.Spec.Containers[i]
cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests) cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
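getNonZeroRequests sums per-container requests, substituting defaults when a container specifies none (the DefaultMilliCPURequest/DefaultMemoryRequest values exercised by the metadata test earlier). The concrete default values in this sketch are assumptions for illustration, not taken from this diff:

package main

import "fmt"

// Assumed defaults used when a container does not state a request; the real
// values live in priorityutil and are not part of this hunk.
const (
	defaultMilliCPU = 100       // 0.1 CPU
	defaultMemory   = 200 << 20 // 200 MiB
)

type containerRequest struct{ milliCPU, memory int64 }

// nonZeroRequests sums container requests, substituting defaults for zeros so
// that "request nothing" pods still count against a node during scoring.
func nonZeroRequests(containers []containerRequest) (cpu, mem int64) {
	for _, c := range containers {
		if c.milliCPU == 0 {
			cpu += defaultMilliCPU
		} else {
			cpu += c.milliCPU
		}
		if c.memory == 0 {
			mem += defaultMemory
		} else {
			mem += c.memory
		}
	}
	return cpu, mem
}

func main() {
	cpu, mem := nonZeroRequests([]containerRequest{{0, 0}, {250, 512 << 20}})
	fmt.Println(cpu, mem) // 350 746586112
}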

View File

@@ -21,7 +21,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/klog" "k8s.io/klog"
) )
@@ -33,7 +33,7 @@ import (
// of the pod are satisfied, the node is assigned a score of 1. // of the pod are satisfied, the node is assigned a score of 1.
// The rationale for choosing the lowest score of 1 is that it is mainly used to break ties between nodes that have // The rationale for choosing the lowest score of 1 is that it is mainly used to break ties between nodes that have
// the same scores assigned by one of the least- and most-requested priority functions. // the same scores assigned by one of the least- and most-requested priority functions.
func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found") return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -82,10 +82,10 @@ func computeScore(limit, allocatable int64) int64 {
// getResourceLimits computes resource limits for input pod. // getResourceLimits computes resource limits for input pod.
// The reason to create this new function is to be consistent with other // The reason to create this new function is to be consistent with other
// priority functions because most or perhaps all priority functions work // priority functions because most or perhaps all priority functions work
// with schedulercache.Resource. // with schedulernodeinfo.Resource.
// TODO: cache it as part of metadata passed to priority functions. // TODO: cache it as part of metadata passed to priority functions.
func getResourceLimits(pod *v1.Pod) *schedulercache.Resource { func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
result := &schedulercache.Resource{} result := &schedulernodeinfo.Resource{}
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Limits) result.Add(container.Resources.Limits)
} }
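Per the comments above, this priority sums the pod's container limits and awards the minimal tie-breaking score of 1 when the node's allocatable resources can satisfy them. A standalone sketch of that check (simplified resources struct, not the scheduler's implementation):

package main

import "fmt"

type resources struct{ milliCPU, memory int64 }

// podLimits sums the per-container limits, mirroring getResourceLimits above.
func podLimits(containers []resources) resources {
	var total resources
	for _, c := range containers {
		total.milliCPU += c.milliCPU
		total.memory += c.memory
	}
	return total
}

// resourceLimitsScore assigns the minimal tie-breaking score of 1 when the
// node can accommodate the pod's summed limits, and 0 otherwise.
func resourceLimitsScore(allocatable resources, containers []resources) int {
	limits := podLimits(containers)
	if limits.milliCPU <= allocatable.milliCPU && limits.memory <= allocatable.memory {
		return 1
	}
	return 0
}

func main() {
	node := resources{milliCPU: 4000, memory: 8 << 30}
	pod := []resources{{1000, 1 << 30}, {2000, 2 << 30}}
	fmt.Println(resourceLimitsScore(node, pod)) // 1: 3000m CPU and 3Gi fit on the node
}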

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestResourceLimistPriority(t *testing.T) { func TestResourceLimistPriority(t *testing.T) {
@@ -139,7 +139,7 @@ func TestResourceLimistPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
utilnode "k8s.io/kubernetes/pkg/util/node" utilnode "k8s.io/kubernetes/pkg/util/node"
"k8s.io/klog" "k8s.io/klog"
@@ -63,7 +63,7 @@ func NewSelectorSpreadPriority(
// It favors nodes that have fewer existing matching pods. // It favors nodes that have fewer existing matching pods.
// i.e. it pushes the scheduler towards a node where there's the smallest number of // i.e. it pushes the scheduler towards a node where there's the smallest number of
// pods which match the same service, RC, RSs or StatefulSets selectors as the pod being scheduled. // pods which match the same service, RC, RSs or StatefulSets selectors as the pod being scheduled.
func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
var selectors []labels.Selector var selectors []labels.Selector
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
@@ -114,7 +114,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
// based on the number of existing matching pods on the node // based on the number of existing matching pods on the node
// where zone information is included on the nodes, it favors nodes // where zone information is included on the nodes, it favors nodes
// in zones with fewer existing matching pods. // in zones with fewer existing matching pods.
func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
countsByZone := make(map[string]int, 10) countsByZone := make(map[string]int, 10)
maxCountByZone := int(0) maxCountByZone := int(0)
maxCountByNodeName := int(0) maxCountByNodeName := int(0)
@@ -202,7 +202,7 @@ func (s *ServiceAntiAffinity) getNodeClassificationByLabels(nodes []*v1.Node) (m
} }
// filteredPod gets pods based on namespace and selector // filteredPod gets pods based on namespace and selector
func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulercache.NodeInfo) (pods []*v1.Pod) { func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) (pods []*v1.Pod) {
if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector == nil { if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector == nil {
return []*v1.Pod{} return []*v1.Pod{}
} }
@@ -218,7 +218,7 @@ func filteredPod(namespace string, selector labels.Selector, nodeInfo *scheduler
// CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service // CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service
// on given machine // on given machine
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
var firstServiceSelector labels.Selector var firstServiceSelector labels.Selector
node := nodeInfo.Node() node := nodeInfo.Node()
@@ -242,7 +242,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta
// CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label. // CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity). // The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
var numServicePods int var numServicePods int
var label string var label string
podCounts := map[string]int{} podCounts := map[string]int{}
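For downstream priority plugins the rename is purely an import-alias swap: the Map/Reduce signatures above keep their shape, only `schedulercache.NodeInfo` becomes `schedulernodeinfo.NodeInfo`. A minimal sketch under that assumption (the function name is illustrative, not part of this PR):

package example

import (
	"fmt"

	"k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// podCountPriorityMap is a hypothetical priority Map function: it scores a
// node by the number of pods already recorded in its NodeInfo snapshot.
func podCountPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: len(nodeInfo.Pods()),
	}, nil
}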

View File

@@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)
@@ -339,7 +339,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes))
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes))
selectorSpread := SelectorSpread{
serviceLister: schedulertesting.FakeServiceLister(test.services),
controllerLister: schedulertesting.FakeControllerLister(test.rcs),
@@ -575,7 +575,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes))
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes))
selectorSpread := SelectorSpread{
serviceLister: schedulertesting.FakeServiceLister(test.services),
controllerLister: schedulertesting.FakeControllerLister(test.rcs),
@@ -767,7 +767,7 @@ func TestZoneSpreadPriority(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes))
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes))
zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"}
metaDataProducer := NewPriorityMetadataFactory(
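These test fixtures all build their scheduling snapshot the same way. A short sketch of that call against the new import path (the helper name and node names are illustrative only):

package example

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// buildSnapshot is a hypothetical helper mirroring the test setup above:
// it maps node names to NodeInfo objects built from the given pods.
func buildSnapshot(pods []*v1.Pod) map[string]*schedulernodeinfo.NodeInfo {
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "machine2"}},
	}
	return schedulernodeinfo.CreateNodeNameToInfoMap(pods, nodes)
}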

View File

@@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
@@ -52,7 +52,7 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi
}
// ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node
-func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
@@ -227,7 +227,7 @@ func TestTaintAndToleration(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes)
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
ttp := priorityFunction(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil)
list, err := ttp(test.pod, nodeNameToInfo, test.nodes)
if err != nil {

View File

@@ -22,7 +22,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func makeNode(node string, milliCPU, memory int64) *v1.Node {
@@ -42,7 +42,7 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
}
func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, metaData interface{}) algorithm.PriorityFunction {
-return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
for i := range nodes {
hostResult, err := mapFn(pod, metaData, nodeNameToInfo[nodes[i].Name])

View File

@@ -19,7 +19,7 @@ package algorithm
import (
"k8s.io/api/core/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// SchedulerExtender is an interface for external processes to influence scheduling
@@ -33,7 +33,7 @@ type SchedulerExtender interface {
// expected to be a subset of the supplied list. failedNodesMap optionally contains
// the list of failed nodes and failure reasons.
Filter(pod *v1.Pod,
-nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)
// Prioritize based on extender-implemented priority functions. The returned scores & weight
@@ -62,7 +62,7 @@ type SchedulerExtender interface {
ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*schedulerapi.Victims,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error)
// SupportsPreemption returns if the scheduler extender support preemption or not.
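Extender implementations built against this interface only touch the two signatures shown in this hunk. A sketch under that assumption (the struct and its no-op bodies are hypothetical, and the remaining SchedulerExtender methods are deliberately omitted here):

package example

import (
	"k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// noopExtender illustrates the post-rename parameter types only; it does not
// claim to satisfy the full SchedulerExtender interface.
type noopExtender struct{}

// Filter passes every node through and reports no failures.
func (e *noopExtender) Filter(pod *v1.Pod,
	nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
	return nodes, schedulerapi.FailedNodesMap{}, nil
}

// ProcessPreemption accepts the scheduler's proposed victims unchanged.
func (e *noopExtender) ProcessPreemption(
	pod *v1.Pod,
	nodeToVictims map[*v1.Node]*schedulerapi.Victims,
	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error) {
	return nodeToVictims, nil
}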

View File

@@ -22,8 +22,8 @@ import (
policyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/labels"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// NodeFieldSelectorKeys is a map that: the key are node field selector keys; the values are
@@ -34,30 +34,30 @@ var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
-type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)
+type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
// PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
-type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error)
+type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error)
// PriorityReduceFunction is a function that aggregated per-node results and computes
// final scores for all nodes.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
-type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error
+type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error
// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
-type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata
+type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
// PriorityMetadataProducer is a function that computes metadata for a given pod. This
// is now used for only for priority functions. For predicates please use PredicateMetadataProducer.
-type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}
+type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{}
// PriorityFunction is a function that computes scores for all nodes.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
-type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)
+type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)
// PriorityConfig is a config used for a priority function.
type PriorityConfig struct {
@@ -71,12 +71,12 @@ type PriorityConfig struct {
}
// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
-func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata {
+func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata {
return nil
}
// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
-func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
+func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) interface{} {
return nil
}
@@ -174,6 +174,6 @@ func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.Sta
// PredicateMetadata interface represents anything that can access a predicate metadata.
type PredicateMetadata interface {
ShallowCopy() PredicateMetadata
-AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error
+AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error
RemovePod(deletedPod *v1.Pod) error
}
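The renamed NodeInfo type flows through every plugin-facing alias defined above. A minimal sketch of a custom predicate compiled against the new import path (function name and body are illustrative, following the nil checks used throughout this PR):

package example

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// hasNodePredicate is a hypothetical FitPredicate: it only verifies that the
// NodeInfo snapshot carries a node object before declaring a fit.
func hasNodePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	return true, nil, nil
}

// The function satisfies algorithm.FitPredicate after the rename.
var _ algorithm.FitPredicate = hasNodePredicate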

View File

@@ -21,7 +21,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// EmptyPriorityMetadataProducer should returns a no-op PriorityMetadataProducer type.
@@ -29,9 +29,9 @@ func TestEmptyPriorityMetadataProducer(t *testing.T) {
fakePod := new(v1.Pod)
fakeLabelSelector := labels.SelectorFromSet(labels.Set{"foo": "bar"})
-nodeNameToInfo := map[string]*schedulercache.NodeInfo{
-"2": schedulercache.NewNodeInfo(fakePod),
-"1": schedulercache.NewNodeInfo(),
+nodeNameToInfo := map[string]*schedulernodeinfo.NodeInfo{
+"2": schedulernodeinfo.NewNodeInfo(fakePod),
+"1": schedulernodeinfo.NewNodeInfo(),
}
// Test EmptyPriorityMetadataProducer
metadata := EmptyPriorityMetadataProducer(fakePod, nodeNameToInfo)

View File

@@ -12,11 +12,11 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/api:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/core/equivalence:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/plugins/v1alpha1:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
@@ -48,10 +48,10 @@ go_test(
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/core/equivalence:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/plugins/v1alpha1:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/util:go_default_library",

View File

@@ -9,8 +9,8 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/hash:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -26,7 +26,7 @@ go_test(
deps = [
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -30,8 +30,8 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/metrics"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
@@ -283,7 +283,7 @@ func (n *NodeCache) RunPredicate(
predicateID int,
pod *v1.Pod,
meta algorithm.PredicateMetadata,
-nodeInfo *schedulercache.NodeInfo,
+nodeInfo *schedulernodeinfo.NodeInfo,
equivClass *Class,
) (bool, []algorithm.PredicateFailureReason, error) {
if nodeInfo == nil || nodeInfo.Node() == nil {
@@ -310,7 +310,7 @@ func (n *NodeCache) updateResult(
fit bool,
reasons []algorithm.PredicateFailureReason,
equivalenceHash uint64,
-nodeInfo *schedulercache.NodeInfo,
+nodeInfo *schedulernodeinfo.NodeInfo,
) {
if nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen during tests.

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// makeBasicPod returns a Pod object with many of the fields populated.
@@ -162,7 +162,7 @@ type mockPredicate struct {
callCount int
}
-func (p *mockPredicate) predicate(*v1.Pod, algorithm.PredicateMetadata, *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (p *mockPredicate) predicate(*v1.Pod, algorithm.PredicateMetadata, *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
p.callCount++
return p.fit, p.reasons, p.err
}
@@ -219,7 +219,7 @@ func TestRunPredicate(t *testing.T) {
predicateID := 0
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-node := schedulercache.NewNodeInfo()
+node := schedulernodeinfo.NewNodeInfo()
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}
node.SetNode(testNode)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1"}}
@@ -323,7 +323,7 @@ func TestUpdateResult(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-node := schedulercache.NewNodeInfo()
+node := schedulernodeinfo.NewNodeInfo()
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
node.SetNode(testNode)
@@ -469,7 +469,7 @@ func TestLookupResult(t *testing.T) {
ecache := NewCache(predicatesOrdering)
nodeCache, _ := ecache.GetNodeCache(testNode.Name)
-node := schedulercache.NewNodeInfo()
+node := schedulernodeinfo.NewNodeInfo()
node.SetNode(testNode)
// set cached item to equivalence cache
nodeCache.updateResult(
@@ -687,7 +687,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) {
ecache := NewCache(predicatesOrdering)
for _, test := range tests {
-node := schedulercache.NewNodeInfo()
+node := schedulernodeinfo.NewNodeInfo()
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
node.SetNode(testNode)
@@ -765,7 +765,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) {
ecache := NewCache(predicatesOrdering)
for _, test := range tests {
-node := schedulercache.NewNodeInfo()
+node := schedulernodeinfo.NewNodeInfo()
testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}
node.SetNode(testNode)
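The fixture pattern repeated in these tests is NewNodeInfo followed by SetNode. A small sketch of that pattern with the renamed package (helper and node names are illustrative only):

package example

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// newTestNodeInfo wraps any existing pods in a NodeInfo and then attaches a
// node object, mirroring the setup used throughout the tests above.
func newTestNodeInfo(pods ...*v1.Pod) *schedulernodeinfo.NodeInfo {
	ni := schedulernodeinfo.NewNodeInfo(pods...)
	ni.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "test-node"}})
	return ni
}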

View File

@@ -30,7 +30,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
const (
@@ -128,7 +128,7 @@ func (h *HTTPExtender) SupportsPreemption() bool {
func (h *HTTPExtender) ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*schedulerapi.Victims,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error) {
var (
result schedulerapi.ExtenderPreemptionResult
@@ -172,7 +172,7 @@ func (h *HTTPExtender) ProcessPreemption(
// such as UIDs and names, to object pointers.
func (h *HTTPExtender) convertToNodeToVictims(
nodeNameToMetaVictims map[string]*schedulerapi.MetaVictims,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error) {
nodeToVictims := map[*v1.Node]*schedulerapi.Victims{}
for nodeName, metaVictims := range nodeNameToMetaVictims {
@@ -198,8 +198,8 @@ func (h *HTTPExtender) convertToNodeToVictims(
func (h *HTTPExtender) convertPodUIDToPod(
metaPod *schedulerapi.MetaPod,
nodeName string,
-nodeNameToInfo map[string]*schedulercache.NodeInfo) (*v1.Pod, error) {
-var nodeInfo *schedulercache.NodeInfo
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) (*v1.Pod, error) {
+var nodeInfo *schedulernodeinfo.NodeInfo
if nodeInfo, ok := nodeNameToInfo[nodeName]; ok {
for _, pod := range nodeInfo.Pods() {
if string(pod.UID) == metaPod.UID {
@@ -250,7 +250,7 @@ func convertToNodeNameToVictims(
// the list of failed nodes and failure reasons.
func (h *HTTPExtender) Filter(
pod *v1.Pod,
-nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
var (
result schedulerapi.ExtenderFilterResult

View File

@@ -26,9 +26,9 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -95,7 +95,7 @@ func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.H
return &result, nil
}
-func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := []schedulerapi.HostPriority{}
for _, node := range nodes {
score := 1
@@ -117,7 +117,7 @@ type FakeExtender struct {
ignorable bool
// Cached node information for fake extender
-cachedNodeNameToInfo map[string]*schedulercache.NodeInfo
+cachedNodeNameToInfo map[string]*schedulernodeinfo.NodeInfo
}
func (f *FakeExtender) Name() string {
@@ -136,7 +136,7 @@ func (f *FakeExtender) SupportsPreemption() bool {
func (f *FakeExtender) ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*schedulerapi.Victims,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error) {
nodeToVictimsCopy := map[*v1.Node]*schedulerapi.Victims{}
// We don't want to change the original nodeToVictims
@@ -175,7 +175,7 @@ func (f *FakeExtender) ProcessPreemption(
func (f *FakeExtender) selectVictimsOnNodeByExtender(
pod *v1.Pod,
node *v1.Node,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) ([]*v1.Pod, int, bool, error) {
// If a extender support preemption but have no cached node info, let's run filter to make sure
// default scheduler's decision still stand with given pod and node.
@@ -264,7 +264,7 @@ func (f *FakeExtender) runPredicate(pod *v1.Pod, node *v1.Node) (bool, error) {
return fits, nil
}
-func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
+func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
filtered := []*v1.Node{}
failedNodesMap := schedulerapi.FailedNodesMap{}
for _, node := range nodes {

View File

@@ -39,11 +39,11 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/core/equivalence"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/metrics"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
pluginsv1alpha1 "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
@@ -108,7 +108,7 @@ type genericScheduler struct {
extenders []algorithm.SchedulerExtender
lastNodeIndex uint64
alwaysCheckAllPredicates bool
-cachedNodeInfoMap map[string]*schedulercache.NodeInfo
+cachedNodeInfoMap map[string]*schedulernodeinfo.NodeInfo
volumeBinder *volumebinder.VolumeBinder
pvcLister corelisters.PersistentVolumeClaimLister
pdbLister algorithm.PDBLister
@@ -497,8 +497,8 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
// any pod was found, 2) augmented meta data, 3) augmented nodeInfo.
func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
-nodeInfo *schedulercache.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata,
-*schedulercache.NodeInfo) {
+nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, algorithm.PredicateMetadata,
+*schedulernodeinfo.NodeInfo) {
if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen only in tests.
return false, meta, nodeInfo
@@ -536,7 +536,7 @@ func addNominatedPods(pod *v1.Pod, meta algorithm.PredicateMetadata,
func podFitsOnNode(
pod *v1.Pod,
meta algorithm.PredicateMetadata,
-info *schedulercache.NodeInfo,
+info *schedulernodeinfo.NodeInfo,
predicateFuncs map[string]algorithm.FitPredicate,
nodeCache *equivalence.NodeCache,
queue internalqueue.SchedulingQueue,
@@ -622,7 +622,7 @@ func podFitsOnNode(
// All scores are finally combined (added) to get the total weighted scores of all nodes
func PrioritizeNodes(
pod *v1.Pod,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
meta interface{},
priorityConfigs []algorithm.PriorityConfig,
nodes []*v1.Node,
@@ -763,7 +763,7 @@ func PrioritizeNodes(
}
// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes
-func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -892,7 +892,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
// selectNodesForPreemption finds all the nodes with possible victims for
// preemption in parallel.
func selectNodesForPreemption(pod *v1.Pod,
-nodeNameToInfo map[string]*schedulercache.NodeInfo,
+nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
potentialNodes []*v1.Node,
predicates map[string]algorithm.FitPredicate,
metadataProducer algorithm.PredicateMetadataProducer,
@@ -982,7 +982,7 @@ func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruption
func selectVictimsOnNode(
pod *v1.Pod,
meta algorithm.PredicateMetadata,
-nodeInfo *schedulercache.NodeInfo,
+nodeInfo *schedulernodeinfo.NodeInfo,
fitPredicates map[string]algorithm.FitPredicate,
queue internalqueue.SchedulingQueue,
pdbs []*policy.PodDisruptionBudget,
@@ -1106,7 +1106,7 @@ func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedP
// considered for preemption.
// We look at the node that is nominated for this pod and as long as there are
// terminating pods on the node, we don't consider this for preempting more pods.
-func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) bool {
+func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) bool {
nomNodeName := pod.Status.NominatedNodeName
if len(nomNodeName) > 0 {
if nodeInfo, found := nodeNameToInfo[nomNodeName]; found {
@@ -1175,7 +1175,7 @@ func NewGenericScheduler(
priorityMetaProducer: priorityMetaProducer,
pluginSet: pluginSet,
extenders: extenders,
-cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo),
+cachedNodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
volumeBinder: volumeBinder,
pvcLister: pvcLister,
pdbLister: pdbLister,
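The snapshot the generic scheduler consults is simply a map keyed by node name, as the `cachedNodeInfoMap` and `podEligibleToPreemptOthers` hunks above show. A hypothetical helper echoing that lookup pattern (the function name is illustrative, not the PR's code):

package example

import (
	"k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// nominatedNodeHasInfo reports whether the snapshot already holds a NodeInfo
// entry for the pod's nominated node, mirroring the lookup in
// podEligibleToPreemptOthers.
func nominatedNodeHasInfo(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) bool {
	nomNodeName := pod.Status.NominatedNodeName
	if len(nomNodeName) == 0 {
		return false
	}
	_, found := nodeNameToInfo[nomNodeName]
	return found
}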

View File

@@ -39,10 +39,10 @@ import (
algorithmpriorities "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
-schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/core/equivalence"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
+schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
plugins "k8s.io/kubernetes/pkg/scheduler/plugins/v1alpha1"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)
@@ -52,15 +52,15 @@ var (
order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred}
)
-func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
}
-func truePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func truePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil
}
-func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -71,14 +71,14 @@ func matchesPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
}
-func hasNoPodsPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func hasNoPodsPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
if len(nodeInfo.Pods()) == 0 {
return true, nil, nil
}
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
}
-func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := []schedulerapi.HostPriority{}
for _, node := range nodes {
score, err := strconv.Atoi(node.Name)
@@ -93,7 +93,7 @@ func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.Node
return result, nil
}
-func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
var maxScore float64
minScore := math.MaxFloat64
reverseResult := []schedulerapi.HostPriority{}
@@ -116,18 +116,18 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercac
return reverseResult, nil
}
-func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
return schedulerapi.HostPriority{
Host: nodeInfo.Node().Name,
Score: 1,
}, nil
}
-func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (schedulerapi.HostPriority, error) {
return schedulerapi.HostPriority{}, errPrioritize
}
-func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
+func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, result schedulerapi.HostPriorityList) error {
for _, host := range result {
if host.Host == "" {
return fmt.Errorf("unexpected empty host name")
@@ -729,7 +729,7 @@ func TestZeroRequest(t *testing.T) {
pc := algorithm.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
priorityConfigs = append(priorityConfigs, pc)
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes)
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
metaDataProducer := algorithmpriorities.NewPriorityMetadataFactory(
schedulertesting.FakeServiceLister([]*v1.Service{}),
@@ -800,7 +800,7 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
return &node, nil
}
-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo) algorithm.PredicateMetadata {
+func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
return algorithmpredicates.NewPredicateMetadataFactory(schedulertesting.FakePodLister{p})(p, nodeInfo)
}
@@ -984,7 +984,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
if test.addAffinityPredicate {
test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
}
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
// newnode simulate a case that a new node is added to the cluster, but nodeNameToInfo
// doesn't have it yet.
newnode := makeNode("newnode", 1000*5, priorityutil.DefaultMemoryRequest*5)
@@ -1149,7 +1149,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
for _, n := range test.nodes {
nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
}
-nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, nodes)
+nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
candidateNodes, _ := selectNodesForPreemption(test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
node := pickOneNodeForPreemption(candidateNodes)
found := false
@@ -1417,13 +1417,13 @@ func TestPreempt(t *testing.T) {
for _, pod := range test.pods {
cache.AddPod(pod)
}
-cachedNodeInfoMap := map[string]*schedulercache.NodeInfo{}
+cachedNodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{}
for _, name := range nodeNames {
node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
cache.AddNode(node)
// Set nodeInfo to extenders to mock extenders' cache for preemption.
-cachedNodeInfo := schedulercache.NewNodeInfo()
+cachedNodeInfo := schedulernodeinfo.NewNodeInfo()
cachedNodeInfo.SetNode(node)
cachedNodeInfoMap[name] = cachedNodeInfo
}
@@ -1502,7 +1502,7 @@ type syncingMockCache struct {
//
// Since UpdateNodeNameToInfoMap is one of the first steps of (*genericScheduler).Schedule, we use
// this point to signal to the test that a scheduling cycle has started.
-func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
+func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error {
err := c.Cache.UpdateNodeNameToInfoMap(infoMap)
c.once.Do(func() {
c.cycleStart <- struct{}{}
@@ -1513,14 +1513,14 @@ func (c *syncingMockCache) UpdateNodeNameToInfoMap(infoMap map[string]*scheduler
// TestCacheInvalidationRace tests that equivalence cache invalidation is correctly
// handled when an invalidation event happens early in a scheduling cycle. Specifically, the event
-// occurs after schedulercache is snapshotted and before equivalence cache lock is acquired.
+// occurs after schedulernodeinfo is snapshotted and before equivalence cache lock is acquired.
func TestCacheInvalidationRace(t *testing.T) {
// Create a predicate that returns false the first time and true on subsequent calls.
podWillFit := false
var callCount int
testPredicate := func(pod *v1.Pod,
meta algorithm.PredicateMetadata,
-nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
callCount++
if !podWillFit {
podWillFit = true
@@ -1605,7 +1605,7 @@ func TestCacheInvalidationRace2(t *testing.T) {
)
testPredicate := func(pod *v1.Pod,
meta algorithm.PredicateMetadata,
-nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
callCount++
once.Do(func() {
cycleStart <- struct{}{}

View File

@@ -69,9 +69,9 @@ go_test(
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/api/latest:go_default_library",
-"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/internal/cache/fake:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
+"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",

View File

@@ -38,9 +38,9 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest" latestschedulerapi "k8s.io/kubernetes/pkg/scheduler/api/latest"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/util"
) )
@@ -230,19 +230,19 @@ func TestCreateFromConfigWithEmptyPredicatesOrPriorities(t *testing.T) {
} }
} }
func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
return []schedulerapi.HostPriority{}, nil return []schedulerapi.HostPriority{}, nil
} }
func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
return []schedulerapi.HostPriority{}, nil return []schedulerapi.HostPriority{}, nil
} }
@@ -557,7 +557,7 @@ func (f *fakeExtender) IsIgnorable() bool {
func (f *fakeExtender) ProcessPreemption( func (f *fakeExtender) ProcessPreemption(
pod *v1.Pod, pod *v1.Pod,
nodeToVictims map[*v1.Node]*schedulerapi.Victims, nodeToVictims map[*v1.Node]*schedulerapi.Victims,
nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (map[*v1.Node]*schedulerapi.Victims, error) { ) (map[*v1.Node]*schedulerapi.Victims, error) {
return nil, nil return nil, nil
} }
@@ -569,7 +569,7 @@ func (f *fakeExtender) SupportsPreemption() bool {
func (f *fakeExtender) Filter( func (f *fakeExtender) Filter(
pod *v1.Pod, pod *v1.Pod,
nodes []*v1.Node, nodes []*v1.Node,
nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) { ) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) {
return nil, nil, nil return nil, nil, nil
} }
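
For downstream consumers the rename is mechanical: the import path and local alias change, while the NodeInfo API is untouched. A minimal sketch of an out-of-tree predicate written against the renamed package (the predicate itself is illustrative, not part of this PR):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	// Before this PR the package lived at:
	//   schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// alwaysFit uses the same signature as the test predicates above; only the
// alias on the NodeInfo parameter changes with the rename.
func alwaysFit(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	return true, nil, nil
}

func main() {
	fit, _, _ := alwaysFit(&v1.Pod{}, nil, schedulernodeinfo.NewNodeInfo())
	fmt.Println(fit) // true
}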

View File

@@ -11,7 +11,7 @@ go_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/node:go_default_library", "//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -33,7 +33,7 @@ go_test(
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/klog" "k8s.io/klog"
) )
@@ -58,7 +58,7 @@ type schedulerCache struct {
assumedPods map[string]bool assumedPods map[string]bool
// a map from pod key to podState. // a map from pod key to podState.
podStates map[string]*podState podStates map[string]*podState
nodes map[string]*schedulercache.NodeInfo nodes map[string]*schedulernodeinfo.NodeInfo
nodeTree *NodeTree nodeTree *NodeTree
// A map from image name to its imageState. // A map from image name to its imageState.
imageStates map[string]*imageState imageStates map[string]*imageState
@@ -80,8 +80,8 @@ type imageState struct {
} }
// createImageStateSummary returns a summarizing snapshot of the given image's state. // createImageStateSummary returns a summarizing snapshot of the given image's state.
func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulercache.ImageStateSummary { func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulernodeinfo.ImageStateSummary {
return &schedulercache.ImageStateSummary{ return &schedulernodeinfo.ImageStateSummary{
Size: state.size, Size: state.size,
NumNodes: len(state.nodes), NumNodes: len(state.nodes),
} }
@@ -93,7 +93,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul
period: period, period: period,
stop: stop, stop: stop,
nodes: make(map[string]*schedulercache.NodeInfo), nodes: make(map[string]*schedulernodeinfo.NodeInfo),
nodeTree: newNodeTree(nil), nodeTree: newNodeTree(nil),
assumedPods: make(map[string]bool), assumedPods: make(map[string]bool),
podStates: make(map[string]*podState), podStates: make(map[string]*podState),
@@ -107,7 +107,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
cache.mu.RLock() cache.mu.RLock()
defer cache.mu.RUnlock() defer cache.mu.RUnlock()
nodes := make(map[string]*schedulercache.NodeInfo) nodes := make(map[string]*schedulernodeinfo.NodeInfo)
for k, v := range cache.nodes { for k, v := range cache.nodes {
nodes[k] = v.Clone() nodes[k] = v.Clone()
} }
@@ -123,7 +123,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
} }
} }
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulercache.NodeInfo) error { func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) error {
cache.mu.Lock() cache.mu.Lock()
defer cache.mu.Unlock() defer cache.mu.Unlock()
@@ -171,7 +171,7 @@ func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.S
} }
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@@ -197,7 +197,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error {
// finishBinding exists to make tests deterministic by injecting now as an argument // finishBinding exists to make tests deterministic by injecting now as an argument
func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@@ -216,7 +216,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
} }
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@@ -248,7 +248,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
func (cache *schedulerCache) addPod(pod *v1.Pod) { func (cache *schedulerCache) addPod(pod *v1.Pod) {
n, ok := cache.nodes[pod.Spec.NodeName] n, ok := cache.nodes[pod.Spec.NodeName]
if !ok { if !ok {
n = schedulercache.NewNodeInfo() n = schedulernodeinfo.NewNodeInfo()
cache.nodes[pod.Spec.NodeName] = n cache.nodes[pod.Spec.NodeName] = n
} }
n.AddPod(pod) n.AddPod(pod)
@@ -276,7 +276,7 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error {
} }
func (cache *schedulerCache) AddPod(pod *v1.Pod) error { func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@@ -311,7 +311,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
} }
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
key, err := schedulercache.GetPodKey(oldPod) key, err := schedulernodeinfo.GetPodKey(oldPod)
if err != nil { if err != nil {
return err return err
} }
@@ -339,7 +339,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
} }
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@@ -368,7 +368,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
} }
func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -384,7 +384,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
} }
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
key, err := schedulercache.GetPodKey(pod) key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -406,7 +406,7 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error {
n, ok := cache.nodes[node.Name] n, ok := cache.nodes[node.Name]
if !ok { if !ok {
n = schedulercache.NewNodeInfo() n = schedulernodeinfo.NewNodeInfo()
cache.nodes[node.Name] = n cache.nodes[node.Name] = n
} else { } else {
cache.removeNodeImageStates(n.Node()) cache.removeNodeImageStates(n.Node())
@@ -423,7 +423,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
n, ok := cache.nodes[newNode.Name] n, ok := cache.nodes[newNode.Name]
if !ok { if !ok {
n = schedulercache.NewNodeInfo() n = schedulernodeinfo.NewNodeInfo()
cache.nodes[newNode.Name] = n cache.nodes[newNode.Name] = n
} else { } else {
cache.removeNodeImageStates(n.Node()) cache.removeNodeImageStates(n.Node())
@@ -457,8 +457,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
// addNodeImageStates adds states of the images on the given node to the given nodeInfo and updates the imageStates in // addNodeImageStates adds states of the images on the given node to the given nodeInfo and updates the imageStates in
// scheduler cache. This function assumes the lock to scheduler cache has been acquired. // scheduler cache. This function assumes the lock to scheduler cache has been acquired.
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulercache.NodeInfo) { func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulernodeinfo.NodeInfo) {
newSum := make(map[string]*schedulercache.ImageStateSummary) newSum := make(map[string]*schedulernodeinfo.ImageStateSummary)
for _, image := range node.Status.Images { for _, image := range node.Status.Images {
for _, name := range image.Names { for _, name := range image.Names {
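
The internal cache keeps one NodeInfo per node name and creates it lazily on first reference, as the addPod and AddNode hunks above show. A self-contained sketch of that pattern under the new import (toyCache is illustrative only):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// toyCache mirrors the lazy-creation pattern in schedulerCache.addPod:
// a NodeInfo is built on first reference to a node name and reused afterwards.
type toyCache struct {
	nodes map[string]*schedulernodeinfo.NodeInfo
}

func (c *toyCache) addPod(pod *v1.Pod) {
	n, ok := c.nodes[pod.Spec.NodeName]
	if !ok {
		n = schedulernodeinfo.NewNodeInfo()
		c.nodes[pod.Spec.NodeName] = n
	}
	n.AddPod(pod)
}

func main() {
	c := &toyCache{nodes: make(map[string]*schedulernodeinfo.NodeInfo)}
	pod := &v1.Pod{}
	pod.Name = "p1"
	pod.Spec.NodeName = "node-a"
	c.addPod(pod)
	fmt.Println(len(c.nodes["node-a"].Pods())) // 1
}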

View File

@@ -32,10 +32,10 @@ import (
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulercache.NodeInfo) { func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulernodeinfo.NodeInfo) {
// Ignore generation field. // Ignore generation field.
if actual != nil { if actual != nil {
actual.SetGeneration(0) actual.SetGeneration(0)
@@ -66,21 +66,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo
return b return b
} }
func (b *hostPortInfoBuilder) build() schedulercache.HostPortInfo { func (b *hostPortInfoBuilder) build() schedulernodeinfo.HostPortInfo {
res := make(schedulercache.HostPortInfo) res := make(schedulernodeinfo.HostPortInfo)
for _, param := range b.inputs { for _, param := range b.inputs {
res.Add(param.ip, param.protocol, param.port) res.Add(param.ip, param.protocol, param.port)
} }
return res return res
} }
func newNodeInfo(requestedResource *schedulercache.Resource, func newNodeInfo(requestedResource *schedulernodeinfo.Resource,
nonzeroRequest *schedulercache.Resource, nonzeroRequest *schedulernodeinfo.Resource,
pods []*v1.Pod, pods []*v1.Pod,
usedPorts schedulercache.HostPortInfo, usedPorts schedulernodeinfo.HostPortInfo,
imageStates map[string]*schedulercache.ImageStateSummary, imageStates map[string]*schedulernodeinfo.ImageStateSummary,
) *schedulercache.NodeInfo { ) *schedulernodeinfo.NodeInfo {
nodeInfo := schedulercache.NewNodeInfo(pods...) nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
nodeInfo.SetRequestedResource(requestedResource) nodeInfo.SetRequestedResource(requestedResource)
nodeInfo.SetNonZeroRequest(nonzeroRequest) nodeInfo.SetNonZeroRequest(nonzeroRequest)
nodeInfo.SetUsedPorts(usedPorts) nodeInfo.SetUsedPorts(usedPorts)
@@ -108,98 +108,98 @@ func TestAssumePodScheduled(t *testing.T) {
tests := []struct { tests := []struct {
pods []*v1.Pod pods []*v1.Pod
wNodeInfo *schedulercache.NodeInfo wNodeInfo *schedulernodeinfo.NodeInfo
}{{ }{{
pods: []*v1.Pod{testPods[0]}, pods: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[1], testPods[2]}, pods: []*v1.Pod{testPods[1], testPods[2]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
}, },
[]*v1.Pod{testPods[1], testPods[2]}, []*v1.Pod{testPods[1], testPods[2]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, { // test non-zero request }, { // test non-zero request
pods: []*v1.Pod{testPods[3]}, pods: []*v1.Pod{testPods[3]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 0, MilliCPU: 0,
Memory: 0, Memory: 0,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: priorityutil.DefaultMilliCPURequest, MilliCPU: priorityutil.DefaultMilliCPURequest,
Memory: priorityutil.DefaultMemoryRequest, Memory: priorityutil.DefaultMemoryRequest,
}, },
[]*v1.Pod{testPods[3]}, []*v1.Pod{testPods[3]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[4]}, pods: []*v1.Pod{testPods[4]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3}, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[4]}, []*v1.Pod{testPods[4]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[4], testPods[5]}, pods: []*v1.Pod{testPods[4], testPods[5]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8}, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
}, },
[]*v1.Pod{testPods[4], testPods[5]}, []*v1.Pod{testPods[4], testPods[5]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[6]}, pods: []*v1.Pod{testPods[6]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[6]}, []*v1.Pod{testPods[6]},
newHostPortInfoBuilder().build(), newHostPortInfoBuilder().build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, },
} }
@@ -253,7 +253,7 @@ func TestExpirePod(t *testing.T) {
pods []*testExpirePodStruct pods []*testExpirePodStruct
cleanupTime time.Time cleanupTime time.Time
wNodeInfo *schedulercache.NodeInfo wNodeInfo *schedulernodeinfo.NodeInfo
}{{ // assumed pod would expire }{{ // assumed pod would expire
pods: []*testExpirePodStruct{ pods: []*testExpirePodStruct{
{pod: testPods[0], assumedTime: now}, {pod: testPods[0], assumedTime: now},
@@ -267,17 +267,17 @@ func TestExpirePod(t *testing.T) {
}, },
cleanupTime: now.Add(2 * ttl), cleanupTime: now.Add(2 * ttl),
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
[]*v1.Pod{testPods[1]}, []*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}} }}
@@ -313,22 +313,22 @@ func TestAddPodWillConfirm(t *testing.T) {
podsToAssume []*v1.Pod podsToAssume []*v1.Pod
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
wNodeInfo *schedulercache.NodeInfo wNodeInfo *schedulernodeinfo.NodeInfo
}{{ // two pods were assumed at the same time. But the first one is called Add() and gets confirmed. }{{ // two pods were assumed at the same time. But the first one is called Add() and gets confirmed.
podsToAssume: []*v1.Pod{testPods[0], testPods[1]}, podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
podsToAdd: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}} }}
@@ -405,25 +405,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
podsToUpdate [][]*v1.Pod podsToUpdate [][]*v1.Pod
wNodeInfo map[string]*schedulercache.NodeInfo wNodeInfo map[string]*schedulernodeinfo.NodeInfo
}{{ }{{
podsToAssume: []*v1.Pod{assumedPod.DeepCopy()}, podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
podsToAdd: []*v1.Pod{addedPod.DeepCopy()}, podsToAdd: []*v1.Pod{addedPod.DeepCopy()},
podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}}, podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
wNodeInfo: map[string]*schedulercache.NodeInfo{ wNodeInfo: map[string]*schedulernodeinfo.NodeInfo{
"assumed-node": nil, "assumed-node": nil,
"actual-node": newNodeInfo( "actual-node": newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{updatedPod.DeepCopy()}, []*v1.Pod{updatedPod.DeepCopy()},
newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(), newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, },
}} }}
@@ -463,21 +463,21 @@ func TestAddPodAfterExpiration(t *testing.T) {
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
wNodeInfo *schedulercache.NodeInfo wNodeInfo *schedulernodeinfo.NodeInfo
}{{ }{{
pod: basePod, pod: basePod,
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{basePod}, []*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}} }}
@@ -516,34 +516,34 @@ func TestUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod podsToUpdate []*v1.Pod
wNodeInfo []*schedulercache.NodeInfo wNodeInfo []*schedulernodeinfo.NodeInfo
}{{ // add a pod and then update it twice }{{ // add a pod and then update it twice
podsToAdd: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo( wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
[]*v1.Pod{testPods[1]}, []*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), newNodeInfo( ), newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
)}, )},
}} }}
@@ -643,35 +643,35 @@ func TestExpireAddUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod podsToUpdate []*v1.Pod
wNodeInfo []*schedulercache.NodeInfo wNodeInfo []*schedulernodeinfo.NodeInfo
}{{ // Pod is assumed, expired, and added. Then it would be updated twice. }{{ // Pod is assumed, expired, and added. Then it would be updated twice.
podsToAssume: []*v1.Pod{testPods[0]}, podsToAssume: []*v1.Pod{testPods[0]},
podsToAdd: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo( wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
[]*v1.Pod{testPods[1]}, []*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), newNodeInfo( ), newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
)}, )},
}} }}
@@ -733,21 +733,21 @@ func TestEphemeralStorageResource(t *testing.T) {
podE := makePodWithEphemeralStorage(nodeName, "500") podE := makePodWithEphemeralStorage(nodeName, "500")
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
wNodeInfo *schedulercache.NodeInfo wNodeInfo *schedulernodeinfo.NodeInfo
}{ }{
{ {
pod: podE, pod: podE,
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
EphemeralStorage: 500, EphemeralStorage: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: priorityutil.DefaultMilliCPURequest, MilliCPU: priorityutil.DefaultMilliCPURequest,
Memory: priorityutil.DefaultMemoryRequest, Memory: priorityutil.DefaultMemoryRequest,
}, },
[]*v1.Pod{podE}, []*v1.Pod{podE},
schedulercache.HostPortInfo{}, schedulernodeinfo.HostPortInfo{},
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}, },
} }
@@ -778,21 +778,21 @@ func TestRemovePod(t *testing.T) {
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}) basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
wNodeInfo *schedulercache.NodeInfo wNodeInfo *schedulernodeinfo.NodeInfo
}{{ }{{
pod: basePod, pod: basePod,
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulercache.Resource{ &schedulernodeinfo.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{basePod}, []*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary), make(map[string]*schedulernodeinfo.ImageStateSummary),
), ),
}} }}
@@ -872,7 +872,7 @@ func TestForgetPod(t *testing.T) {
// getResourceRequest returns the resource request of all containers in Pods; // getResourceRequest returns the resource request of all containers in Pods;
// excluding initContainers. // excluding initContainers.
func getResourceRequest(pod *v1.Pod) v1.ResourceList { func getResourceRequest(pod *v1.Pod) v1.ResourceList {
result := &schedulercache.Resource{} result := &schedulernodeinfo.Resource{}
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests) result.Add(container.Resources.Requests)
} }
@@ -881,13 +881,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
} }
// buildNodeInfo creates a NodeInfo by simulating node operations in cache. // buildNodeInfo creates a NodeInfo by simulating node operations in cache.
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulercache.NodeInfo { func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulernodeinfo.NodeInfo {
expected := schedulercache.NewNodeInfo() expected := schedulernodeinfo.NewNodeInfo()
// Simulate SetNode. // Simulate SetNode.
expected.SetNode(node) expected.SetNode(node)
expected.SetAllocatableResource(schedulercache.NewResource(node.Status.Allocatable)) expected.SetAllocatableResource(schedulernodeinfo.NewResource(node.Status.Allocatable))
expected.SetTaints(node.Spec.Taints) expected.SetTaints(node.Spec.Taints)
expected.SetGeneration(expected.GetGeneration() + 1) expected.SetGeneration(expected.GetGeneration() + 1)
@@ -1068,7 +1068,7 @@ func TestNodeOperators(t *testing.T) {
} }
// Case 2: dump cached nodes successfully. // Case 2: dump cached nodes successfully.
cachedNodes := map[string]*schedulercache.NodeInfo{} cachedNodes := map[string]*schedulernodeinfo.NodeInfo{}
cache.UpdateNodeNameToInfoMap(cachedNodes) cache.UpdateNodeNameToInfoMap(cachedNodes)
newNode, found := cachedNodes[node.Name] newNode, found := cachedNodes[node.Name]
if !found || len(cachedNodes) != 1 { if !found || len(cachedNodes) != 1 {
@@ -1089,7 +1089,7 @@ func TestNodeOperators(t *testing.T) {
cache.UpdateNode(nil, node) cache.UpdateNode(nil, node)
got, found = cache.nodes[node.Name] got, found = cache.nodes[node.Name]
if !found { if !found {
t.Errorf("Failed to find node %v in schedulercache after UpdateNode.", node.Name) t.Errorf("Failed to find node %v in schedulernodeinfo after UpdateNode.", node.Name)
} }
if got.GetGeneration() <= expected.GetGeneration() { if got.GetGeneration() <= expected.GetGeneration() {
t.Errorf("Generation is not incremented. got: %v, expected: %v", got.GetGeneration(), expected.GetGeneration()) t.Errorf("Generation is not incremented. got: %v, expected: %v", got.GetGeneration(), expected.GetGeneration())
@@ -1097,7 +1097,7 @@ func TestNodeOperators(t *testing.T) {
expected.SetGeneration(got.GetGeneration()) expected.SetGeneration(got.GetGeneration())
if !reflect.DeepEqual(got, expected) { if !reflect.DeepEqual(got, expected) {
t.Errorf("Failed to update node in schedulercache:\n got: %+v \nexpected: %+v", got, expected) t.Errorf("Failed to update node in schedulernodeinfo:\n got: %+v \nexpected: %+v", got, expected)
} }
// Check nodeTree after update // Check nodeTree after update
if cache.nodeTree.NumNodes() != 1 || cache.nodeTree.Next() != node.Name { if cache.nodeTree.NumNodes() != 1 || cache.nodeTree.Next() != node.Name {
@@ -1131,7 +1131,7 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
cache := setupCacheOf1kNodes30kPods(b) cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
cachedNodes := map[string]*schedulercache.NodeInfo{} cachedNodes := map[string]*schedulernodeinfo.NodeInfo{}
cache.UpdateNodeNameToInfoMap(cachedNodes) cache.UpdateNodeNameToInfoMap(cachedNodes)
} }
} }
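
The test helpers above also exercise HostPortInfo, which moved along with the package. A small sketch of it in isolation (values are arbitrary; CheckConflict is assumed to keep its pre-move signature):

package main

import (
	"fmt"

	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	// HostPortInfo tracks used host ports per IP and protocol, as the
	// hostPortInfoBuilder in the tests above does.
	ports := make(schedulernodeinfo.HostPortInfo)
	ports.Add("127.0.0.1", "TCP", 80)
	ports.Add("127.0.0.1", "TCP", 8080)

	// CheckConflict reports whether a port is already in use (assumed API).
	fmt.Println(ports.CheckConflict("127.0.0.1", "TCP", 80))   // true
	fmt.Println(ports.CheckConflict("127.0.0.1", "TCP", 9090)) // false
}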

View File

@@ -10,9 +10,9 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger", importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger",
visibility = ["//pkg/scheduler:__subpackages__"], visibility = ["//pkg/scheduler:__subpackages__"],
deps = [ deps = [
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
@@ -25,7 +25,7 @@ go_test(
srcs = ["comparer_test.go"], srcs = ["comparer_test.go"],
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
], ],

View File

@@ -24,9 +24,9 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog" "k8s.io/klog"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// CacheComparer is an implementation of the Scheduler's cache comparer. // CacheComparer is an implementation of the Scheduler's cache comparer.
@@ -68,7 +68,7 @@ func (c *CacheComparer) Compare() error {
} }
// CompareNodes compares actual nodes with cached nodes. // CompareNodes compares actual nodes with cached nodes.
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) {
actual := []string{} actual := []string{}
for _, node := range nodes { for _, node := range nodes {
actual = append(actual, node.Name) actual = append(actual, node.Name)
@@ -83,7 +83,7 @@ func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sch
} }
// ComparePods compares actual pods with cached pods. // ComparePods compares actual pods with cached pods.
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) { func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) {
actual := []string{} actual := []string{}
for _, pod := range pods { for _, pod := range pods {
actual = append(actual, string(pod.UID)) actual = append(actual, string(pod.UID))

View File

@@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
func TestCompareNodes(t *testing.T) { func TestCompareNodes(t *testing.T) {
@@ -72,9 +72,9 @@ func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T)
nodes = append(nodes, node) nodes = append(nodes, node)
} }
nodeInfo := make(map[string]*schedulercache.NodeInfo) nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, nodeName := range cached { for _, nodeName := range cached {
nodeInfo[nodeName] = &schedulercache.NodeInfo{} nodeInfo[nodeName] = &schedulernodeinfo.NodeInfo{}
} }
m, r := compare.CompareNodes(nodes, nodeInfo) m, r := compare.CompareNodes(nodes, nodeInfo)
@@ -170,14 +170,14 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes
queuedPods = append(queuedPods, pod) queuedPods = append(queuedPods, pod)
} }
nodeInfo := make(map[string]*schedulercache.NodeInfo) nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, uid := range cached { for _, uid := range cached {
pod := &v1.Pod{} pod := &v1.Pod{}
pod.UID = types.UID(uid) pod.UID = types.UID(uid)
pod.Namespace = "ns" pod.Namespace = "ns"
pod.Name = uid pod.Name = uid
nodeInfo[uid] = schedulercache.NewNodeInfo(pod) nodeInfo[uid] = schedulernodeinfo.NewNodeInfo(pod)
} }
m, r := compare.ComparePods(pods, queuedPods, nodeInfo) m, r := compare.ComparePods(pods, queuedPods, nodeInfo)

View File

@@ -23,9 +23,9 @@ import (
"k8s.io/klog" "k8s.io/klog"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/cache"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// CacheDumper writes some information from the scheduler cache and the scheduling queue to the // CacheDumper writes some information from the scheduler cache and the scheduling queue to the
@@ -61,7 +61,7 @@ func (d *CacheDumper) dumpSchedulingQueue() {
} }
// printNodeInfo writes parts of NodeInfo to a string. // printNodeInfo writes parts of NodeInfo to a string.
func printNodeInfo(n *cache.NodeInfo) string { func printNodeInfo(n *schedulernodeinfo.NodeInfo) string {
var nodeData strings.Builder var nodeData strings.Builder
nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nNumber of Pods: %v\nPods:\n", nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nNumber of Pods: %v\nPods:\n",
n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods()))) n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))

View File

@@ -6,8 +6,8 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake", importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
visibility = ["//pkg/scheduler:__subpackages__"], visibility = ["//pkg/scheduler:__subpackages__"],
deps = [ deps = [
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
], ],

View File

@@ -19,8 +19,8 @@ package fake
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// Cache is used for testing // Cache is used for testing
@@ -75,7 +75,7 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
func (c *Cache) RemoveNode(node *v1.Node) error { return nil } func (c *Cache) RemoveNode(node *v1.Node) error { return nil }
// UpdateNodeNameToInfoMap is a fake method for testing. // UpdateNodeNameToInfoMap is a fake method for testing.
func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error { func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error {
return nil return nil
} }

View File

@@ -19,7 +19,7 @@ package cache
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
) )
// PodFilter is a function to filter a pod. If pod passed return true else return false. // PodFilter is a function to filter a pod. If pod passed return true else return false.
@@ -100,7 +100,7 @@ type Cache interface {
// UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache.
// The node info contains aggregated information of pods scheduled (including assumed to be) // The node info contains aggregated information of pods scheduled (including assumed to be)
// on this node. // on this node.
UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error
// List lists all cached pods (including assumed ones). // List lists all cached pods (including assumed ones).
List(labels.Selector) ([]*v1.Pod, error) List(labels.Selector) ([]*v1.Pod, error)
@@ -118,5 +118,5 @@ type Cache interface {
// Snapshot is a snapshot of cache state // Snapshot is a snapshot of cache state
type Snapshot struct { type Snapshot struct {
AssumedPods map[string]bool AssumedPods map[string]bool
Nodes map[string]*schedulercache.NodeInfo Nodes map[string]*schedulernodeinfo.NodeInfo
} }
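
Consumers of the Cache interface dump node state into a plain map keyed by node name, which is how the tests in this PR exercise the renamed type. A hedged sketch of that usage (dumpNodes is illustrative; the internal cache package is only visible to scheduler subpackages):

package cachedump

import (
	"fmt"

	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// dumpNodes fills a fresh map via UpdateNodeNameToInfoMap and prints a
// one-line summary per node, mirroring the test and benchmark usage above.
func dumpNodes(c internalcache.Cache) error {
	nodes := make(map[string]*schedulernodeinfo.NodeInfo)
	if err := c.UpdateNodeNameToInfoMap(nodes); err != nil {
		return err
	}
	for name, info := range nodes {
		fmt.Printf("node %s: %d pods\n", name, len(info.Pods()))
	}
	return nil
}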

View File

@@ -7,7 +7,7 @@ go_library(
"node_info.go", "node_info.go",
"util.go", "util.go",
], ],
importpath = "k8s.io/kubernetes/pkg/scheduler/cache", importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper:go_default_library",

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package cache package nodeinfo
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package cache package nodeinfo
import ( import (
"testing" "testing"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package cache package nodeinfo
import ( import (
"errors" "errors"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package cache package nodeinfo
import ( import (
"fmt" "fmt"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package cache package nodeinfo
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package cache package nodeinfo
import ( import (
"reflect" "reflect"

View File

@@ -46,11 +46,11 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/api"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/core" "k8s.io/kubernetes/pkg/scheduler/core"
"k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/pkg/scheduler/factory"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/volumebinder" "k8s.io/kubernetes/pkg/scheduler/volumebinder"
) )
@@ -136,11 +136,11 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v
return pod return pod
} }
func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (api.HostPriorityList, error) { func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (api.HostPriorityList, error) {
return []api.HostPriority{}, nil return []api.HostPriority{}, nil
} }
@@ -425,8 +425,8 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
} }
// We mimic the workflow of cache behavior when a pod is removed by user. // We mimic the workflow of cache behavior when a pod is removed by user.
// Note: if the schedulercache timeout would be super short, the first pod would expire // Note: if the schedulernodeinfo timeout would be super short, the first pod would expire
// and would be removed itself (without any explicit actions on schedulercache). Even in that case, // and would be removed itself (without any explicit actions on schedulernodeinfo). Even in that case,
// explicitly AddPod will as well correct the behavior. // explicitly AddPod will as well correct the behavior.
firstPod.Spec.NodeName = node.Name firstPod.Spec.NodeName = node.Name
if err := scache.AddPod(firstPod); err != nil { if err := scache.AddPod(firstPod); err != nil {

View File

@@ -36,7 +36,7 @@ go_library(
"//pkg/controller/replicaset:go_default_library", "//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library", "//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library", "//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library", "//staging/src/k8s.io/api/batch/v1beta1:go_default_library",

View File

@@ -35,7 +35,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/daemon"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@@ -639,7 +639,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool { func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
newPod := daemon.NewPod(ds, node.Name) newPod := daemon.NewPod(ds, node.Name)
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
fit, _, err := daemon.Predicates(newPod, nodeInfo) fit, _, err := daemon.Predicates(newPod, nodeInfo)
if err != nil { if err != nil {

View File

@@ -65,8 +65,8 @@ go_library(
"//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/util/format:go_default_library",
"//pkg/master/ports:go_default_library", "//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/ssh:go_default_library", "//pkg/ssh:go_default_library",
"//pkg/util/system:go_default_library", "//pkg/util/system:go_default_library",

View File

@@ -91,7 +91,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
sshutil "k8s.io/kubernetes/pkg/ssh" sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints" taintutils "k8s.io/kubernetes/pkg/util/taints"
@@ -2655,7 +2655,7 @@ func isNodeUntainted(node *v1.Node) bool {
}, },
}, },
} }
nodeInfo := schedulercache.NewNodeInfo() nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node) nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo) fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil { if err != nil {
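
Both e2e call sites follow the same stand-alone pattern: build a fresh NodeInfo, attach the node, and run a single predicate against it. A minimal sketch under the renamed import (the node and pod literals are placeholders):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	node := &v1.Node{}
	node.Name = "node-a"
	pod := &v1.Pod{}

	// Wrap the node in a NodeInfo, then evaluate one predicate against it,
	// as isNodeUntainted and canScheduleOnNode do above.
	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(node)
	fit, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
	if err != nil {
		panic(err)
	}
	fmt.Println(fit, reasons)
}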

View File

@@ -36,8 +36,8 @@ go_test(
"//pkg/scheduler/algorithmprovider:go_default_library", "//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/api:go_default_library", "//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/factory:go_default_library", "//pkg/scheduler/factory:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/plugins/v1alpha1:go_default_library", "//pkg/scheduler/plugins/v1alpha1:go_default_library",
"//pkg/volume:go_default_library", "//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library", "//pkg/volume/testing:go_default_library",

View File

@@ -44,8 +44,8 @@ import (
_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/pkg/scheduler/factory" "k8s.io/kubernetes/pkg/scheduler/factory"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
) )
@@ -56,19 +56,19 @@ type nodeStateManager struct {
makeUnSchedulable nodeMutationFunc makeUnSchedulable nodeMutationFunc
} }
func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateOne(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { func PredicateTwo(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil return true, nil, nil
} }
func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
return []schedulerapi.HostPriority{}, nil return []schedulerapi.HostPriority{}, nil
} }
func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
return []schedulerapi.HostPriority{}, nil return []schedulerapi.HostPriority{}, nil
} }

View File

@@ -763,7 +763,7 @@ k8s.io/kubernetes/pkg/scheduler/algorithm/priorities,fgrzadkowski,0,
k8s.io/kubernetes/pkg/scheduler/algorithmprovider,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/algorithmprovider,fgrzadkowski,0,
k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults,fgrzadkowski,0,
k8s.io/kubernetes/pkg/scheduler/api/validation,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/api/validation,fgrzadkowski,0,
k8s.io/kubernetes/pkg/scheduler/cache,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/nodeinfo,fgrzadkowski,0,
k8s.io/kubernetes/pkg/scheduler/core,madhusudancs,1, k8s.io/kubernetes/pkg/scheduler/core,madhusudancs,1,
k8s.io/kubernetes/pkg/scheduler/factory,fgrzadkowski,0, k8s.io/kubernetes/pkg/scheduler/factory,fgrzadkowski,0,
k8s.io/kubernetes/pkg/scheduler/util,wojtek-t,1, k8s.io/kubernetes/pkg/scheduler/util,wojtek-t,1,