Mirror of https://github.com/optim-enterprises-bv/kubernetes.git
test/integration: scheduler extender test. This diff converts the test from internal API types (pkg/api) and the internalclientset to versioned v1 types and the release_1_5 generated clientset.
@@ -29,12 +29,12 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -51,8 +51,8 @@ const (
 	prioritize = "prioritize"
 )
 
-type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error)
-type priorityFunc func(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error)
+type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
+type priorityFunc func(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error)
 
 type priorityConfig struct {
 	function priorityFunc
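The Extender type itself lies outside these hunks; judging from the uses of e.name, e.predicates, and e.prioritizers below, the surrounding file presumably declares something like the following sketch (field names inferred from usage in this diff, not shown by it; the weight field is an assumption, since the hunk cuts off after "function priorityFunc"):

// Sketch only; inferred from usage visible in this diff.
type Extender struct {
	name         string
	predicates   []fitPredicate
	prioritizers []priorityConfig
}

// priorityConfig presumably continues with a weight field (assumption):
//	weight int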
@@ -104,15 +104,15 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 	}
 }
 
-func (e *Extender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, schedulerapi.FailedNodesMap, error) {
-	filtered := []api.Node{}
+func (e *Extender) Filter(pod *v1.Pod, nodes *v1.NodeList) (*v1.NodeList, schedulerapi.FailedNodesMap, error) {
+	filtered := []v1.Node{}
 	failedNodesMap := schedulerapi.FailedNodesMap{}
 	for _, node := range nodes.Items {
 		fits := true
 		for _, predicate := range e.predicates {
 			fit, err := predicate(pod, &node)
 			if err != nil {
-				return &api.NodeList{}, schedulerapi.FailedNodesMap{}, err
+				return &v1.NodeList{}, schedulerapi.FailedNodesMap{}, err
 			}
 			if !fit {
 				fits = false
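For orientation, a hypothetical call against the converted signature (not part of the commit): with machine_1_2_3_Predicate registered on extender1, machine1 passes and machine4 lands in the failed-nodes map.

// Hypothetical usage of Filter with the new v1 types (not in the commit).
nodes := &v1.NodeList{Items: []v1.Node{
	{ObjectMeta: v1.ObjectMeta{Name: "machine1"}},
	{ObjectMeta: v1.ObjectMeta{Name: "machine4"}},
}}
filtered, failedNodes, err := extender1.Filter(pod, nodes)
if err != nil {
	t.Fatal(err)
}
// filtered.Items holds machine1; failedNodes["machine4"] explains the rejection.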
@@ -125,10 +125,10 @@ func (e *Extender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, sch
 			failedNodesMap[node.Name] = fmt.Sprintf("extender failed: %s", e.name)
 		}
 	}
-	return &api.NodeList{Items: filtered}, failedNodesMap, nil
+	return &v1.NodeList{Items: filtered}, failedNodesMap, nil
 }
 
-func (e *Extender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
+func (e *Extender) Prioritize(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	combinedScores := map[string]int{}
 	for _, prioritizer := range e.prioritizers {
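The loop body that follows falls outside the hunk; it presumably folds each prioritizer's weighted scores into combinedScores, along these lines (a sketch, assuming priorityConfig carries an integer weight):

// Sketch of the elided aggregation (assumed shape, not shown in this diff):
priorityList, err := prioritizer.function(pod, nodes)
if err != nil {
	return &schedulerapi.HostPriorityList{}, err
}
for _, hostEntry := range *priorityList {
	combinedScores[hostEntry.Host] += hostEntry.Score * prioritizer.weight
}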
@@ -151,21 +151,21 @@ func (e *Extender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.
 	return &result, nil
 }
 
-func machine_1_2_3_Predicate(pod *api.Pod, node *api.Node) (bool, error) {
+func machine_1_2_3_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine1" || node.Name == "machine2" || node.Name == "machine3" {
 		return true, nil
 	}
 	return false, nil
 }
 
-func machine_2_3_5_Predicate(pod *api.Pod, node *api.Node) (bool, error) {
+func machine_2_3_5_Predicate(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine2" || node.Name == "machine3" || node.Name == "machine5" {
 		return true, nil
 	}
 	return false, nil
 }
 
-func machine_2_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine_2_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
@@ -177,7 +177,7 @@ func machine_2_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.Hos
 	return &result, nil
 }
 
-func machine_3_Prioritizer(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) {
+func machine_3_Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
 		score := 1
@@ -196,7 +196,7 @@ func TestSchedulerExtender(t *testing.T) {
 	ns := framework.CreateTestingNamespace("scheduler-extender", s, t)
 	defer framework.DeleteTestingNamespace(ns, s, t)
 
-	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 
 	extender1 := &Extender{
 		name: "extender1",
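Each Extender is reachable only through its serveHTTP method, so the test presumably fronts it with a test HTTP server before building the scheduler policy; a sketch, assuming "net/http/httptest" is imported earlier in the file (the import block shown starts at line 29):

// Sketch: exposing extender1 over HTTP for the scheduler to call
// (assumes net/http/httptest is imported; not shown in these hunks).
es1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
	extender1.serveHTTP(t, w, req)
}))
defer es1.Close()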
@@ -236,16 +236,16 @@ func TestSchedulerExtender(t *testing.T) {
 			},
 		},
 	}
-	policy.APIVersion = registered.GroupOrDie(api.GroupName).GroupVersion.String()
+	policy.APIVersion = registered.GroupOrDie(v1.GroupName).GroupVersion.String()
 
-	schedulerConfigFactory := factory.NewConfigFactory(clientSet, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	schedulerConfigFactory := factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy)
 	if err != nil {
 		t.Fatalf("Couldn't create scheduler config: %v", err)
 	}
 	eventBroadcaster := record.NewBroadcaster()
-	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: clientSet.Core().Events("")})
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: v1.DefaultSchedulerName})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
 	scheduler.New(schedulerConfig).Run()
 
 	defer close(schedulerConfig.StopEverything)
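The closing braces at the top of this hunk terminate a schedulerapi.Policy literal that registers the extenders with the scheduler; a hedged sketch of its likely shape (the field names follow the 1.5-era extender API, es1 comes from the sketch above, and the filter constant is assumed to sit next to prioritize in the const block; all of these are assumptions, the literal itself lies outside the hunk):

// Sketch of the policy literal this hunk's braces close (assumed shape).
policy := schedulerapi.Policy{
	ExtenderConfigs: []schedulerapi.ExtenderConfig{
		{
			URLPrefix:      es1.URL,
			FilterVerb:     filter, // assumed const, paired with prioritize
			PrioritizeVerb: prioritize,
			Weight:         3,
			EnableHttps:    false,
		},
	},
}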
@@ -253,24 +253,24 @@ func TestSchedulerExtender(t *testing.T) {
 	DoTestPodScheduling(ns, t, clientSet)
 }
 
-func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface) {
+func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
 	// NOTE: This test cannot run in parallel, because it is creating and deleting
 	// non-namespaced objects (Nodes).
-	defer cs.Core().Nodes().DeleteCollection(nil, api.ListOptions{})
+	defer cs.Core().Nodes().DeleteCollection(nil, v1.ListOptions{})
 
-	goodCondition := api.NodeCondition{
-		Type:              api.NodeReady,
-		Status:            api.ConditionTrue,
+	goodCondition := v1.NodeCondition{
+		Type:              v1.NodeReady,
+		Status:            v1.ConditionTrue,
 		Reason:            fmt.Sprintf("schedulable condition"),
 		LastHeartbeatTime: unversioned.Time{time.Now()},
 	}
-	node := &api.Node{
-		Spec: api.NodeSpec{Unschedulable: false},
-		Status: api.NodeStatus{
-			Capacity: api.ResourceList{
-				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
+	node := &v1.Node{
+		Spec: v1.NodeSpec{Unschedulable: false},
+		Status: v1.NodeStatus{
+			Capacity: v1.ResourceList{
+				v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
 			},
-			Conditions: []api.NodeCondition{goodCondition},
+			Conditions: []v1.NodeCondition{goodCondition},
 		},
 	}
 
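The predicates above expect nodes named machine1 through machine5; between this hunk and the next, the test presumably stamps the node template out five times (a sketch under that assumption):

// Sketch: creating machine1..machine5 from the template above (assumed;
// the creation loop falls between the hunks shown in this diff).
for ii := 0; ii < 5; ii++ {
	node.Name = fmt.Sprintf("machine%d", ii+1)
	if _, err := cs.Core().Nodes().Create(node); err != nil {
		t.Fatalf("Failed to create node %s: %v", node.Name, err)
	}
}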
@@ -281,10 +281,10 @@ func DoTestPodScheduling(ns *api.Namespace, t *testing.T, cs clientset.Interface
 		}
 	}
 
-	pod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
-		Spec: api.PodSpec{
-			Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
+	pod := &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{Name: "extender-test-pod"},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
 		},
 	}
 
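After the pod literal, the test presumably creates the pod and polls until the scheduler, consulting both extenders, binds it; a sketch, where podScheduled is a hypothetical wait.ConditionFunc helper of the kind scheduler integration tests of this era define:

// Sketch of the assumed tail of the test; podScheduled is hypothetical.
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
if err != nil {
	t.Fatalf("Failed to create pod: %v", err)
}
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name)); err != nil {
	t.Fatalf("Failed to schedule pod: %v", err)
}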