Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog as explicit InitFlags() so we add them as necessary
- we update the other repositories that we vendor that made a similar change from glog to klog
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by explicit InitFlags in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
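klog requires callers to register its command-line flags explicitly (glog did this implicitly in an init() function at import time), which is why the message above calls out InitFlags(). A minimal sketch of the opt-in, using only the public klog.InitFlags API; the surrounding main is illustrative:

package main

import (
    "flag"

    "k8s.io/klog"
)

func main() {
    // Register klog's flags (-v, -logtostderr, ...) on the global
    // flag set; glog used to do this automatically at import time.
    klog.InitFlags(nil)
    flag.Parse()

    klog.V(2).Info("verbose message, shown only with -v=2 or higher")
    klog.Flush()
}

Test packages that parse flags themselves need the same call, which is what the "Fix some tests by explicit InitFlags in their init() methods" bullet refers to.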
@@ -34,7 +34,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -41,7 +41,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -19,9 +19,9 @@ package predicates
 import (
     "fmt"

-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

@@ -126,26 +126,26 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
     pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)

     if err != nil {
-        glog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
+        klog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
         continue
     }

     pvName := pvc.Spec.VolumeName
     // TODO - the actual handling of unbound PVCs will be fixed by late binding design.
     if pvName == "" {
-        glog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
+        klog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
         continue
     }
     pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)

     if err != nil {
-        glog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
+        klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
         continue
     }

     csiSource := pv.Spec.PersistentVolumeSource.CSI
     if csiSource == nil {
-        glog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
+        klog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
         continue
     }
     driverName := csiSource.Driver
@@ -21,7 +21,7 @@ import (
     "fmt"
     "sync"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -140,7 +140,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
     // incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
     incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap)
     if err != nil {
-        glog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
+        klog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
         return nil
     }
     predicateMetadata := &predicateMetadata{

@@ -153,7 +153,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
         topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
     }
     for predicateName, precomputeFunc := range predicateMetadataProducers {
-        glog.V(10).Infof("Precompute: %v", predicateName)
+        klog.V(10).Infof("Precompute: %v", predicateName)
         precomputeFunc(predicateMetadata)
     }
     return predicateMetadata

@@ -502,7 +502,7 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
     }
     affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity))
     if err != nil {
-        glog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
+        klog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
         return false
     }
     return podMatchesAllAffinityTermProperties(targetPod, affinityProperties)

@@ -519,7 +519,7 @@ func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool {
     }
     properties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity))
     if err != nil {
-        glog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
+        klog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
         return false
     }
     return podMatchesAnyAffinityTermProperties(targetPod, properties)
@@ -23,7 +23,7 @@ import (
     "regexp"
     "strconv"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"

@@ -329,7 +329,7 @@ func NewMaxPDVolumeCountPredicate(
         filter = AzureDiskVolumeFilter
         volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
     default:
-        glog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
+        klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
             GCEPDVolumeFilterType, AzureDiskVolumeFilterType)
         return nil

@@ -383,9 +383,9 @@ func getMaxEBSVolume(nodeInstanceType string) int {
 func getMaxVolLimitFromEnv() int {
     if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" {
         if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
-            glog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
+            klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
         } else if parsedMaxVols <= 0 {
-            glog.Errorf("Maximum PD volumes must be a positive value, using default ")
+            klog.Errorf("Maximum PD volumes must be a positive value, using default ")
         } else {
             return parsedMaxVols
         }

@@ -413,7 +413,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
     pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
     if err != nil || pvc == nil {
         // if the PVC is not found, log the error and count the PV towards the PV limit
-        glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
+        klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
         filteredVolumes[pvID] = true
         continue
     }

@@ -424,7 +424,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
         // it was forcefully unbound by admin. The pod can still use the
         // original PV where it was bound to -> log the error and count
         // the PV towards the PV limit
-        glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
+        klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
         filteredVolumes[pvID] = true
         continue
     }

@@ -433,7 +433,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
     if err != nil || pv == nil {
         // if the PV is not found, log the error
         // and count the PV towards the PV limit
-        glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
+        klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
         filteredVolumes[pvID] = true
         continue
     }
@@ -665,12 +665,12 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
     nodeV, _ := nodeConstraints[k]
     volumeVSet, err := volumeutil.LabelZonesToSet(v)
     if err != nil {
-        glog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
+        klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
         continue
     }

     if !volumeVSet.Has(nodeV) {
-        glog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
+        klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
         return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
     }
 }

@@ -781,11 +781,11 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
         }
     }

-    if glog.V(10) {
+    if klog.V(10) {
         if len(predicateFails) == 0 {
-            // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+            // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
             // not logged. There is visible performance gain from it.
-            glog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
+            klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
                 podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
         }
     }
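The guard above is a pattern this commit preserves throughout: klog.V(10), like glog.V before it, returns a Verbose value that is only true when the process runs at verbosity 10 or higher, so wrapping the Infof call in an if skips evaluating its arguments at normal verbosity. A minimal standalone sketch of the idiom; the expensiveDump helper is hypothetical, invented for illustration:

package main

import (
    "flag"
    "fmt"

    "k8s.io/klog"
)

// expensiveDump stands in for costly log-argument construction that
// should only happen when verbose logging is actually enabled.
func expensiveDump() string {
    return fmt.Sprintf("%+v", map[string]int{"pods": 42})
}

func main() {
    klog.InitFlags(nil)
    flag.Parse() // default verbosity is 0

    // klog.Infof("state: %s", expensiveDump()) would always call
    // expensiveDump(); the guard below skips it entirely at -v<10.
    if klog.V(10) {
        klog.Infof("state: %s", expensiveDump())
    }
    klog.Flush()
}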
@@ -834,14 +834,14 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
     // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
     // if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
     //     nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
-    //     glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
+    //     klog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
     //     nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
     // }

     // Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
     if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
         nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
-        glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
+        klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
         nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
     }

@@ -933,7 +933,7 @@ type ServiceAffinity struct {
 // only should be referenced by NewServiceAffinityPredicate.
 func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) {
     if pm.pod == nil {
-        glog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
+        klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
         return
     }
     pm.serviceAffinityInUse = true

@@ -945,7 +945,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)

     // In the future maybe we will return them as part of the function.
     if errSvc != nil || errList != nil {
-        glog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
+        klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
     }
     // consider only the pods that belong to the same namespace
     pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)

@@ -1172,10 +1172,10 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
         return false, failedPredicates, error
     }

-    if glog.V(10) {
-        // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+    if klog.V(10) {
+        // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
         // not logged. There is visible performance gain from it.
-        glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
+        klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
             podName(pod), node.Name)
     }
     return true, nil, nil

@@ -1274,7 +1274,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
     existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
     if err != nil {
         if apierrors.IsNotFound(err) {
-            glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+            klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
             continue
         }
         return nil, err

@@ -1304,12 +1304,12 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
     filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
     if err != nil {
         errMessage := fmt.Sprintf("Failed to get all pods, %+v", err)
-        glog.Error(errMessage)
+        klog.Error(errMessage)
         return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
     }
     if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
         errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
-        glog.Error(errMessage)
+        klog.Error(errMessage)
         return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
     }
 }

@@ -1318,14 +1318,14 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
     // the scheduled pod anti-affinity terms
     for topologyKey, topologyValue := range node.Labels {
         if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil {
-            glog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
+            klog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
             return ErrExistingPodsAntiAffinityRulesNotMatch, nil
         }
     }
-    if glog.V(10) {
-        // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+    if klog.V(10) {
+        // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
         // not logged. There is visible performance gain from it.
-        glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
+        klog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
             podName(pod), node.Name)
     }
     return nil, nil

@@ -1382,7 +1382,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
     // in the cluster matches the namespace and selector of this pod and the pod matches
     // its own terms, then we allow the pod to pass the affinity check.
     if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
-        glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
+        klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
            podName(pod), node.Name)
         return ErrPodAffinityRulesNotMatch, nil
     }

@@ -1394,7 +1394,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
     if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
         matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
         if matchExists {
-            glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
+            klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
                 podName(pod), node.Name)
             return ErrPodAntiAffinityRulesNotMatch, nil
         }

@@ -1414,7 +1414,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
     affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms)
     if err != nil {
         errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
-        glog.Error(errMessage)
+        klog.Error(errMessage)
         return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
     }
     if termsSelectorMatch {

@@ -1429,7 +1429,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
     if len(antiAffinityTerms) > 0 {
         antiAffTermsMatch, _, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms)
         if err != nil || antiAffTermsMatch {
-            glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
+            klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
                 podName(pod), node.Name, err)
             return ErrPodAntiAffinityRulesNotMatch, nil
         }

@@ -1443,23 +1443,23 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
     // in the cluster matches the namespace and selector of this pod and the pod matches
     // its own terms, then we allow the pod to pass the affinity check.
     if termsSelectorMatchFound {
-        glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
+        klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
             podName(pod), node.Name)
         return ErrPodAffinityRulesNotMatch, nil
     }
     // Check if pod matches its own affinity properties (namespace and label selector).
     if !targetPodMatchesAffinityOfPod(pod, pod) {
-        glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
+        klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
             podName(pod), node.Name)
         return ErrPodAffinityRulesNotMatch, nil
     }
 }
 }

-    if glog.V(10) {
-        // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+    if klog.V(10) {
+        // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
         // not logged. There is visible performance gain from it.
-        glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
+        klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
             podName(pod), node.Name)
     }
     return nil, nil

@@ -1634,12 +1634,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe

     failReasons := []algorithm.PredicateFailureReason{}
     if !boundSatisfied {
-        glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+        klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
         failReasons = append(failReasons, ErrVolumeNodeConflict)
     }

     if !unboundSatisfied {
-        glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+        klog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
         failReasons = append(failReasons, ErrVolumeBindConflict)
     }

@@ -1648,6 +1648,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
     }

     // All volumes bound or matching PVs found for all unbound PVCs
-    glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+    klog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
     return true, nil, nil
 }
@@ -44,7 +44,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -30,7 +30,7 @@ import (
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

-    "github.com/golang/glog"
+    "k8s.io/klog"
 )

 // InterPodAffinity contains information to calculate inter pod affinity.

@@ -137,7 +137,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
     existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
     if err != nil {
         if apierrors.IsNotFound(err) {
-            glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+            klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
             return nil
         }
         return err

@@ -233,8 +233,8 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
         fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
     }
     result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
-    if glog.V(10) {
-        glog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
+    if klog.V(10) {
+        klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
     }
 }
 return result, nil
@@ -19,9 +19,9 @@ package priorities
 import (
     "fmt"

-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"

@@ -64,9 +64,9 @@ func (r *ResourceAllocationPriority) PriorityMap(
         score = r.scorer(&requested, &allocatable, false, 0, 0)
     }

-    if glog.V(10) {
+    if klog.V(10) {
         if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
-            glog.Infof(
+            klog.Infof(
                 "%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d",
                 pod.Name, node.Name, r.Name,
                 allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,

@@ -75,7 +75,7 @@ func (r *ResourceAllocationPriority) PriorityMap(
                 score,
             )
         } else {
-            glog.Infof(
+            klog.Infof(
                 "%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
                 pod.Name, node.Name, r.Name,
                 allocatable.MilliCPU, allocatable.Memory,
@@ -23,7 +23,7 @@ import (
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

-    "github.com/golang/glog"
+    "k8s.io/klog"
 )

 // ResourceLimitsPriorityMap is a priority function that increases score of input node by 1 if the node satisfies

@@ -52,10 +52,10 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
         score = 1
     }

-    if glog.V(10) {
-        // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+    if klog.V(10) {
+        // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
         // not logged. There is visible performance gain from it.
-        glog.Infof(
+        klog.Infof(
             "%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d",
             pod.Name, node.Name,
             allocatableResources.MilliCPU, allocatableResources.Memory,
@@ -26,7 +26,7 @@ import (
     schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
     utilnode "k8s.io/kubernetes/pkg/util/node"

-    "github.com/golang/glog"
+    "k8s.io/klog"
 )

 // When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading

@@ -94,7 +94,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
     // Ignore the previous deleted version for spreading purposes
     // (it can still be considered for resource restrictions etc.)
     if nodePod.DeletionTimestamp != nil {
-        glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
+        klog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
         continue
     }
     for _, selector := range selectors {

@@ -160,8 +160,8 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
         }
     }
     result[i].Score = int(fScore)
-    if glog.V(10) {
-        glog.Infof(
+    if klog.V(10) {
+        klog.Infof(
             "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
         )
     }
@@ -19,7 +19,7 @@ go_library(
         "//pkg/scheduler/factory:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -17,7 +17,7 @@ limitations under the License.
 package defaults

 import (
-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/apimachinery/pkg/util/sets"
     utilfeature "k8s.io/apiserver/pkg/util/feature"

@@ -207,12 +207,12 @@ func ApplyFeatureGates() {
     factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
     factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)

-    glog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
+    klog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
 }

 // Prioritizes nodes that satisfy pod's resource limits
 if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
-    glog.Infof("Registering resourcelimits priority function")
+    klog.Infof("Registering resourcelimits priority function")
     factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1)
     // Register the priority function to specific provider too.
     factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1))
pkg/scheduler/cache/BUILD vendored (2 changes)
@@ -15,7 +15,7 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
pkg/scheduler/cache/node_info.go vendored (6 changes)
@@ -22,7 +22,7 @@ import (
     "sync"
     "sync/atomic"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"

@@ -529,7 +529,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
     for i := range n.podsWithAffinity {
         k2, err := GetPodKey(n.podsWithAffinity[i])
         if err != nil {
-            glog.Errorf("Cannot get pod key, err: %v", err)
+            klog.Errorf("Cannot get pod key, err: %v", err)
             continue
         }
         if k1 == k2 {

@@ -542,7 +542,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
     for i := range n.pods {
         k2, err := GetPodKey(n.pods[i])
         if err != nil {
-            glog.Errorf("Cannot get pod key, err: %v", err)
+            klog.Errorf("Cannot get pod key, err: %v", err)
             continue
         }
         if k1 == k2 {
@@ -30,7 +30,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -15,7 +15,7 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -23,10 +23,10 @@ import (
     "hash/fnv"
     "sync"

-    "github.com/golang/glog"
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"

@@ -143,7 +143,7 @@ func (c *Cache) predicateKeysToIDs(predicateKeys sets.String) []int {
     if id, ok := c.predicateIDMap[predicateKey]; ok {
         predicateIDs = append(predicateIDs, id)
     } else {
-        glog.Errorf("predicate key %q not found", predicateKey)
+        klog.Errorf("predicate key %q not found", predicateKey)
     }
 }
 return predicateIDs

@@ -160,7 +160,7 @@ func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
     for _, n := range c.nodeToCache {
         n.invalidatePreds(predicateIDs)
     }
-    glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
+    klog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)

 }

@@ -175,7 +175,7 @@ func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.S
     if n, ok := c.nodeToCache[nodeName]; ok {
         n.invalidatePreds(predicateIDs)
     }
-    glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
+    klog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
 }

 // InvalidateAllPredicatesOnNode clears all cached results for one node.

@@ -185,7 +185,7 @@ func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
     if node, ok := c.nodeToCache[nodeName]; ok {
         node.invalidate()
     }
-    glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
+    klog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
 }

 // InvalidateCachedPredicateItemForPodAdd is a wrapper of

@@ -344,7 +344,7 @@ func (n *NodeCache) updateResult(
     }
     n.predicateGenerations[predicateID]++

-    glog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
+    klog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
         nodeInfo.Node().Name, predicateKey, podName, predicateItem)
 }
@@ -26,7 +26,7 @@ import (
     "sync/atomic"
     "time"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     policy "k8s.io/api/policy/v1beta1"

@@ -253,7 +253,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister,
         return nil, nil, nil, err
     }
     if !podEligibleToPreemptOthers(pod, g.cachedNodeInfoMap) {
-        glog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
+        klog.V(5).Infof("Pod %v/%v is not eligible for more preemption.", pod.Namespace, pod.Name)
         return nil, nil, nil, nil
     }
     allNodes, err := nodeLister.List()

@@ -265,7 +265,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister,
     }
     potentialNodes := nodesWherePreemptionMightHelp(allNodes, fitError.FailedPredicates)
     if len(potentialNodes) == 0 {
-        glog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name)
+        klog.V(3).Infof("Preemption will not help schedule pod %v/%v on any node.", pod.Namespace, pod.Name)
         // In this case, we should clean-up any existing nominated node name of the pod.
         return nil, nil, []*v1.Pod{pod}, nil
     }

@@ -321,7 +321,7 @@ func (g *genericScheduler) processPreemptionWithExtenders(
     )
     if err != nil {
         if extender.IsIgnorable() {
-            glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
+            klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
                 extender, err)
             continue
         }

@@ -468,7 +468,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
     filteredList, failedMap, err := extender.Filter(pod, filtered, g.cachedNodeInfoMap)
     if err != nil {
         if extender.IsIgnorable() {
-            glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
+            klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set",
                 extender, err)
             continue
         } else {

@@ -599,7 +599,7 @@ func podFitsOnNode(
     failedPredicates = append(failedPredicates, reasons...)
     // if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails.
     if !alwaysCheckAllPredicates {
-        glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " +
+        klog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " +
             "evaluation is short circuited and there are chances " +
             "of other predicates failing as well.")
         break

@@ -695,9 +695,9 @@ func PrioritizeNodes(
     if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil {
         appendError(err)
     }
-    if glog.V(10) {
+    if klog.V(10) {
         for _, hostPriority := range results[index] {
-            glog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score)
+            klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score)
         }
     }
 }(i, priorityConfig)

@@ -735,8 +735,8 @@ func PrioritizeNodes(
     mu.Lock()
     for i := range *prioritizedList {
         host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
-        if glog.V(10) {
-            glog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score)
+        if klog.V(10) {
+            klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score)
         }
         combinedScores[host] += score * weight
     }

@@ -750,9 +750,9 @@ func PrioritizeNodes(
         }
     }

-    if glog.V(10) {
+    if klog.V(10) {
         for i := range result {
-            glog.Infof("Host %s => Score %d", result[i].Host, result[i].Score)
+            klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score)
         }
     }
     return result, nil

@@ -881,7 +881,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims)
     if lenNodes2 > 0 {
         return minNodes2[0]
     }
-    glog.Errorf("Error in logic of node scoring for preemption. We should never reach here!")
+    klog.Errorf("Error in logic of node scoring for preemption. We should never reach here!")
     return nil
 }

@@ -1016,7 +1016,7 @@ func selectVictimsOnNode(
     // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance.
     if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil); !fits {
         if err != nil {
-            glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
+            klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
         }
         return nil, 0, false
     }

@@ -1032,7 +1032,7 @@ func selectVictimsOnNode(
     if !fits {
         removePod(p)
         victims = append(victims, p)
-        glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name)
+        klog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name)
     }
     return fits
 }

@@ -1087,7 +1087,7 @@ func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedP
         }
     }
     if !found || !unresolvableReasonExist {
-        glog.V(3).Infof("Node %v is a potential node for preemption.", node.Name)
+        klog.V(3).Infof("Node %v is a potential node for preemption.", node.Name)
         potentialNodes = append(potentialNodes, node)
     }
 }
@@ -50,7 +50,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -25,7 +25,7 @@ import (
     "reflect"
     "time"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"

@@ -472,7 +472,7 @@ func (c *configFactory) skipPodUpdate(pod *v1.Pod) bool {
     if !reflect.DeepEqual(assumedPodCopy, podCopy) {
         return false
     }
-    glog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name)
+    klog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name)
     return true
 }

@@ -480,7 +480,7 @@ func (c *configFactory) onPvAdd(obj interface{}) {
     if c.enableEquivalenceClassCache {
         pv, ok := obj.(*v1.PersistentVolume)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolume: %v", obj)
+            klog.Errorf("cannot convert to *v1.PersistentVolume: %v", obj)
             return
         }
         c.invalidatePredicatesForPv(pv)

@@ -498,12 +498,12 @@ func (c *configFactory) onPvUpdate(old, new interface{}) {
     if c.enableEquivalenceClassCache {
         newPV, ok := new.(*v1.PersistentVolume)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolume: %v", new)
+            klog.Errorf("cannot convert to *v1.PersistentVolume: %v", new)
             return
         }
         oldPV, ok := old.(*v1.PersistentVolume)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolume: %v", old)
+            klog.Errorf("cannot convert to *v1.PersistentVolume: %v", old)
             return
         }
         c.invalidatePredicatesForPvUpdate(oldPV, newPV)

@@ -549,11 +549,11 @@ func (c *configFactory) onPvDelete(obj interface{}) {
         var ok bool
         pv, ok = t.Obj.(*v1.PersistentVolume)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolume: %v", t.Obj)
+            klog.Errorf("cannot convert to *v1.PersistentVolume: %v", t.Obj)
             return
         }
     default:
-        glog.Errorf("cannot convert to *v1.PersistentVolume: %v", t)
+        klog.Errorf("cannot convert to *v1.PersistentVolume: %v", t)
         return
     }
     c.invalidatePredicatesForPv(pv)
@@ -600,7 +600,7 @@ func (c *configFactory) onPvcAdd(obj interface{}) {
     if c.enableEquivalenceClassCache {
         pvc, ok := obj.(*v1.PersistentVolumeClaim)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", obj)
+            klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", obj)
             return
         }
         c.invalidatePredicatesForPvc(pvc)

@@ -616,12 +616,12 @@ func (c *configFactory) onPvcUpdate(old, new interface{}) {
     if c.enableEquivalenceClassCache {
         newPVC, ok := new.(*v1.PersistentVolumeClaim)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new)
+            klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new)
             return
         }
         oldPVC, ok := old.(*v1.PersistentVolumeClaim)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old)
+            klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old)
             return
         }
         c.invalidatePredicatesForPvcUpdate(oldPVC, newPVC)

@@ -639,11 +639,11 @@ func (c *configFactory) onPvcDelete(obj interface{}) {
         var ok bool
         pvc, ok = t.Obj.(*v1.PersistentVolumeClaim)
         if !ok {
-            glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t.Obj)
+            klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t.Obj)
             return
         }
     default:
-        glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t)
+        klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t)
         return
     }
     c.invalidatePredicatesForPvc(pvc)

@@ -692,7 +692,7 @@ func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.Persistent
 func (c *configFactory) onStorageClassAdd(obj interface{}) {
     sc, ok := obj.(*storagev1.StorageClass)
     if !ok {
-        glog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj)
+        klog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj)
         return
     }

@@ -717,11 +717,11 @@ func (c *configFactory) onStorageClassDelete(obj interface{}) {
         var ok bool
         sc, ok = t.Obj.(*storagev1.StorageClass)
         if !ok {
-            glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj)
+            klog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj)
             return
         }
     default:
-        glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t)
+        klog.Errorf("cannot convert to *storagev1.StorageClass: %v", t)
         return
     }
     c.invalidatePredicatesForStorageClass(sc)

@@ -794,12 +794,12 @@ func (c *configFactory) GetScheduledPodLister() corelisters.PodLister {
 func (c *configFactory) addPodToCache(obj interface{}) {
     pod, ok := obj.(*v1.Pod)
     if !ok {
-        glog.Errorf("cannot convert to *v1.Pod: %v", obj)
+        klog.Errorf("cannot convert to *v1.Pod: %v", obj)
         return
     }

     if err := c.schedulerCache.AddPod(pod); err != nil {
-        glog.Errorf("scheduler cache AddPod failed: %v", err)
+        klog.Errorf("scheduler cache AddPod failed: %v", err)
     }

     c.podQueue.AssignedPodAdded(pod)
@@ -811,12 +811,12 @@ func (c *configFactory) addPodToCache(obj interface{}) {
 func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) {
     oldPod, ok := oldObj.(*v1.Pod)
     if !ok {
-        glog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj)
+        klog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj)
         return
     }
     newPod, ok := newObj.(*v1.Pod)
     if !ok {
-        glog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj)
+        klog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj)
         return
     }

@@ -826,7 +826,7 @@ func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) {
     // snapshotted before updates are written, we would update equivalence
     // cache with stale information which is based on snapshot of old cache.
     if err := c.schedulerCache.UpdatePod(oldPod, newPod); err != nil {
-        glog.Errorf("scheduler cache UpdatePod failed: %v", err)
+        klog.Errorf("scheduler cache UpdatePod failed: %v", err)
     }

     c.invalidateCachedPredicatesOnUpdatePod(newPod, oldPod)

@@ -904,11 +904,11 @@ func (c *configFactory) deletePodFromCache(obj interface{}) {
         var ok bool
         pod, ok = t.Obj.(*v1.Pod)
         if !ok {
-            glog.Errorf("cannot convert to *v1.Pod: %v", t.Obj)
+            klog.Errorf("cannot convert to *v1.Pod: %v", t.Obj)
             return
         }
     default:
-        glog.Errorf("cannot convert to *v1.Pod: %v", t)
+        klog.Errorf("cannot convert to *v1.Pod: %v", t)
         return
     }
     // NOTE: Updates must be written to scheduler cache before invalidating

@@ -917,7 +917,7 @@ func (c *configFactory) deletePodFromCache(obj interface{}) {
     // snapshotted before updates are written, we would update equivalence
     // cache with stale information which is based on snapshot of old cache.
     if err := c.schedulerCache.RemovePod(pod); err != nil {
-        glog.Errorf("scheduler cache RemovePod failed: %v", err)
+        klog.Errorf("scheduler cache RemovePod failed: %v", err)
     }

     c.invalidateCachedPredicatesOnDeletePod(pod)

@@ -948,7 +948,7 @@ func (c *configFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) {
 func (c *configFactory) addNodeToCache(obj interface{}) {
     node, ok := obj.(*v1.Node)
     if !ok {
-        glog.Errorf("cannot convert to *v1.Node: %v", obj)
+        klog.Errorf("cannot convert to *v1.Node: %v", obj)
         return
     }

@@ -960,7 +960,7 @@ func (c *configFactory) addNodeToCache(obj interface{}) {
     }

     if err := c.schedulerCache.AddNode(node); err != nil {
-        glog.Errorf("scheduler cache AddNode failed: %v", err)
+        klog.Errorf("scheduler cache AddNode failed: %v", err)
     }

     c.podQueue.MoveAllToActiveQueue()

@@ -970,12 +970,12 @@ func (c *configFactory) addNodeToCache(obj interface{}) {
 func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) {
     oldNode, ok := oldObj.(*v1.Node)
     if !ok {
-        glog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj)
+        klog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj)
         return
     }
     newNode, ok := newObj.(*v1.Node)
     if !ok {
-        glog.Errorf("cannot convert newObj to *v1.Node: %v", newObj)
+        klog.Errorf("cannot convert newObj to *v1.Node: %v", newObj)
         return
     }

@@ -985,7 +985,7 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) {
     // snapshotted before updates are written, we would update equivalence
     // cache with stale information which is based on snapshot of old cache.
     if err := c.schedulerCache.UpdateNode(oldNode, newNode); err != nil {
-        glog.Errorf("scheduler cache UpdateNode failed: %v", err)
+        klog.Errorf("scheduler cache UpdateNode failed: %v", err)
     }

     c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode)

@@ -1019,11 +1019,11 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,

     oldTaints, oldErr := helper.GetTaintsFromNodeAnnotations(oldNode.GetAnnotations())
     if oldErr != nil {
-        glog.Errorf("Failed to get taints from old node annotation for equivalence cache")
+        klog.Errorf("Failed to get taints from old node annotation for equivalence cache")
     }
     newTaints, newErr := helper.GetTaintsFromNodeAnnotations(newNode.GetAnnotations())
     if newErr != nil {
-        glog.Errorf("Failed to get taints from new node annotation for equivalence cache")
+        klog.Errorf("Failed to get taints from new node annotation for equivalence cache")
     }
     if !reflect.DeepEqual(oldTaints, newTaints) ||
         !reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) {
@@ -1070,11 +1070,11 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) {
         var ok bool
         node, ok = t.Obj.(*v1.Node)
         if !ok {
-            glog.Errorf("cannot convert to *v1.Node: %v", t.Obj)
+            klog.Errorf("cannot convert to *v1.Node: %v", t.Obj)
             return
         }
     default:
-        glog.Errorf("cannot convert to *v1.Node: %v", t)
+        klog.Errorf("cannot convert to *v1.Node: %v", t)
         return
     }
     // NOTE: Updates must be written to scheduler cache before invalidating

@@ -1083,7 +1083,7 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) {
     // snapshotted before updates are written, we would update equivalence
     // cache with stale information which is based on snapshot of old cache.
    if err := c.schedulerCache.RemoveNode(node); err != nil {
-        glog.Errorf("scheduler cache RemoveNode failed: %v", err)
+        klog.Errorf("scheduler cache RemoveNode failed: %v", err)
     }
     if c.enableEquivalenceClassCache {
         c.equivalencePodCache.InvalidateAllPredicatesOnNode(node.GetName())

@@ -1097,7 +1097,7 @@ func (c *configFactory) Create() (*Config, error) {

 // Creates a scheduler from the name of a registered algorithm provider.
 func (c *configFactory) CreateFromProvider(providerName string) (*Config, error) {
-    glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
+    klog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
     provider, err := GetAlgorithmProvider(providerName)
     if err != nil {
         return nil, err

@@ -1107,7 +1107,7 @@ func (c *configFactory) CreateFromProvider(providerName string) (*Config, error)

 // Creates a scheduler from the configuration file
 func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, error) {
-    glog.V(2).Infof("Creating scheduler from configuration: %v", policy)
+    klog.V(2).Infof("Creating scheduler from configuration: %v", policy)

     // validate the policy configuration
     if err := validation.ValidatePolicy(policy); err != nil {

@@ -1116,7 +1116,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e

     predicateKeys := sets.NewString()
     if policy.Predicates == nil {
-        glog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider)
+        klog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider)
         provider, err := GetAlgorithmProvider(DefaultProvider)
         if err != nil {
             return nil, err

@@ -1124,14 +1124,14 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e
         predicateKeys = provider.FitPredicateKeys
     } else {
         for _, predicate := range policy.Predicates {
-            glog.V(2).Infof("Registering predicate: %s", predicate.Name)
+            klog.V(2).Infof("Registering predicate: %s", predicate.Name)
             predicateKeys.Insert(RegisterCustomFitPredicate(predicate))
         }
     }

     priorityKeys := sets.NewString()
     if policy.Priorities == nil {
-        glog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider)
+        klog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider)
         provider, err := GetAlgorithmProvider(DefaultProvider)
         if err != nil {
             return nil, err

@@ -1139,7 +1139,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e
         priorityKeys = provider.PriorityFunctionKeys
     } else {
         for _, priority := range policy.Priorities {
-            glog.V(2).Infof("Registering priority: %s", priority.Name)
+            klog.V(2).Infof("Registering priority: %s", priority.Name)
             priorityKeys.Insert(RegisterCustomPriorityFunction(priority))
         }
     }

@@ -1148,7 +1148,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e
     if len(policy.ExtenderConfigs) != 0 {
         ignoredExtendedResources := sets.NewString()
         for ii := range policy.ExtenderConfigs {
-            glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii])
+            klog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii])
             extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii])
             if err != nil {
                 return nil, err
@@ -1196,7 +1196,7 @@ func (c *configFactory) getBinderFunc(extenders []algorithm.SchedulerExtender) f
|
||||
|
||||
// Creates a scheduler from a set of registered fit predicate keys and priority keys.
|
||||
func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) {
|
||||
glog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)
|
||||
klog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)
|
||||
|
||||
if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 {
|
||||
return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight())
|
||||
@@ -1225,7 +1225,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
|
||||
// Init equivalence class cache
|
||||
if c.enableEquivalenceClassCache {
|
||||
c.equivalencePodCache = equivalence.NewCache(predicates.Ordering())
|
||||
glog.Info("Created equivalence class cache")
|
||||
klog.Info("Created equivalence class cache")
|
||||
}
|
||||
|
||||
algo := core.NewGenericScheduler(
|
||||
@@ -1331,10 +1331,10 @@ func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) {
|
||||
func (c *configFactory) getNextPod() *v1.Pod {
|
||||
pod, err := c.podQueue.Pop()
|
||||
if err == nil {
|
||||
glog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name)
|
||||
klog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name)
|
||||
return pod
|
||||
}
|
||||
glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
|
||||
klog.Errorf("Error while retrieving next pod from scheduling queue: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1433,10 +1433,10 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) core
|
||||
func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error) {
|
||||
return func(pod *v1.Pod, err error) {
|
||||
if err == core.ErrNoNodesAvailable {
|
||||
glog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
|
||||
klog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
|
||||
} else {
|
||||
if _, ok := err.(*core.FitError); ok {
|
||||
glog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err)
|
||||
klog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err)
|
||||
} else if errors.IsNotFound(err) {
|
||||
if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" {
|
||||
nodeName := errStatus.Status().Details.Name
|
||||
@@ -1458,7 +1458,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue
|
||||
}
|
||||
}
|
||||
} else {
|
||||
glog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err)
|
||||
klog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1480,7 +1480,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue
|
||||
if !util.PodPriorityEnabled() {
|
||||
entry := backoff.GetEntry(podID)
|
||||
if !entry.TryWait(backoff.MaxDuration()) {
|
||||
glog.Warningf("Request for pod %v already in flight, abandoning", podID)
|
||||
klog.Warningf("Request for pod %v already in flight, abandoning", podID)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1500,7 +1500,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue
|
||||
break
|
||||
}
|
||||
if errors.IsNotFound(err) {
|
||||
glog.Warningf("A pod %v no longer exists", podID)
|
||||
klog.Warningf("A pod %v no longer exists", podID)
|
||||
|
||||
if c.volumeBinder != nil {
|
||||
// Volume binder only wants to keep unassigned pods
|
||||
@@ -1508,7 +1508,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue
|
||||
}
|
||||
return
|
||||
}
|
||||
glog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err)
|
||||
klog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err)
|
||||
if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff {
|
||||
getBackoff = maximalGetBackoff
|
||||
}
|
||||
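
The retry loop above doubles getBackoff and clamps it at maximalGetBackoff. A minimal, runnable sketch of that capped doubling; the constant value here is a hypothetical stand-in, not the scheduler's actual setting:

package main

import (
	"fmt"
	"time"
)

// maximalGetBackoff is an assumed cap, chosen only for this illustration.
const maximalGetBackoff = 100 * time.Millisecond

// nextBackoff applies the same double-then-clamp step as MakeDefaultErrorFunc.
func nextBackoff(getBackoff time.Duration) time.Duration {
	if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff {
		getBackoff = maximalGetBackoff
	}
	return getBackoff
}

func main() {
	d := 10 * time.Millisecond
	for i := 0; i < 5; i++ {
		d = nextBackoff(d)
		fmt.Println(d) // 20ms, 40ms, 80ms, 100ms, 100ms
	}
}
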
@@ -1542,7 +1542,7 @@ type binder struct {

// Bind just does a POST binding RPC.
func (b *binder) Bind(binding *v1.Binding) error {
glog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name)
klog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name)
return b.Client.CoreV1().Pods(binding.Namespace).Bind(binding)
}

@@ -1551,7 +1551,7 @@ type podConditionUpdater struct {
}

func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error {
glog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
if podutil.UpdatePodCondition(&pod.Status, condition) {
_, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
return err

@@ -30,7 +30,7 @@ import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"

"github.com/golang/glog"
"k8s.io/klog"
)

// PluginFactoryArgs are passed to all plugin factory functions.
@@ -233,12 +233,12 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
}
} else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok {
// checking to see if a pre-defined predicate is requested
glog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
klog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name)
return policy.Name
}

if predicateFactory == nil {
glog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
klog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name)
}

return RegisterFitPredicateFactory(policy.Name, predicateFactory)
@@ -345,7 +345,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
}
}
} else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok {
glog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
klog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name)
// set/update the weight based on the policy
pcf = &PriorityConfigFactory{
Function: existingPcf.Function,
@@ -355,7 +355,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
}

if pcf == nil {
glog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
klog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name)
}

return RegisterPriorityConfigFactory(policy.Name, *pcf)
@@ -369,7 +369,7 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s
}
shape, err := priorities.NewFunctionShape(points)
if err != nil {
glog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error())
}
return shape
}
@@ -500,7 +500,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")

func validateAlgorithmNameOrDie(name string) {
if !validName.MatchString(name) {
glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
klog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
}
}

@@ -514,7 +514,7 @@ func validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) {
numArgs++
}
if numArgs != 1 {
glog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
klog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name)
}
}
}
@@ -532,7 +532,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
numArgs++
}
if numArgs != 1 {
glog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
klog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name)
}
}
}
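
These swaps stay line-for-line because klog preserves glog's call surface (V, Info, Infof, Warningf, Errorf, Fatalf). The one behavioral difference worth noting is flag registration: klog exposes an explicit InitFlags call rather than registering its flags as an import side effect. A minimal sketch of wiring that up in an arbitrary binary; this main is illustrative only and not part of the scheduler:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// InitFlags registers -v, -logtostderr, and friends on flag.CommandLine.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush() // klog buffers; flush before exit

	klog.V(2).Info("only emitted when run with -v=2 or higher")
	klog.Warning("always emitted")
}
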
pkg/scheduler/internal/cache/BUILD vendored
@@ -18,7 +18,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

pkg/scheduler/internal/cache/cache.go vendored
@@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/features"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

"github.com/golang/glog"
"k8s.io/klog"
)

var (
@@ -205,7 +205,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
cache.mu.RLock()
defer cache.mu.RUnlock()

glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
klog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
currState, ok := cache.podStates[key]
if ok && cache.assumedPods[key] {
dl := now.Add(cache.ttl)
@@ -289,7 +289,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
case ok && cache.assumedPods[key]:
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
// The pod was added to a different node than it was assumed to.
glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
klog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
// Clean this up.
cache.removePod(currState.pod)
cache.addPod(pod)
@@ -325,8 +325,8 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
// before Update event, in which case the state would change from Assumed to Added.
case ok && !cache.assumedPods[key]:
if currState.pod.Spec.NodeName != newPod.Spec.NodeName {
glog.Errorf("Pod %v updated on a different node than previously added to.", key)
glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
klog.Errorf("Pod %v updated on a different node than previously added to.", key)
klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
}
if err := cache.updatePod(oldPod, newPod); err != nil {
return err
@@ -353,8 +353,8 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
// before Remove event, in which case the state would change from Assumed to Added.
case ok && !cache.assumedPods[key]:
if currState.pod.Spec.NodeName != pod.Spec.NodeName {
glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
klog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName)
klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions")
}
err := cache.removePod(currState.pod)
if err != nil {
@@ -526,14 +526,14 @@ func (cache *schedulerCache) cleanupAssumedPods(now time.Time) {
panic("Key found in assumed set but not in podStates. Potentially a logical error.")
}
if !ps.bindingFinished {
glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.",
klog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.",
ps.pod.Namespace, ps.pod.Name)
continue
}
if now.After(*ps.deadline) {
glog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name)
klog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name)
if err := cache.expirePod(key, ps); err != nil {
glog.Errorf("ExpirePod failed for %s: %v", key, err)
klog.Errorf("ExpirePod failed for %s: %v", key, err)
}
}
}
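
The V(n) calls threaded through the cache are cheap guards: klog.V returns a Verbose value whose logging methods are no-ops unless the process was started with verbosity of at least n, so the formatting work is skipped entirely at lower levels. A small illustration; the package and helper are hypothetical:

package cachedemo // hypothetical package, for illustration only

import "k8s.io/klog"

// logFinishedBinding mirrors the V(5) call in finishBinding above: the
// message is formatted and emitted only when running with -v=5 or higher.
func logFinishedBinding(key string) {
	klog.V(5).Infof("Finished binding for pod %v. Can be expired.", key)
}
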
pkg/scheduler/internal/cache/debugger/BUILD vendored
@@ -16,7 +16,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -20,10 +20,10 @@ import (
"sort"
"strings"

"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@@ -39,8 +39,8 @@ type CacheComparer struct {

// Compare compares the nodes and pods of NodeLister with Cache.Snapshot.
func (c *CacheComparer) Compare() error {
glog.V(3).Info("cache comparer started")
defer glog.V(3).Info("cache comparer finished")
klog.V(3).Info("cache comparer started")
defer klog.V(3).Info("cache comparer finished")

nodes, err := c.NodeLister.List(labels.Everything())
if err != nil {
@@ -57,11 +57,11 @@ func (c *CacheComparer) Compare() error {
waitingPods := c.PodQueue.WaitingPods()

if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 {
glog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
}

if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 {
glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
}

return nil

@@ -20,7 +20,7 @@ import (
"fmt"
"strings"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/cache"
@@ -44,9 +44,9 @@ func (d *CacheDumper) DumpAll() {
// dumpNodes writes NodeInfo to the scheduler logs.
func (d *CacheDumper) dumpNodes() {
snapshot := d.cache.Snapshot()
glog.Info("Dump of cached NodeInfo")
klog.Info("Dump of cached NodeInfo")
for _, nodeInfo := range snapshot.Nodes {
glog.Info(printNodeInfo(nodeInfo))
klog.Info(printNodeInfo(nodeInfo))
}
}

@@ -57,7 +57,7 @@ func (d *CacheDumper) dumpSchedulingQueue() {
for _, p := range waitingPods {
podData.WriteString(printPod(p))
}
glog.Infof("Dump of scheduling queue:\n%s", podData.String())
klog.Infof("Dump of scheduling queue:\n%s", podData.String())
}

// printNodeInfo writes parts of NodeInfo to a string.

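A comparer like this is only useful if something invokes it periodically. One plausible way to drive it, using the apimachinery wait helper; the interface, period, and wiring here are assumptions made for the sketch, not necessarily how the scheduler hooks it up:

package debugdemo // hypothetical wiring, for illustration only

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

// comparer abstracts the Compare method shown in the diff above.
type comparer interface {
	Compare() error
}

// runComparerLoop calls Compare on a timer until stopCh is closed.
func runComparerLoop(c comparer, stopCh <-chan struct{}) {
	wait.Until(func() {
		if err := c.Compare(); err != nil {
			klog.Errorf("cache comparison failed: %v", err)
		}
	}, 10*time.Second, stopCh) // the period is an arbitrary choice here
}
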
pkg/scheduler/internal/cache/node_tree.go vendored
@@ -23,7 +23,7 @@ import (
"k8s.io/api/core/v1"
utilnode "k8s.io/kubernetes/pkg/util/node"

"github.com/golang/glog"
"k8s.io/klog"
)

// NodeTree is a tree-like data structure that holds node names in each zone. Zone names are
@@ -46,7 +46,7 @@ type nodeArray struct {

func (na *nodeArray) next() (nodeName string, exhausted bool) {
if len(na.nodes) == 0 {
glog.Error("The nodeArray is empty. It should have been deleted from NodeTree.")
klog.Error("The nodeArray is empty. It should have been deleted from NodeTree.")
return "", false
}
if na.lastIndex >= len(na.nodes) {
@@ -81,7 +81,7 @@ func (nt *NodeTree) addNode(n *v1.Node) {
if na, ok := nt.tree[zone]; ok {
for _, nodeName := range na.nodes {
if nodeName == n.Name {
glog.Warningf("node %v already exist in the NodeTree", n.Name)
klog.Warningf("node %v already exist in the NodeTree", n.Name)
return
}
}
@@ -90,7 +90,7 @@ func (nt *NodeTree) addNode(n *v1.Node) {
nt.zones = append(nt.zones, zone)
nt.tree[zone] = &nodeArray{nodes: []string{n.Name}, lastIndex: 0}
}
glog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone)
klog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone)
nt.NumNodes++
}

@@ -110,13 +110,13 @@ func (nt *NodeTree) removeNode(n *v1.Node) error {
if len(na.nodes) == 0 {
nt.removeZone(zone)
}
glog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone)
klog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone)
nt.NumNodes--
return nil
}
}
}
glog.Errorf("Node %v in group %v was not found", n.Name, zone)
klog.Errorf("Node %v in group %v was not found", n.Name, zone)
return fmt.Errorf("node %v in group %v was not found", n.Name, zone)
}

@@ -13,7 +13,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -32,7 +32,7 @@ import (
"reflect"
"sync"

"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -219,7 +219,7 @@ func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) {
if len(nnn) > 0 {
for _, np := range p.nominatedPods[nnn] {
if np.UID == pod.UID {
glog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name)
klog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name)
return
}
}
@@ -258,10 +258,10 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error {
defer p.lock.Unlock()
err := p.activeQ.Add(pod)
if err != nil {
glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
} else {
if p.unschedulableQ.get(pod) != nil {
glog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name)
klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name)
p.deleteNominatedPodIfExists(pod)
p.unschedulableQ.delete(pod)
}
@@ -284,7 +284,7 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error {
}
err := p.activeQ.Add(pod)
if err != nil {
glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
} else {
p.addNominatedPodIfNeeded(pod)
p.cond.Broadcast()
@@ -433,7 +433,7 @@ func (p *PriorityQueue) MoveAllToActiveQueue() {
defer p.lock.Unlock()
for _, pod := range p.unschedulableQ.pods {
if err := p.activeQ.Add(pod); err != nil {
glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
}
}
p.unschedulableQ.clear()
@@ -448,7 +448,7 @@ func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) {
if err := p.activeQ.Add(pod); err == nil {
p.unschedulableQ.delete(pod)
} else {
glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err)
}
}
p.receivedMoveRequest = true
@@ -469,7 +469,7 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(up, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
glog.Errorf("Error getting label selectors for pod: %v.", up.Name)
klog.Errorf("Error getting label selectors for pod: %v.", up.Name)
}
if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
podsToMove = append(podsToMove, up)
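
Note the severity split that the rename leaves intact: recoverable queue failures are logged with klog.Errorf and the scheduler carries on, while the configuration errors in plugins.go use klog.Fatalf, which, like glog's, logs the message (with stack traces) and then terminates the process. A toy contrast with hypothetical helpers:

package severitydemo // hypothetical, for illustration only

import "k8s.io/klog"

// handleQueueError is recoverable: log and move on, as PriorityQueue.Add does.
func handleQueueError(err error) {
	klog.Errorf("Error adding pod to the scheduling queue: %v", err)
}

// mustHavePredicate is unrecoverable: Fatalf logs and then exits the process.
func mustHavePredicate(ok bool, name string) {
	if !ok {
		klog.Fatalf("Invalid configuration: Predicate type not found for %s", name)
	}
}
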
@@ -45,7 +45,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/util"

"github.com/golang/glog"
"k8s.io/klog"
)

const (
@@ -295,20 +295,20 @@ func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) {
// It returns the node name and an error if any.
func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) {
if !util.PodPriorityEnabled() || sched.config.DisablePreemption {
glog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." +
" No preemption is performed.")
return "", nil
}
preemptor, err := sched.config.PodPreemptor.GetUpdatedPod(preemptor)
if err != nil {
glog.Errorf("Error getting the updated preemptor pod object: %v", err)
klog.Errorf("Error getting the updated preemptor pod object: %v", err)
return "", err
}

node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr)
metrics.PreemptionVictims.Set(float64(len(victims)))
if err != nil {
glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name)
klog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name)
return "", err
}
var nodeName = ""
@@ -316,12 +316,12 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
nodeName = node.Name
err = sched.config.PodPreemptor.SetNominatedNodeName(preemptor, nodeName)
if err != nil {
glog.Errorf("Error in preemption process. Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err)
klog.Errorf("Error in preemption process. Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err)
return "", err
}
for _, victim := range victims {
if err := sched.config.PodPreemptor.DeletePod(victim); err != nil {
glog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
return "", err
}
sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName)
@@ -334,7 +334,7 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e
for _, p := range nominatedPodsToClear {
rErr := sched.config.PodPreemptor.RemoveNominatedNodeName(p)
if rErr != nil {
glog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr)
klog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr)
// We do not return as this error is not critical.
}
}
@@ -376,14 +376,14 @@ func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error {
var reason string
var eventType string

glog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
klog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
err := sched.config.VolumeBinder.Binder.BindPodVolumes(assumed)
if err != nil {
glog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err)
klog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err)

// Unassume the Pod and retry scheduling
if forgetErr := sched.config.SchedulerCache.ForgetPod(assumed); forgetErr != nil {
glog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr)
klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr)
}

reason = "VolumeBindingFailed"
@@ -398,7 +398,7 @@ func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error {
return err
}

glog.V(5).Infof("Success binding volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
klog.V(5).Infof("Success binding volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
return nil
}

@@ -416,7 +416,7 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error {
// snapshotted before updates are written, we would update equivalence
// cache with stale information which is based on snapshot of old cache.
if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil {
glog.Errorf("scheduler cache AssumePod failed: %v", err)
klog.Errorf("scheduler cache AssumePod failed: %v", err)

// This is most probably result of a BUG in retrying logic.
// We report an error here so that pod scheduling can be retried.
@@ -451,12 +451,12 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error {
// it's atomic with setting host.
err := sched.config.GetBinder(assumed).Bind(b)
if finErr := sched.config.SchedulerCache.FinishBinding(assumed); finErr != nil {
glog.Errorf("scheduler cache FinishBinding failed: %v", finErr)
klog.Errorf("scheduler cache FinishBinding failed: %v", finErr)
}
if err != nil {
glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name)
klog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name)
if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil {
glog.Errorf("scheduler cache ForgetPod failed: %v", err)
klog.Errorf("scheduler cache ForgetPod failed: %v", err)
}
sched.config.Error(assumed, err)
sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err)
@@ -483,11 +483,11 @@ func (sched *Scheduler) scheduleOne() {
}
if pod.DeletionTimestamp != nil {
sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
glog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
klog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
return
}

glog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name)
klog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name)

// Synchronously attempt to find a fit for the pod.
start := time.Now()
@@ -508,7 +508,7 @@ func (sched *Scheduler) scheduleOne() {
// schedule it. (hopefully)
metrics.PodScheduleFailures.Inc()
} else {
glog.Errorf("error selecting node for pod: %v", err)
klog.Errorf("error selecting node for pod: %v", err)
metrics.PodScheduleErrors.Inc()
}
return
@@ -527,7 +527,7 @@ func (sched *Scheduler) scheduleOne() {
// This function modifies 'assumedPod' if volume binding is required.
allBound, err := sched.assumeVolumes(assumedPod, suggestedHost)
if err != nil {
glog.Errorf("error assuming volumes: %v", err)
klog.Errorf("error assuming volumes: %v", err)
metrics.PodScheduleErrors.Inc()
return
}
@@ -535,7 +535,7 @@ func (sched *Scheduler) scheduleOne() {
// assume modifies `assumedPod` by setting NodeName=suggestedHost
err = sched.assume(assumedPod, suggestedHost)
if err != nil {
glog.Errorf("error assuming pod: %v", err)
klog.Errorf("error assuming pod: %v", err)
metrics.PodScheduleErrors.Inc()
return
}
@@ -545,7 +545,7 @@ func (sched *Scheduler) scheduleOne() {
if !allBound {
err := sched.bindVolumes(assumedPod)
if err != nil {
glog.Errorf("error binding volumes: %v", err)
klog.Errorf("error binding volumes: %v", err)
metrics.PodScheduleErrors.Inc()
return
}
@@ -560,7 +560,7 @@ func (sched *Scheduler) scheduleOne() {
})
metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
if err != nil {
glog.Errorf("error binding pod: %v", err)
klog.Errorf("error binding pod: %v", err)
metrics.PodScheduleErrors.Inc()
} else {
metrics.PodScheduleSuccesses.Inc()
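
A pattern worth noticing in bind() above: failures are reported twice, once to the scheduler log for operators (klog) and once as a Pod event for users (the recorder). A sketch of that pairing; the helper name and wiring are assumptions, not scheduler code:

package bindingdemo // hypothetical, for illustration only

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

// reportBindFailure logs a binding failure for operators and records an
// event on the Pod so users see it in "kubectl describe pod".
func reportBindFailure(recorder record.EventRecorder, pod *v1.Pod, err error) {
	klog.V(1).Infof("Failed to bind pod: %v/%v", pod.Namespace, pod.Name)
	recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err)
}
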
@@ -34,7 +34,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@@ -24,7 +24,7 @@ import (

ktypes "k8s.io/apimachinery/pkg/types"

"github.com/golang/glog"
"k8s.io/klog"
)

type clock interface {
@@ -76,7 +76,7 @@ func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration {
newDuration = maxDuration
}
b.backoff = newDuration
glog.V(4).Infof("Backing off %s", duration.String())
klog.V(4).Infof("Backing off %s", duration.String())
return duration
}