Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we add the flags where necessary
- Update the other repositories we vendor that made a similar change from glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
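Unlike glog, which registered its flags on the global FlagSet as an import side effect, klog only adds its flags (-v, -logtostderr, and friends) when InitFlags is called. A minimal sketch of the pattern this migration relies on, assuming a standalone binary (the package layout is illustrative, not taken from this commit):

	package main

	import (
		"flag"

		"k8s.io/klog"
	)

	func main() {
		// Register klog's flags on the global flag.CommandLine set;
		// glog did this implicitly in an init(), klog does not.
		klog.InitFlags(nil)
		flag.Parse()
		defer klog.Flush()

		klog.Infof("component started")
		klog.V(2).Infof("shown only when run with -v=2 or higher")
	}

The same call is what the last bullet refers to for tests: an init() in the test file can run klog.InitFlags(nil) so the logging flags exist by the time the test binary parses its arguments.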
@@ -29,10 +29,10 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
 	"github.com/opencontainers/runc/libcontainer/configs"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -180,11 +180,11 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
 	// CPU cgroup is required and so it expected to be mounted at this point.
 	periodExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us"))
 	if err != nil {
-		glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
+		klog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
 	}
 	quotaExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us"))
 	if err != nil {
-		glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
+		klog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
 	}
 	if quotaExists && periodExists {
 		f.cpuHardcapping = true
@@ -244,12 +244,12 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 		if !cgroupManager.Exists(cgroupRoot) {
 			return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err)
 		}
-		glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot)
+		klog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot)
 		// Include the top level cgroup for enforcing node allocatable into cgroup-root.
 		// This way, all sub modules can avoid having to understand the concept of node allocatable.
 		cgroupRoot = NewCgroupName(cgroupRoot, defaultNodeAllocatableCgroupName)
 	}
-	glog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig)
+	klog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig)
 
 	qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager)
 	if err != nil {
@@ -268,7 +268,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 		qosContainerManager: qosContainerManager,
 	}
 
-	glog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
+	klog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
 	if devicePluginEnabled {
 		cm.deviceManager, err = devicemanager.NewManagerImpl()
 	} else {
@@ -288,7 +288,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 			nodeConfig.KubeletRootDir,
 		)
 		if err != nil {
-			glog.Errorf("failed to initialize cpu manager: %v", err)
+			klog.Errorf("failed to initialize cpu manager: %v", err)
 			return nil, err
 		}
 	}
@@ -370,9 +370,9 @@ func setupKernelTunables(option KernelTunableBehavior) error {
 		case KernelTunableError:
 			errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
 		case KernelTunableWarn:
-			glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
+			klog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
 		case KernelTunableModify:
-			glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
+			klog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
 			err = sysctl.SetSysctl(flag, expectedValue)
 			if err != nil {
 				errList = append(errList, err)
@@ -424,13 +424,13 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
 		// the cgroup for docker and serve stats for the runtime.
 		// TODO(#27097): Fix this after NodeSpec is clearly defined.
 		cm.periodicTasks = append(cm.periodicTasks, func() {
-			glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
+			klog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
 			cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
 			if err != nil {
-				glog.Error(err)
+				klog.Error(err)
 				return
 			}
-			glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
+			klog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
 			cm.Lock()
 			defer cm.Unlock()
 			cm.RuntimeCgroupsName = cont
@@ -467,12 +467,12 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
 	} else {
 		cm.periodicTasks = append(cm.periodicTasks, func() {
 			if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
-				glog.Error(err)
+				klog.Error(err)
 				return
 			}
 			cont, err := getContainer(os.Getpid())
 			if err != nil {
-				glog.Errorf("failed to find cgroups of kubelet - %v", err)
+				klog.Errorf("failed to find cgroups of kubelet - %v", err)
 				return
 			}
 			cm.Lock()
@@ -579,7 +579,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
 	for _, cont := range cm.systemContainers {
 		if cont.ensureStateFunc != nil {
 			if err := cont.ensureStateFunc(cont.manager); err != nil {
-				glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
+				klog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
 			}
 		}
 	}
@@ -652,12 +652,12 @@ func isProcessRunningInHost(pid int) (bool, error) {
 	if err != nil {
 		return false, fmt.Errorf("failed to find pid namespace of init process")
 	}
-	glog.V(10).Infof("init pid ns is %q", initPidNs)
+	klog.V(10).Infof("init pid ns is %q", initPidNs)
 	processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid))
 	if err != nil {
 		return false, fmt.Errorf("failed to find pid namespace of process %q", pid)
 	}
-	glog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs)
+	klog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs)
 	return initPidNs == processPidNs, nil
 }
 
@@ -699,7 +699,7 @@ func getPidsForProcess(name, pidFile string) ([]int, error) {
 
 	// Return error from getPidFromPidFile since that should have worked
 	// and is the real source of the problem.
-	glog.V(4).Infof("unable to get pid from %s: %v", pidFile, err)
+	klog.V(4).Infof("unable to get pid from %s: %v", pidFile, err)
 	return []int{}, err
 }
 
@@ -737,7 +737,7 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.
 		return err
 	} else if !runningInHost {
 		// Process is running inside a container. Don't touch that.
-		glog.V(2).Infof("pid %d is not running in the host namespaces", pid)
+		klog.V(2).Infof("pid %d is not running in the host namespaces", pid)
 		return nil
 	}
 
@@ -758,9 +758,9 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.
 
 	// Also apply oom-score-adj to processes
 	oomAdjuster := oom.NewOOMAdjuster()
-	glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
+	klog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
 	if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
-		glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
+		klog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
 		errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err))
 	}
 	return utilerrors.NewAggregate(errs)
@@ -800,10 +800,10 @@ func getContainer(pid int) (string, error) {
 	// in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally).
 	if systemd, found := cgs["name=systemd"]; found {
 		if systemd != cpu {
-			glog.Warningf("CPUAccounting not enabled for pid: %d", pid)
+			klog.Warningf("CPUAccounting not enabled for pid: %d", pid)
 		}
 		if systemd != memory {
-			glog.Warningf("MemoryAccounting not enabled for pid: %d", pid)
+			klog.Warningf("MemoryAccounting not enabled for pid: %d", pid)
 		}
 		return systemd, nil
 	}
@@ -841,14 +841,14 @@ func ensureSystemCgroups(rootCgroupPath string, manager *fs.Manager) error {
 
 			pids = append(pids, pid)
 		}
-		glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))
+		klog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))
 
 		// Check if we have moved all the non-kernel PIDs.
 		if len(pids) == 0 {
 			break
 		}
 
-		glog.Infof("Moving non-kernel processes: %v", pids)
+		klog.Infof("Moving non-kernel processes: %v", pids)
 		for _, pid := range pids {
 			err := manager.Apply(pid)
 			if err != nil {