	Revert "Fix the race between configuring cbr0 and restarting static pods"
@@ -2,5 +2,5 @@ DOCKER_OPTS=""
{% if grains.docker_opts is defined and grains.docker_opts %}
DOCKER_OPTS="${DOCKER_OPTS} {{grains.docker_opts}}"
{% endif %}
DOCKER_OPTS="${DOCKER_OPTS} --bridge=cbr0 --iptables=false --ip-masq=false"
DOCKER_OPTS="${DOCKER_OPTS} --bridge cbr0 --iptables=false --ip-masq=false"
DOCKER_NOFILE=1000000

@@ -48,6 +48,11 @@ net.ipv4.ip_forward:
  sysctl.present:
    - value: 1

cbr0:
  container_bridge.ensure:
    - cidr: {{ grains['cbr-cidr'] }}
    - mtu: 1460

{{ environment_file }}:
  file.managed:
    - source: salt://docker/docker-defaults

@@ -119,6 +124,7 @@ docker:
    - enable: True
    - watch:
      - file: {{ environment_file }}
      - container_bridge: cbr0
{% if override_docker_ver != '' %}
    - require:
      - pkg: lxc-docker-{{ override_docker_ver }}

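The container_bridge.ensure state above recreates the cbr0 bridge with the CIDR taken from the cbr-cidr grain. For orientation, here is a minimal Go sketch of the same bridge surgery that the kubelet code further down in this diff shells out for (ip link, brctl). The ip addr add / ip link set up steps are an assumption on our part, since the middle of that Go hunk is elided here; the sketch needs root and bridge-utils and is purely illustrative.

package main

import (
	"fmt"
	"os/exec"
)

// recreateCbr0 tears down and recreates the cbr0 bridge with the given CIDR,
// using the same external commands the kubelet hunks below invoke.
func recreateCbr0(cidr string) error {
	steps := [][]string{
		{"ip", "link", "set", "dev", "cbr0", "down"},
		{"brctl", "delbr", "cbr0"},
		{"brctl", "addbr", "cbr0"},
		// Assumed steps: assigning the CIDR and bringing the link up are
		// elided in the hunk below, so these two commands are our guess.
		{"ip", "addr", "add", cidr, "dev", "cbr0"},
		{"ip", "link", "set", "dev", "cbr0", "up"},
	}
	for _, s := range steps {
		if out, err := exec.Command(s[0], s[1:]...).CombinedOutput(); err != nil {
			return fmt.Errorf("%v: %v (%s)", s, err, out)
		}
	}
	return nil
}

func main() {
	if err := recreateCbr0("10.244.1.1/24"); err != nil {
		fmt.Println(err)
	}
}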
@@ -37,26 +37,10 @@ master-docker-image-tags:
  file.touch:
    - name: /srv/pillar/docker-images.sls

# The current containervm image has both docker and kubelet running by
# default. But during the cluster creation stage, docker and kubelet
# can be overwritten completely, or restarted due to flag changes.
# The ordering of the salt states for the docker and kubelet services and
# master-addon below is very important, to avoid a race between salt
# restarting docker or kubelet and kubelet starting the master components.
# Without this ordering, when a GCE instance boots up,
# configure-vm.sh will run and download the release. At the end of
# boot, run-salt will run the kube-master-addons service, which installs the
# master component manifest files into the kubelet config directory before
# the proper version of kubelet is installed. Please see
# https://github.com/GoogleCloudPlatform/kubernetes/issues/10122#issuecomment-114566063
# for a detailed explanation of this very issue.
kube-master-addons:
  service.running:
    - enable: True
    - restart: True
    - require:
      - service: docker
      - service: kubelet
    - watch:
      - file: master-docker-image-tags
      - file: /etc/kubernetes/kube-master-addons.sh

@@ -76,9 +76,4 @@
  {% set cgroup_root = "--cgroup_root=/" -%}
{% endif -%}

{% set pod_cidr = "" %}
{% if grains['roles'][0] == 'kubernetes-master' %}
  {% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
{% endif %}

DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}}"

@@ -115,7 +115,6 @@ type KubeletServer struct {
	DockerDaemonContainer          string
	SystemContainer                string
	ConfigureCBR0                  bool
	PodCIDR                        string
	MaxPods                        int
	DockerExecHandlerName          string

@@ -242,7 +241,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
	fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.")
	fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
	fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.")

	// Flags intended for testing, not recommended for use in production environments.
	fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
	fs.Float64Var(&s.ChaosChance, "chaos-chance", s.ChaosChance, "If > 0.0, introduce random client errors and latency. Intended for testing. [default=0.0]")

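The --pod-cidr flag registered here is the counterpart of the {{pod_cidr}} template variable in the DAEMON_ARGS hunk above. A minimal, self-contained sketch of the same pflag registration; the server struct and the sample flag values are ours, for illustration only:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// Hypothetical stand-in for the two KubeletServer fields touched by this revert.
type server struct {
	ConfigureCBR0 bool
	PodCIDR       string
}

func main() {
	s := server{}
	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
	// Mirrors the two flag registrations shown in the hunk above.
	fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", false, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
	fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode.")
	fs.Parse([]string{"--configure-cbr0=true", "--pod-cidr=10.244.1.0/24"})
	fmt.Println(s.ConfigureCBR0, s.PodCIDR) // true 10.244.1.0/24
}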
@@ -362,7 +361,6 @@ func (s *KubeletServer) Run(_ []string) error {
		DockerDaemonContainer:     s.DockerDaemonContainer,
		SystemContainer:           s.SystemContainer,
		ConfigureCBR0:             s.ConfigureCBR0,
		PodCIDR:                   s.PodCIDR,
		MaxPods:                   s.MaxPods,
		DockerExecHandler:         dockerExecHandler,
	}

@@ -716,7 +714,6 @@ type KubeletConfig struct {
	DockerDaemonContainer          string
	SystemContainer                string
	ConfigureCBR0                  bool
	PodCIDR                        string
	MaxPods                        int
	DockerExecHandler              dockertools.ExecHandler
}

@@ -774,7 +771,6 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
		kc.DockerDaemonContainer,
		kc.SystemContainer,
		kc.ConfigureCBR0,
		kc.PodCIDR,
		kc.MaxPods,
		kc.DockerExecHandler)

@@ -354,7 +354,6 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
		kc.DockerDaemonContainer,
		kc.SystemContainer,
		kc.ConfigureCBR0,
		kc.PodCIDR,
		kc.MaxPods,
		kc.DockerExecHandler,
	)

@@ -19,17 +19,27 @@ package kubelet
import (
	"bytes"
	"net"
	"os"
	"os/exec"
	"regexp"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/golang/glog"
)

var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`)

func createCBR0(wantCIDR *net.IPNet) error {
func ensureCbr0(wantCIDR *net.IPNet) error {
	if !cbr0CidrCorrect(wantCIDR) {
		glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)

		// delete cbr0
		if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
			glog.Error(err)
			return err
		}
		if err := exec.Command("brctl", "delbr", "cbr0").Run(); err != nil {
			glog.Error(err)
			return err
		}
		// recreate cbr0 with wantCIDR
		if err := exec.Command("brctl", "addbr", "cbr0").Run(); err != nil {
			glog.Error(err)
@@ -44,60 +54,16 @@ func createCBR0(wantCIDR *net.IPNet) error {
			return err
		}
		// restart docker
	// For now just log the error. The containerRuntime check will catch docker failures.
	// TODO (dawnchen) figure out what we should do for rkt here.
	if util.UsingSystemdInitSystem() {
		if err := exec.Command("systemctl", "restart", "docker").Run(); err != nil {
			glog.Error(err)
		}
	} else {
		if err := exec.Command("service", "docker", "restart").Run(); err != nil {
			glog.Error(err)
		}
			// For now just log the error. The containerRuntime check will catch docker failures.
			// TODO (dawnchen) figure out what we should do for rkt here.
		}
		glog.V(2).Info("Recreated cbr0 and restarted docker")
	return nil
}

func ensureCbr0(wantCIDR *net.IPNet) error {
	exists, err := cbr0Exists()
	if err != nil {
		return err
	}
	if !exists {
		glog.V(2).Infof("CBR0 doesn't exist, attempting to create it with range: %s", wantCIDR)
		return createCBR0(wantCIDR)
	}
	if !cbr0CidrCorrect(wantCIDR) {
		glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)

		// delete cbr0
		if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
			glog.Error(err)
			return err
		}
		if err := exec.Command("brctl", "delbr", "cbr0").Run(); err != nil {
			glog.Error(err)
			return err
		}
		return createCBR0(wantCIDR)
	}
	return nil
}

// Check whether the cbr0 network interface is configured, take action
// when the configuration is missing on the node, and propagate any other
// error to the kubelet to handle.
func cbr0Exists() (bool, error) {
	if _, err := os.Stat("/sys/class/net/cbr0"); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
	output, err := exec.Command("ip", "addr", "show", "cbr0").Output()
	if err != nil {
@@ -113,7 +79,6 @@ func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
		return false
	}
	cbr0CIDR.IP = cbr0IP

	glog.V(5).Infof("Want cbr0 CIDR: %s, have cbr0 CIDR: %s", wantCIDR, cbr0CIDR)
	return wantCIDR.IP.Equal(cbr0IP) && bytes.Equal(wantCIDR.Mask, cbr0CIDR.Mask)
}

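cbr0CidrCorrect works by scraping the output of `ip addr show cbr0` with the cidrRegexp declared earlier and comparing the parsed range against the desired one. A runnable sketch of just that parse-and-compare step; the sample output line and the wantCIDR values are ours:

package main

import (
	"bytes"
	"fmt"
	"net"
	"regexp"
)

// The same pattern the diff compiles to pull "addr/len" out of `ip addr show cbr0`.
var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`)

func main() {
	// Illustrative stand-in for exec.Command("ip", "addr", "show", "cbr0").Output().
	output := []byte("    inet 10.244.1.1/24 scope global cbr0")

	match := cidrRegexp.FindSubmatch(output)
	if match == nil {
		fmt.Println("no inet line found")
		return
	}
	cbr0IP, cbr0CIDR, err := net.ParseCIDR(string(match[1]))
	if err != nil {
		fmt.Println(err)
		return
	}
	cbr0CIDR.IP = cbr0IP // as in cbr0CidrCorrect: keep the interface address, not the network address

	// The desired range, with the bridge address as its IP (assumed; the diff
	// elides how wantCIDR is constructed by the caller).
	wantCIDR := &net.IPNet{IP: net.ParseIP("10.244.1.1"), Mask: net.CIDRMask(24, 32)}
	fmt.Println(wantCIDR.IP.Equal(cbr0IP) && bytes.Equal(wantCIDR.Mask, cbr0CIDR.Mask)) // true
}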
@@ -147,7 +147,6 @@ func NewMainKubelet(
	dockerDaemonContainer string,
	systemContainer string,
	configureCBR0 bool,
	podCIDR string,
	pods int,
	dockerExecHandler dockertools.ExecHandler) (*Kubelet, error) {
	if rootDirectory == "" {
@@ -262,7 +261,6 @@ func NewMainKubelet(
		cgroupRoot:                     cgroupRoot,
		mounter:                        mounter,
		configureCBR0:                  configureCBR0,
		podCIDR:                        podCIDR,
		pods:                           pods,
		syncLoopMonitor:                util.AtomicValue{},
	}
@@ -320,10 +318,6 @@ func NewMainKubelet(
	}
	klet.containerManager = containerManager

	// Start syncing node status immediately, this may set up things the runtime needs to run.
	go util.Until(klet.syncNetworkStatus, 30*time.Second, util.NeverStop)
	go klet.syncNodeStatus()

	// Wait for the runtime to be up with a timeout.
	if err := waitUntilRuntimeIsUp(klet.containerRuntime, maxWaitForContainerRuntime); err != nil {
		return nil, fmt.Errorf("timed out waiting for %q to come up: %v", containerRuntime, err)
@@ -418,10 +412,6 @@ type Kubelet struct {
	runtimeUpThreshold     time.Duration
	lastTimestampRuntimeUp time.Time

	// Network Status information
	networkConfigMutex sync.Mutex
	networkConfigured  bool

	// Volume plugins.
	volumePluginMgr volume.VolumePluginMgr

@@ -499,7 +489,6 @@ type Kubelet struct {
	// Whether or not kubelet should take responsibility for keeping cbr0 in
	// the correct state.
	configureCBR0 bool
	podCIDR       string

	// Number of Pods which can be run by this Kubelet
	pods int
@@ -718,7 +707,7 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
	}

	go util.Until(kl.updateRuntimeUp, 5*time.Second, util.NeverStop)

	go kl.syncNodeStatus()
	// Run the system oom watcher forever.
	kl.statusManager.Start()
	kl.syncLoop(updates, kl)
@@ -1716,11 +1705,6 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandl
		glog.Infof("Skipping pod synchronization, container runtime is not up.")
		return
	}
	if !kl.doneNetworkConfigure() {
		time.Sleep(5 * time.Second)
		glog.Infof("Skipping pod synchronization, network is not configured")
		return
	}
	unsyncedPod := false
	podSyncTypes := make(map[types.UID]SyncPodType)
	select {
@@ -1877,7 +1861,6 @@ func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
		glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
		return nil
	}
	glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
	_, cidr, err := net.ParseCIDR(podCIDR)
	if err != nil {
		return err
@@ -1912,22 +1895,6 @@ func (kl *Kubelet) recordNodeStatusEvent(event string) {
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
var oldNodeUnschedulable bool

func (kl *Kubelet) syncNetworkStatus() {
	kl.networkConfigMutex.Lock()
	defer kl.networkConfigMutex.Unlock()

	networkConfigured := true
	if kl.configureCBR0 {
		if len(kl.podCIDR) == 0 {
			networkConfigured = false
		} else if err := kl.reconcileCBR0(kl.podCIDR); err != nil {
			networkConfigured = false
			glog.Errorf("Error configuring cbr0: %v", err)
		}
	}
	kl.networkConfigured = networkConfigured
}

// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
func (kl *Kubelet) setNodeStatus(node *api.Node) error {
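This is the heart of the revert on the Go side: the dedicated syncNetworkStatus goroutine, started earlier with go util.Until(klet.syncNetworkStatus, 30*time.Second, util.NeverStop), disappears, and setNodeStatus takes over the cbr0 reconciliation, as the next hunk shows. A minimal sketch of the removed pattern, a periodic worker recomputing a mutex-guarded flag that readers poll via doneNetworkConfigure; all names and the toy reconcile step here are ours:

package main

import (
	"fmt"
	"sync"
	"time"
)

type kubeletish struct {
	mu                sync.Mutex
	networkConfigured bool
	podCIDR           string
}

// syncNetworkStatus recomputes the flag under the lock, as in the removed code.
func (kl *kubeletish) syncNetworkStatus() {
	kl.mu.Lock()
	defer kl.mu.Unlock()
	// Toy stand-in for reconcileCBR0: configured only once a pod CIDR is known.
	kl.networkConfigured = len(kl.podCIDR) != 0
}

// doneNetworkConfigure mirrors the reader side removed later in this diff.
func (kl *kubeletish) doneNetworkConfigure() bool {
	kl.mu.Lock()
	defer kl.mu.Unlock()
	return kl.networkConfigured
}

func main() {
	kl := &kubeletish{}
	stop := make(chan struct{})
	go func() { // cf. go util.Until(klet.syncNetworkStatus, 30*time.Second, util.NeverStop)
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				kl.syncNetworkStatus()
			}
		}
	}()
	time.Sleep(25 * time.Millisecond)
	fmt.Println(kl.doneNetworkConfigure()) // false: no CIDR yet
	kl.mu.Lock()
	kl.podCIDR = "10.244.1.0/24"
	kl.mu.Unlock()
	time.Sleep(25 * time.Millisecond)
	fmt.Println(kl.doneNetworkConfigure()) // true
	close(stop)
}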
@@ -1961,6 +1928,16 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
		}
	}

	networkConfigured := true
	if kl.configureCBR0 {
		if len(node.Spec.PodCIDR) == 0 {
			networkConfigured = false
		} else if err := kl.reconcileCBR0(node.Spec.PodCIDR); err != nil {
			networkConfigured = false
			glog.Errorf("Error configuring cbr0: %v", err)
		}
	}

	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := kl.GetCachedMachineInfo()
@@ -2004,8 +1981,6 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {

	// Check whether container runtime can be reported as up.
	containerRuntimeUp := kl.containerRuntimeUp()
	// Check whether network is configured properly
	networkConfigured := kl.doneNetworkConfigure()

	currentTime := util.Now()
	var newNodeReadyCondition api.NodeCondition
@@ -2074,12 +2049,6 @@ func (kl *Kubelet) containerRuntimeUp() bool {
	return kl.lastTimestampRuntimeUp.Add(kl.runtimeUpThreshold).After(time.Now())
}

func (kl *Kubelet) doneNetworkConfigure() bool {
	kl.networkConfigMutex.Lock()
	defer kl.networkConfigMutex.Unlock()
	return kl.networkConfigured
}

// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus() error {
@@ -2090,8 +2059,6 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
	if node == nil {
		return fmt.Errorf("no node instance returned for %q", kl.nodeName)
	}
	kl.podCIDR = node.Spec.PodCIDR

	if err := kl.setNodeStatus(node); err != nil {
		return err
	}

@@ -127,7 +127,6 @@ func newTestKubelet(t *testing.T) *TestKubelet {
	}
	kubelet.volumeManager = newVolumeManager()
	kubelet.containerManager, _ = newContainerManager(mockCadvisor, "", "", "")
	kubelet.networkConfigured = true
	return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient}
}

@@ -17,7 +17,6 @@ limitations under the License.
package kubelet

import (
	"errors"
	"fmt"
	"reflect"
	"sort"
@@ -143,9 +142,6 @@ func (s *statusManager) RemoveOrphanedStatuses(podFullNames map[string]bool) {

// syncBatch syncs pods statuses with the apiserver.
func (s *statusManager) syncBatch() error {
	if s.kubeClient == nil {
		return errors.New("Kubernetes client is nil, skipping pod status updates")
	}
	syncRequest := <-s.podStatusChannel
	pod := syncRequest.pod
	podFullName := kubecontainer.GetPodFullName(pod)

@@ -198,20 +198,6 @@ func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
	return regexps, nil
}

// Detects if systemd is being used as the init system.
// Please note that simply reading /proc/1/cmdline can be misleading because
// some installations of various init programs can automatically make /sbin/init
// a symlink or even a renamed version of their main program.
// TODO(dchen1107): reliably detect the init system in use on the system:
// systemd, upstart, initd, etc.
func UsingSystemdInitSystem() bool {
	if _, err := os.Stat("/run/systemd/system"); err == nil {
		return true
	}

	return false
}

// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
func ApplyOomScoreAdj(pid int, value int) error {
	if value < -1000 || value > 1000 {

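UsingSystemdInitSystem existed only to gate the systemd-vs-SysV docker-restart branch removed from container_bridge.go earlier in this diff. A self-contained sketch of that pairing, under the assumption that detecting systemd means /run/systemd/system exists (function names here are ours; actually restarting docker requires root):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// usingSystemdInitSystem reports whether systemd appears to be the init
// system, by checking for the path the removed helper stats.
func usingSystemdInitSystem() bool {
	_, err := os.Stat("/run/systemd/system")
	return err == nil
}

// restartDocker mirrors the removed branch in container_bridge.go:
// systemctl on systemd hosts, the service wrapper elsewhere.
func restartDocker() error {
	if usingSystemdInitSystem() {
		return exec.Command("systemctl", "restart", "docker").Run()
	}
	return exec.Command("service", "docker", "restart").Run()
}

func main() {
	fmt.Println("systemd:", usingSystemdInitSystem())
	_ = restartDocker // illustrative only; invoking it needs root and docker
}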