separated from the apiserver running locally on the master node so that it can be optionally enabled or disabled as needed. Also, fix the health-check configuration for the master components, which previously only worked by coincidence: if a kubelet doesn't register with a master, it never bothers to figure out what its local address is, in which case it ends up constructing a URL like http://:8080/healthz for the HTTP probe. This happens to work on the master because all of the pods use host networking and explicitly bind to 127.0.0.1. Once the kubelet is registered with the master and determines the local node address, it health-checks an address where the pod isn't listening, and the kubelet periodically restarts each master component when that liveness probe fails.
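For illustration, the fix described above amounts to pinning the probe's host to the loopback address, so the kubelet never substitutes a node address the component isn't bound to. The following is a minimal sketch of such a static pod manifest, with placeholder names, image, and port — not one of the actual manifests touched by this change:

apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
spec:
  hostNetwork: true              # component binds to 127.0.0.1 on the host
  containers:
    - name: kube-apiserver
      image: example.invalid/kube-apiserver:latest   # placeholder image
      livenessProbe:
        httpGet:
          host: 127.0.0.1        # pin the probe target; otherwise the kubelet
          port: 8080             # substitutes the node address once it registers
          path: /healthz
        initialDelaySeconds: 15
        timeoutSeconds: 15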
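# The kubelet's environment file lives in a different place depending on the
# init system; the 'is_systemd' pillar value below selects the right path.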
{% if pillar.get('is_systemd') %}
  {% set environment_file = '/etc/sysconfig/kubelet' %}
{% else %}
  {% set environment_file = '/etc/default/kubelet' %}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://kubelet/default
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/usr/local/bin/kubelet:
  file.managed:
    - source: salt://kube-bins/kubelet
    - user: root
    - group: root
    - mode: 755

# The default here is that this file is blank. If this is the case, the kubelet
# won't be able to parse it as JSON and it will not be able to publish events
# to the apiserver. You'll see a single error line in the kubelet start up file
# about this.
/var/lib/kubelet/kubeconfig:
  file.managed:
    - source: salt://kubelet/kubeconfig
    - user: root
    - group: root
    - mode: 400
    - makedirs: true
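
# For reference, a populated kubeconfig is ordinary kubeconfig YAML along these
# lines (illustrative sketch only; the real contents ship via
# salt://kubelet/kubeconfig above, and the server address and token are
# placeholders):
#   apiVersion: v1
#   kind: Config
#   clusters:
#     - name: local
#       cluster:
#         server: https://kubernetes-master
#   users:
#     - name: kubelet
#       user:
#         token: <token>
#   contexts:
#     - name: kubelet-context
#       context:
#         cluster: local
#         user: kubelet
#   current-context: kubelet-context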

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/kubelet.service:
  file.managed:
    - source: salt://kubelet/kubelet.service
    - user: root
    - group: root

# The service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
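# ("bounce" is the helper's restart verb; going by the comment above it does
# at least a `systemctl daemon-reload` followed by a restart of the unit --
# see /opt/kubernetes/helpers/services for the exact steps.)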
fix-service-kubelet:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kubelet
    - watch:
      - file: /usr/local/bin/kubelet
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
      - file: {{ environment_file }}
      - file: /var/lib/kubelet/kubeconfig

{% else %}

/etc/init.d/kubelet:
  file.managed:
    - source: salt://kubelet/initd
    - user: root
    - group: root
    - mode: 755

{% endif %}

kubelet:
  service.running:
    - enable: True
    - watch:
      - file: /usr/local/bin/kubelet
{% if pillar.get('is_systemd') %}
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
{% else %}
      - file: /etc/init.d/kubelet
{% endif %}
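# On RedHat-family hosts the distro-packaged unit lives under
# /usr/lib/systemd/system, so that copy of kubelet.service is watched as well.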
{% if grains['os_family'] == 'RedHat' %}
      - file: /usr/lib/systemd/system/kubelet.service
{% endif %}
      - file: {{ environment_file }}
      - file: /var/lib/kubelet/kubeconfig
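
# To apply just this state by hand on a node (standard Salt usage, not
# something this repo's provisioning scripts necessarily do):
#   salt-call state.sls kubelet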