examples/bootkube: Update self-hosted Kubernetes deployment
* scp the kubeconfig to hosts rather than insecurely distributing credentials within Ignition configs. This is also easier than copy-pasting k8s secrets into machine metadata (slow).
* Self-hosted Kubernetes machine configurations can be versioned without containing Kubernetes credentials.
* Use path-based activation for the host kubelet.
* Update from Kubernetes v1.2.2 to v1.3.0-alpha.5_coreos.0.
* Update host kubelet flags accordingly.
@@ -5,7 +5,7 @@ The self-hosted Kubernetes examples provision a 3 node cluster with etcd, flanne

 ## Experimental

-Self-hosted Kubernetes is under very active development by CoreOS. We're working on upstreaming the required Hyperkube patches. Be aware that a deployment with a single apiserver cannot tolerate its failure without intervention. We'll be improving this to allow CoreOS auto-updates.
+Self-hosted Kubernetes is under very active development by CoreOS. We're working on upstreaming the required Hyperkube patches. Be aware that a deployment with a single apiserver cannot tolerate its failure. We'll be improving this to allow CoreOS auto-updates.

 ## Requirements
@@ -34,14 +34,11 @@ Use the `bootkube` tool to render Kubernetes manifests and credentials into an `

     bootkube render --asset-dir=assets --api-servers=https://172.15.0.21:443 --etcd-servers=http://172.15.0.21:2379 --api-server-alt-names=IP=172.15.0.21

-Copy the `certificate-authority-data`, `client-certificate-data`, and `client-key-data` from the generated `assets/auth/kubeconfig` file into each master group definition (e.g. under `examples/groups/bootkube` or `examples/groups/bootkube-install`). Also, add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
+Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).

     {
       "profile": "bootkube-worker",
       "metadata": {
-        "k8s_certificate_authority": "...",
-        "k8s_client_certificate": "...",
-        "k8s_client_key": "...",
         "ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
       }
     }
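As an aside, the credentials that used to be pasted into group metadata now live only in the rendered asset directory. A quick, purely illustrative way to confirm where they are before editing any group definitions (assuming the `bootkube render` invocation above):

    ls assets/auth/                      # rendered credentials, including the kubeconfig
    grep server assets/auth/kubeconfig   # should show the apiserver endpoint, e.g. https://172.15.0.21:443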
@@ -58,12 +55,18 @@ Create a network boot environment with `coreos/dnsmasq` and create VMs with `scr

 We're ready to use [bootkube](https://github.com/coreos/bootkube) to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster. This is a **one-time** procedure.

-Copy the `bootkube` generated assets to any one of the master nodes.
+Secure copy the `bootkube` generated assets to any one of the master nodes.

     scp -r assets core@172.15.0.21:/home/core/assets
     scp $(which bootkube) core@172.15.0.21:/home/core

-Connect to that Kubernetes master node,
+Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node (repeat for 172.15.0.22, 172.15.0.23).
+
+    scp assets/auth/kubeconfig core@172.15.0.21:/home/core/kubeconfig
+    ssh core@172.15.0.21
+    sudo mv kubeconfig /etc/kubernetes/kubeconfig
+
+Connect to the Kubernetes master node,

     ssh core@172.15.0.21
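Since the kubeconfig must land on every node, a small loop is one way to script the distribution step above (a sketch only; the node addresses are the example IPs used throughout these docs):

    # Push the rendered kubeconfig to each node's /etc/kubernetes/kubeconfig,
    # the path the kubelet.path unit watches for.
    for node in 172.15.0.21 172.15.0.22 172.15.0.23; do
      scp assets/auth/kubeconfig core@$node:/home/core/kubeconfig
      ssh core@$node 'sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig'
    done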
@@ -14,9 +14,6 @@
     "k8s_master_endpoint": "https://172.15.0.21:443",
     "k8s_pod_network": "10.2.0.0/16",
     "k8s_service_ip_range": "10.3.0.0/24",
-    "k8s_certificate_authority": "ADD ME",
-    "k8s_client_certificate": "ADD ME",
-    "k8s_client_key": "ADD ME",
     "k8s_etcd_endpoints": "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379",
     "networkd_address": "172.15.0.21/16",
     "networkd_dns": "172.15.0.3",
@@ -14,11 +14,11 @@
     "k8s_master_endpoint": "https://172.15.0.21:443",
     "k8s_pod_network": "10.2.0.0/16",
     "k8s_service_ip_range": "10.3.0.0/24",
-    "k8s_certificate_authority": "ADD ME",
-    "k8s_client_certificate": "ADD ME",
-    "k8s_client_key": "ADD ME",
     "networkd_address": "172.15.0.22/16",
     "networkd_dns": "172.15.0.3",
-    "networkd_gateway": "172.15.0.1"
+    "networkd_gateway": "172.15.0.1",
+    "ssh_authorized_keys": [
+      "ADD ME"
+    ]
   }
 }
@@ -14,11 +14,11 @@
     "k8s_master_endpoint": "https://172.15.0.21:443",
     "k8s_pod_network": "10.2.0.0/16",
     "k8s_service_ip_range": "10.3.0.0/24",
-    "k8s_certificate_authority": "ADD ME",
-    "k8s_client_certificate": "ADD ME",
-    "k8s_client_key": "ADD ME",
     "networkd_address": "172.15.0.23/16",
     "networkd_dns": "172.15.0.3",
-    "networkd_gateway": "172.15.0.1"
+    "networkd_gateway": "172.15.0.1",
+    "ssh_authorized_keys": [
+      "ADD ME"
+    ]
   }
 }
@@ -14,9 +14,6 @@
     "k8s_master_endpoint": "https://172.15.0.21:443",
     "k8s_pod_network": "10.2.0.0/16",
     "k8s_service_ip_range": "10.3.0.0/24",
-    "k8s_certificate_authority": "ADD ME",
-    "k8s_client_certificate": "ADD ME",
-    "k8s_client_key": "ADD ME",
     "networkd_address": "172.15.0.21/16",
     "networkd_dns": "172.15.0.3",
     "networkd_gateway": "172.15.0.1",
@@ -13,12 +13,12 @@
     "k8s_master_endpoint": "https://172.15.0.21:443",
     "k8s_pod_network": "10.2.0.0/16",
     "k8s_service_ip_range": "10.3.0.0/24",
-    "k8s_certificate_authority": "ADD ME",
-    "k8s_client_certificate": "ADD ME",
-    "k8s_client_key": "ADD ME",
     "networkd_address": "172.15.0.22/16",
     "networkd_dns": "172.15.0.3",
     "networkd_gateway": "172.15.0.1",
-    "pxe": "true"
+    "pxe": "true",
+    "ssh_authorized_keys": [
+      "ADD ME"
+    ]
   }
 }
@@ -13,12 +13,12 @@
     "k8s_master_endpoint": "https://172.15.0.21:443",
     "k8s_pod_network": "10.2.0.0/16",
     "k8s_service_ip_range": "10.3.0.0/24",
-    "k8s_certificate_authority": "ADD ME",
-    "k8s_client_certificate": "ADD ME",
-    "k8s_client_key": "ADD ME",
     "networkd_address": "172.15.0.23/16",
     "networkd_dns": "172.15.0.3",
     "networkd_gateway": "172.15.0.1",
-    "pxe": "true"
+    "pxe": "true",
+    "ssh_authorized_keys": [
+      "ADD ME"
+    ]
   }
 }
@@ -30,22 +30,29 @@ systemd:
         [Unit]
         Requires=flanneld.service
         After=flanneld.service
-    - name: kubelet.service
+    - name: kubelet.path
       enable: true
       contents: |
         [Unit]
+        Description=Watch for kubeconfig
+        [Path]
+        PathExists=/etc/kubernetes/kubeconfig
+        [Install]
+        WantedBy=multi-user.target
+    - name: kubelet.service
+      contents: |
+        [Unit]
         Description=Kubelet via Hyperkube ACI
         Requires=flanneld.service
         After=flanneld.service
         [Service]
-        Environment=KUBELET_ACI=quay.io/aaron_levy/hyperkube
-        Environment=KUBELET_VERSION=v1.2.2_runonce.0
+        Environment=KUBELET_ACI=quay.io/coreos/hyperkube
+        Environment=KUBELET_VERSION=v1.3.0-alpha.5_coreos.0
         ExecStart=/usr/lib/coreos/kubelet-wrapper \
-          --runonce \
-          --runonce-timeout=60s \
           --api-servers={{.k8s_master_endpoint}} \
           --kubeconfig=/etc/kubernetes/kubeconfig \
           --lock-file=/var/run/lock/kubelet.lock \
           --exit-on-lock-contention \
           --allow-privileged \
           --hostname-override={{.ipv4_address}} \
           --node-labels=master=true \
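The kubelet.path unit above is the path-based activation mentioned in the commit message: systemd starts kubelet.service only once /etc/kubernetes/kubeconfig exists on disk, i.e. after the kubeconfig has been copied over. A rough way to observe this on a booted node (standard systemctl/journalctl commands, shown only as an illustrative check):

    systemctl status kubelet.path       # active and waiting before the kubeconfig exists
    systemctl status kubelet.service    # started automatically once /etc/kubernetes/kubeconfig appears
    journalctl -u kubelet.service -f    # follow the kubelet logs after activation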
@@ -77,25 +84,9 @@ storage:
         format: "ext4"
   {{end}}
   files:
-    - path: /etc/kubernetes/kubeconfig
+    - path: /etc/kubernetes/empty
       mode: 0644
       contents: |
-        apiVersion: v1
-        kind: Config
-        clusters:
-        - name: local
-          cluster:
-            server: {{.k8s_master_endpoint}}
-            certificate-authority-data: {{.k8s_certificate_authority}}
-        users:
-        - name: kubelet
-          user:
-            client-certificate-data: {{.k8s_client_certificate}}
-            client-key-data: {{.k8s_client_key}}
-        contexts:
-        - context:
-            cluster: local
-            user: kubelet
     - path: /opt/init-flannel
       mode: 0544
       contents: |
@@ -123,6 +114,8 @@ storage:
           fi
         }
         init_flannel

+{{ if not (index . "skip_networkd") }}
 networkd:
   units:
     - name: 10-static.network
@@ -133,6 +126,7 @@ networkd:
         Gateway={{.networkd_gateway}}
         DNS={{.networkd_dns}}
         Address={{.networkd_address}}
+{{end}}

 {{ if index . "ssh_authorized_keys" }}
 passwd:
@@ -25,29 +25,36 @@ systemd:
         [Unit]
         Requires=flanneld.service
         After=flanneld.service
-    - name: kubelet.service
+    - name: kubelet.path
      enable: true
       contents: |
         [Unit]
+        Description=Watch for kubeconfig
+        [Path]
+        PathExists=/etc/kubernetes/kubeconfig
+        [Install]
+        WantedBy=multi-user.target
+    - name: kubelet.service
+      contents: |
+        [Unit]
         Description=Kubelet via Hyperkube ACI
         Requires=flanneld.service
         After=flanneld.service
         [Service]
-        Environment=KUBELET_ACI=quay.io/aaron_levy/hyperkube
-        Environment=KUBELET_VERSION=v1.2.2_runonce.0
+        Environment=KUBELET_ACI=quay.io/coreos/hyperkube
+        Environment=KUBELET_VERSION=v1.3.0-alpha.5_coreos.0
         ExecStart=/usr/lib/coreos/kubelet-wrapper \
-          --runonce \
-          --runonce-timeout=60s \
           --api-servers={{.k8s_master_endpoint}} \
           --kubeconfig=/etc/kubernetes/kubeconfig \
           --lock-file=/var/run/lock/kubelet.lock \
           --exit-on-lock-contention \
           --allow-privileged \
           --hostname-override={{.ipv4_address}} \
           --minimum-container-ttl-duration=3m0s \
           --cluster_dns={{.k8s_dns_service_ip}} \
           --cluster_domain=cluster.local
         Restart=always
-        RestartSec=10
+        RestartSec=5
         [Install]
         WantedBy=multi-user.target
@@ -71,26 +78,11 @@ storage:
         format: "ext4"
   {{end}}
   files:
-    - path: /etc/kubernetes/kubeconfig
+    - path: /etc/kubernetes/empty
       mode: 0644
       contents: |
-        apiVersion: v1
-        kind: Config
-        clusters:
-        - name: local
-          cluster:
-            server: {{.k8s_master_endpoint}}
-            certificate-authority-data: {{.k8s_certificate_authority}}
-        users:
-        - name: kubelet
-          user:
-            client-certificate-data: {{.k8s_client_certificate}}
-            client-key-data: {{.k8s_client_key}}
-        contexts:
-        - context:
-            cluster: local
-            user: kubelet

+{{ if not (index . "skip_networkd") }}
 networkd:
   units:
     - name: 10-static.network
@@ -101,6 +93,7 @@ networkd:
         Gateway={{.networkd_gateway}}
         DNS={{.networkd_dns}}
         Address={{.networkd_address}}
+{{end}}

 {{ if index . "ssh_authorized_keys" }}
 passwd: