Remove experimental self-hosted etcd options

This commit is contained in:
Dalton Hubble
2017-11-17 01:58:12 -08:00
parent 533e82f833
commit ec48758c5e
14 changed files with 5 additions and 339 deletions

View File

@@ -36,8 +36,6 @@ Find bootkube assets rendered to the `asset_dir` path. That's it.
Render bootkube assets directly with bootkube v0.9.0.
#### On-host etcd (recommended)
```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379
```
@@ -50,21 +48,3 @@ mv manifests-networking/* manifests
popd
diff -rw assets /home/core/mycluster
```
#### Self-hosted etcd (deprecated)
```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --experimental-self-hosted-etcd
```
Compare assets. Note that experimental assets must be generated to a separate directory for terraform applies to sync. Move the experimental `bootstrap-manifests` and `manifests` files during deployment.
```sh
pushd /home/core/mycluster
mv experimental/bootstrap-manifests/* bootstrap-manifests
mv experimental/manifests/* manifests
mv manifests-networking/* manifests
popd
diff -rw assets /home/core/mycluster
```

View File

@@ -5,7 +5,7 @@ resource "template_dir" "bootstrap-manifests" {
vars {
hyperkube_image = "${var.container_images["hyperkube"]}"
etcd_servers = "${var.experimental_self_hosted_etcd ? format("https://%s:2379,https://127.0.0.1:12379", cidrhost(var.service_cidr, 15)) : join(",", formatlist("https://%s:2379", var.etcd_servers))}"
etcd_servers = "${join(",", formatlist("https://%s:2379", var.etcd_servers))}"
cloud_provider = "${var.cloud_provider}"
pod_cidr = "${var.pod_cidr}"
@@ -25,7 +25,7 @@ resource "template_dir" "manifests" {
kubedns_dnsmasq_image = "${var.container_images["kubedns_dnsmasq"]}"
kubedns_sidecar_image = "${var.container_images["kubedns_sidecar"]}"
etcd_servers = "${var.experimental_self_hosted_etcd ? format("https://%s:2379", cidrhost(var.service_cidr, 15)) : join(",", formatlist("https://%s:2379", var.etcd_servers))}"
etcd_servers = "${join(",", formatlist("https://%s:2379", var.etcd_servers))}"
cloud_provider = "${var.cloud_provider}"
pod_cidr = "${var.pod_cidr}"

View File

@@ -1,4 +1,4 @@
# Assets generated only when experimental self-hosted etcd is enabled
# Assets generated only when certain options are chosen
resource "template_dir" "flannel-manifests" {
count = "${var.networking == "flannel" ? 1 : 0}"
@@ -26,49 +26,3 @@ resource "template_dir" "calico-manifests" {
pod_cidr = "${var.pod_cidr}"
}
}
# bootstrap-etcd.yaml pod bootstrap-manifest
resource "template_dir" "experimental-bootstrap-manifests" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
source_dir = "${path.module}/resources/experimental/bootstrap-manifests"
destination_dir = "${var.asset_dir}/experimental/bootstrap-manifests"
vars {
etcd_image = "${var.container_images["etcd"]}"
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 20)}"
}
}
# etcd subfolder - bootstrap-etcd-service.json and migrate-etcd-cluster.json TPR
resource "template_dir" "etcd-subfolder" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
source_dir = "${path.module}/resources/etcd"
destination_dir = "${var.asset_dir}/etcd"
vars {
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 20)}"
}
}
# etcd-operator deployment and etcd-service manifests
# etcd client, server, and peer tls secrets
resource "template_dir" "experimental-manifests" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
source_dir = "${path.module}/resources/experimental/manifests"
destination_dir = "${var.asset_dir}/experimental/manifests"
vars {
etcd_operator_image = "${var.container_images["etcd_operator"]}"
etcd_checkpointer_image = "${var.container_images["etcd_checkpointer"]}"
etcd_service_ip = "${cidrhost(var.service_cidr, 15)}"
# Self-hosted etcd TLS certs / keys
etcd_ca_cert = "${base64encode(tls_self_signed_cert.etcd-ca.cert_pem)}"
etcd_client_cert = "${base64encode(tls_locally_signed_cert.client.cert_pem)}"
etcd_client_key = "${base64encode(tls_private_key.client.private_key_pem)}"
etcd_server_cert = "${base64encode(tls_locally_signed_cert.server.cert_pem)}"
etcd_server_key = "${base64encode(tls_private_key.server.private_key_pem)}"
etcd_peer_cert = "${base64encode(tls_locally_signed_cert.peer.cert_pem)}"
etcd_peer_key = "${base64encode(tls_private_key.peer.private_key_pem)}"
}
}

View File

@@ -1,26 +0,0 @@
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "bootstrap-etcd-service",
"namespace": "kube-system"
},
"spec": {
"selector": {
"k8s-app": "boot-etcd"
},
"clusterIP": "${bootstrap_etcd_service_ip}",
"ports": [
{
"name": "client",
"port": 12379,
"protocol": "TCP"
},
{
"name": "peers",
"port": 12380,
"protocol": "TCP"
}
]
}
}

View File

@@ -1,36 +0,0 @@
{
"apiVersion": "etcd.database.coreos.com/v1beta2",
"kind": "EtcdCluster",
"metadata": {
"name": "kube-etcd",
"namespace": "kube-system"
},
"spec": {
"size": 1,
"version": "v3.1.8",
"pod": {
"nodeSelector": {
"node-role.kubernetes.io/master": ""
},
"tolerations": [
{
"key": "node-role.kubernetes.io/master",
"operator": "Exists",
"effect": "NoSchedule"
}
]
},
"selfHosted": {
"bootMemberClientEndpoint": "https://${bootstrap_etcd_service_ip}:12379"
},
"TLS": {
"static": {
"member": {
"peerSecret": "etcd-peer-tls",
"serverSecret": "etcd-server-tls"
},
"operatorSecret": "etcd-client-tls"
}
}
}
}

View File

@@ -1,41 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-etcd
namespace: kube-system
labels:
k8s-app: boot-etcd
spec:
containers:
- name: etcd
image: ${etcd_image}
command:
- /usr/local/bin/etcd
- --name=boot-etcd
- --listen-client-urls=https://0.0.0.0:12379
- --listen-peer-urls=https://0.0.0.0:12380
- --advertise-client-urls=https://${bootstrap_etcd_service_ip}:12379
- --initial-advertise-peer-urls=https://${bootstrap_etcd_service_ip}:12380
- --initial-cluster=boot-etcd=https://${bootstrap_etcd_service_ip}:12380
- --initial-cluster-token=bootkube
- --initial-cluster-state=new
- --data-dir=/var/etcd/data
- --peer-client-cert-auth=true
- --peer-trusted-ca-file=/etc/kubernetes/secrets/etcd/peer-ca.crt
- --peer-cert-file=/etc/kubernetes/secrets/etcd/peer.crt
- --peer-key-file=/etc/kubernetes/secrets/etcd/peer.key
- --client-cert-auth=true
- --trusted-ca-file=/etc/kubernetes/secrets/etcd/server-ca.crt
- --cert-file=/etc/kubernetes/secrets/etcd/server.crt
- --key-file=/etc/kubernetes/secrets/etcd/server.key
volumeMounts:
- mountPath: /etc/kubernetes/secrets
name: secrets
readOnly: true
volumes:
- name: secrets
hostPath:
path: /etc/kubernetes/bootstrap-secrets
hostNetwork: true
restartPolicy: Never
dnsPolicy: ClusterFirstWithHostNet

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: etcd-client-tls
namespace: kube-system
type: Opaque
data:
etcd-client-ca.crt: ${etcd_ca_cert}
etcd-client.crt: ${etcd_client_cert}
etcd-client.key: ${etcd_client_key}

View File

@@ -1,46 +0,0 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: etcd-operator
namespace: kube-system
labels:
k8s-app: etcd-operator
spec:
replicas: 1
selector:
matchLabels:
k8s-app: etcd-operator
template:
metadata:
labels:
k8s-app: etcd-operator
spec:
containers:
- name: etcd-operator
image: ${etcd_operator_image}
command:
- /usr/local/bin/etcd-operator
- --analytics=false
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
nodeSelector:
node-role.kubernetes.io/master: ""
securityContext:
runAsNonRoot: true
runAsUser: 65534
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 1

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: etcd-peer-tls
namespace: kube-system
type: Opaque
data:
peer-ca.crt: ${etcd_ca_cert}
peer.crt: ${etcd_peer_cert}
peer.key: ${etcd_peer_key}

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: etcd-server-tls
namespace: kube-system
type: Opaque
data:
server-ca.crt: ${etcd_ca_cert}
server.crt: ${etcd_server_cert}
server.key: ${etcd_server_key}

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: etcd-service
namespace: kube-system
# This alpha annotation will retain the endpoints even if the etcd pod isn't ready.
# This feature is always enabled in the endpoint controller in k8s even though it is alpha.
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
selector:
app: etcd
etcd_cluster: kube-etcd
clusterIP: ${etcd_service_ip}
ports:
- name: client
port: 2379
protocol: TCP

View File

@@ -1,62 +0,0 @@
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
name: kube-etcd-network-checkpointer
namespace: kube-system
labels:
tier: control-plane
k8s-app: kube-etcd-network-checkpointer
spec:
selector:
matchLabels:
tier: control-plane
k8s-app: kube-etcd-network-checkpointer
template:
metadata:
labels:
tier: control-plane
k8s-app: kube-etcd-network-checkpointer
annotations:
checkpointer.alpha.coreos.com/checkpoint: "true"
spec:
containers:
- image: ${etcd_checkpointer_image}
name: kube-etcd-network-checkpointer
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/kubernetes/selfhosted-etcd
name: checkpoint-dir
readOnly: false
- mountPath: /var/etcd
name: etcd-dir
readOnly: false
- mountPath: /var/lock
name: var-lock
readOnly: false
command:
- /usr/bin/flock
- /var/lock/kenc.lock
- -c
- "kenc -r -m iptables && kenc -m iptables"
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: checkpoint-dir
hostPath:
path: /etc/kubernetes/checkpoint-iptables
- name: etcd-dir
hostPath:
path: /var/etcd
- name: var-lock
hostPath:
path: /var/lock
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate

View File

@@ -3,4 +3,3 @@ api_servers = ["node1.example.com"]
etcd_servers = ["node1.example.com"]
asset_dir = "/home/core/mycluster"
networking = "flannel"
experimental_self_hosted_etcd = false

View File

@@ -9,15 +9,10 @@ variable "api_servers" {
}
variable "etcd_servers" {
description = "List of URLs used to reach etcd servers. Ignored if experimental self-hosted etcd is enabled."
description = "List of URLs used to reach etcd servers."
type = "list"
}
variable "experimental_self_hosted_etcd" {
description = "(Experimental) Create self-hosted etcd assets"
default = false
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
@@ -50,7 +45,7 @@ variable "pod_cidr" {
variable "service_cidr" {
description = <<EOD
CIDR IP range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 20th IP will be reserved for bootstrap self-hosted etcd.
The 1st IP will be reserved for kube_apiserver and the 10th IP will be reserved for kube-dns.
EOD
type = "string"
@@ -64,9 +59,6 @@ variable "container_images" {
default = {
calico = "quay.io/calico/node:v2.6.3"
calico_cni = "quay.io/calico/cni:v1.11.1"
etcd = "quay.io/coreos/etcd:v3.1.8"
etcd_operator = "quay.io/coreos/etcd-operator:v0.5.0"
etcd_checkpointer = "quay.io/coreos/kenc:0.0.2"
flannel = "quay.io/coreos/flannel:v0.9.1-amd64"
flannel_cni = "quay.io/coreos/flannel-cni:v0.3.0"
hyperkube = "gcr.io/google_containers/hyperkube:v1.8.5"