Remove Kubernetes cluster provisioning examples

* Matchbox examples should be simple and educational, showing
how to PXE provision machines into clusters. Today, these
goals are achieved well enough by the 3-node etcd cluster
example.
* Several years ago, I put together examples of PXE booting
Kubernetes clusters with Matchbox. That was before we wrote
Tectonic and before Kubernetes was as popular as it is today.
A Kubernetes distro is now a project in its own right. It no
longer makes sense to maintain (duplicate) a Kubernetes
distro as "an example" inside Matchbox.
* Matchbox is now used for Kubernetes cluster provisioning in
more organizations than ever. It backs the poseidon/Typhoon
and kinvolk/Lokomotive distros, which both serve as great
external examples of using Matchbox to provision Kubernetes
clusters.

Attention: If you relied on the Matchbox Kubernetes docs, you
can find a similar guide at https://typhoon.psdn.io/cl/bare-metal/
(same author). See also https://github.com/poseidon/typhoon/
Dalton Hubble
2019-07-07 23:17:33 -07:00
parent 908e89c3a1
commit a7d19dfdd2
16 changed files with 0 additions and 1019 deletions


@@ -10,7 +10,6 @@ These examples use [Terraform](https://www.terraform.io/intro/) as a client to M
|-------------------------------|-------------------------------|
| [simple-install](terraform/simple-install/) | Install Container Linux with an SSH key |
| [etcd3-install](terraform/etcd3-install/) | Install a 3-node etcd3 cluster |
| [bootkube-install](terraform/bootkube-install/) | Install a 3-node Kubernetes v1.14.1 cluster |
### Customization
@@ -27,8 +26,6 @@ These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/mat
| grub | CoreOS Container Linux via GRUB2 Netboot | stable/1967.3.0 | RAM | NA |
| etcd3 | PXE boot a 3-node etcd3 cluster with proxies | stable/1967.3.0 | RAM | None |
| etcd3-install | Install a 3-node etcd3 cluster to disk | stable/1967.3.0 | Disk | None |
| bootkube | PXE boot a 3-node Kubernetes v1.8.5 cluster | stable/1967.3.0 | Disk | [tutorial](../Documentation/bootkube.md) |
| bootkube-install | Install a 3-node Kubernetes v1.8.5 cluster | stable/1967.3.0 | Disk | [tutorial](../Documentation/bootkube.md) |
### Customization


@@ -1,56 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: container-linux-update-agent
  namespace: kube-system
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        app: container-linux-update-agent
    spec:
      containers:
        - name: update-agent
          image: quay.io/coreos/container-linux-update-operator:v0.3.1
          command:
            - "/bin/update-agent"
          volumeMounts:
            - mountPath: /var/run/dbus
              name: var-run-dbus
            - mountPath: /etc/coreos
              name: etc-coreos
            - mountPath: /usr/share/coreos
              name: usr-share-coreos
            - mountPath: /etc/os-release
              name: etc-os-release
          env:
            # read by update-agent as the node name to manage reboots for
            - name: UPDATE_AGENT_NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      volumes:
        - name: var-run-dbus
          hostPath:
            path: /var/run/dbus
        - name: etc-coreos
          hostPath:
            path: /etc/coreos
        - name: usr-share-coreos
          hostPath:
            path: /usr/share/coreos
        - name: etc-os-release
          hostPath:
            path: /etc/os-release


@@ -1,22 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: container-linux-update-operator
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: container-linux-update-operator
    spec:
      containers:
        - name: update-operator
          image: quay.io/coreos/container-linux-update-operator:v0.3.1
          command:
            - "/bin/update-operator"
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace


@@ -1,183 +0,0 @@
---
systemd:
  units:
    - name: etcd-member.service
      enable: true
      dropins:
        - name: 40-etcd-cluster.conf
          contents: |
            [Service]
            Environment="ETCD_IMAGE_TAG=v3.2.0"
            Environment="ETCD_NAME={{.etcd_name}}"
            Environment="ETCD_ADVERTISE_CLIENT_URLS=https://{{.domain_name}}:2379"
            Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{.domain_name}}:2380"
            Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
            Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
            Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
            Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
            Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
            Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
            Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
            Environment="ETCD_CLIENT_CERT_AUTH=true"
            Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
            Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
            Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
            Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --insecure-options=image"
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --allow-privileged \
          --anonymous-auth=false \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns={{.k8s_dns_service_ip}} \
          --cluster_domain=cluster.local \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --hostname-override={{.domain_name}} \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node-role.kubernetes.io/master \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
          --require-kubeconfig
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
    - name: bootkube.service
      contents: |
        [Unit]
        Description=Bootstrap a Kubernetes control plane with a temp api-server
        [Service]
        Type=simple
        WorkingDirectory=/opt/bootkube
        ExecStart=/opt/bootkube/bootkube-start
storage:
  {{ if index . "pxe" }}
  disks:
    - device: /dev/sda
      wipe_table: true
      partitions:
        - label: ROOT
  filesystems:
    - name: root
      mount:
        device: "/dev/sda1"
        format: "ext4"
        create:
          force: true
          options:
            - "-LROOT"
  {{end}}
  files:
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
          KUBELET_IMAGE_TAG=v1.8.5
    - path: /etc/ssl/etcd/.empty
      filesystem: root
      mode: 0644
      contents:
        inline: |
          empty
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          {{.domain_name}}
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
    - path: /opt/bootkube/bootkube-start
      filesystem: root
      mode: 0544
      user:
        id: 500
      group:
        id: 500
      contents:
        inline: |
          #!/bin/bash
          # Wrapper for bootkube start
          set -e
          BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
          BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.9.1}"
          BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
          exec /usr/bin/rkt run \
            --trust-keys-from-https \
            --volume assets,kind=host,source=$BOOTKUBE_ASSETS \
            --mount volume=assets,target=/assets \
            --volume bootstrap,kind=host,source=/etc/kubernetes \
            --mount volume=bootstrap,target=/etc/kubernetes \
            $RKT_OPTS \
            ${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
            --net=host \
            --dns=host \
            --exec=/bootkube -- start --asset-dir=/assets "$@"
{{ if index . "ssh_authorized_keys" }}
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        {{ range $element := .ssh_authorized_keys }}
        - {{$element}}
        {{end}}
{{end}}


@@ -1,126 +0,0 @@
---
systemd:
  units:
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
    - name: kubelet.service
      contents: |
        [Unit]
        Description=Kubelet via Hyperkube ACI
        [Service]
        EnvironmentFile=/etc/kubernetes/kubelet.env
        Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
          --volume=resolv,kind=host,source=/etc/resolv.conf \
          --mount volume=resolv,target=/etc/resolv.conf \
          --volume var-lib-cni,kind=host,source=/var/lib/cni \
          --mount volume=var-lib-cni,target=/var/lib/cni \
          --volume opt-cni-bin,kind=host,source=/opt/cni/bin \
          --mount volume=opt-cni-bin,target=/opt/cni/bin \
          --volume var-log,kind=host,source=/var/log \
          --mount volume=var-log,target=/var/log \
          --insecure-options=image"
        ExecStartPre=/bin/mkdir -p /opt/cni/bin
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
        ExecStartPre=/bin/mkdir -p /var/lib/cni
        ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
        ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
        ExecStart=/usr/lib/coreos/kubelet-wrapper \
          --allow-privileged \
          --anonymous-auth=false \
          --client-ca-file=/etc/kubernetes/ca.crt \
          --cluster_dns={{.k8s_dns_service_ip}} \
          --cluster_domain=cluster.local \
          --cni-conf-dir=/etc/kubernetes/cni/net.d \
          --exit-on-lock-contention \
          --hostname-override={{.domain_name}} \
          --kubeconfig=/etc/kubernetes/kubeconfig \
          --lock-file=/var/run/lock/kubelet.lock \
          --network-plugin=cni \
          --node-labels=node-role.kubernetes.io/node \
          --pod-manifest-path=/etc/kubernetes/manifests \
          --require-kubeconfig
        ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
        Restart=always
        RestartSec=5
        [Install]
        WantedBy=multi-user.target
storage:
  {{ if index . "pxe" }}
  disks:
    - device: /dev/sda
      wipe_table: true
      partitions:
        - label: ROOT
  filesystems:
    - name: root
      mount:
        device: "/dev/sda1"
        format: "ext4"
        create:
          force: true
          options:
            - "-LROOT"
  {{end}}
  files:
    - path: /etc/kubernetes/kubelet.env
      filesystem: root
      mode: 0644
      contents:
        inline: |
          KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
          KUBELET_IMAGE_TAG=v1.8.5
    - path: /etc/ssl/etcd/.empty
      filesystem: root
      mode: 0644
      contents:
        inline: |
          empty
    - path: /etc/hostname
      filesystem: root
      mode: 0644
      contents:
        inline:
          {{.domain_name}}
    - path: /etc/sysctl.d/max-user-watches.conf
      filesystem: root
      contents:
        inline: |
          fs.inotify.max_user_watches=16184
{{ if index . "ssh_authorized_keys" }}
passwd:
  users:
    - name: core
      ssh_authorized_keys:
        {{ range $element := .ssh_authorized_keys }}
        - {{$element}}
        {{end}}
{{end}}


@@ -1,18 +0,0 @@
{
  "id": "bootkube-controller",
  "name": "bootkube Ready Controller",
  "boot": {
    "kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
    "initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
    "args": [
      "initrd=coreos_production_pxe_image.cpio.gz",
      "root=/dev/sda1",
      "coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
      "coreos.first_boot=yes",
      "console=tty0",
      "console=ttyS0",
      "coreos.autologin"
    ]
  },
  "ignition_id": "bootkube-controller.yaml"
}


@@ -1,18 +0,0 @@
{
  "id": "bootkube-worker",
  "name": "bootkube Ready Worker",
  "boot": {
    "kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
    "initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
    "args": [
      "initrd=coreos_production_pxe_image.cpio.gz",
      "root=/dev/sda1",
      "coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
      "coreos.first_boot=yes",
      "console=tty0",
      "console=ttyS0",
      "coreos.autologin"
    ]
  },
  "ignition_id": "bootkube-worker.yaml"
}


@@ -1,184 +0,0 @@
# Kubernetes
The Kubernetes example shows how to use Matchbox to network boot and provision a 3-node Kubernetes v1.14.1 cluster. This example uses [Terraform](https://www.terraform.io/intro/index.html) and a module provided by [Typhoon](https://github.com/poseidon/typhoon) to describe cluster resources. [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) is run once to bootstrap the Kubernetes control plane.
## Requirements
Follow the getting started [tutorial](../../../Documentation/getting-started.md) to learn about matchbox and set up an environment that meets the requirements:
* Matchbox v0.6+ [installation](../../../Documentation/deployment.md) with gRPC API enabled
* Matchbox provider credentials `client.crt`, `client.key`, and `ca.crt`
* PXE [network boot](../../../Documentation/network-setup.md) environment
* Terraform v0.11.x, [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox), and [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) installed locally
* Machines with known DNS names and MAC addresses
If you prefer to provision QEMU/KVM VMs on your local Linux machine, set up the matchbox [development environment](../../../Documentation/getting-started-docker.md).
```sh
sudo ./scripts/devnet create
```
## Terraform Setup
Install [Terraform](https://www.terraform.io/downloads.html) v0.11.x on your system.
```sh
$ terraform version
Terraform v0.11.7
```
Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
```sh
wget https://github.com/poseidon/terraform-provider-matchbox/releases/download/v0.2.3/terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
tar xzf terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
mv terraform-provider-matchbox-v0.2.3-linux-amd64/terraform-provider-matchbox ~/.terraform.d/plugins/terraform-provider-matchbox_v0.2.3
```
Add the [terraform-provider-ct](https://github.com/poseidon/terraform-provider-ct) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
```sh
wget https://github.com/poseidon/terraform-provider-ct/releases/download/v0.3.1/terraform-provider-ct-v0.3.1-linux-amd64.tar.gz
tar xzf terraform-provider-ct-v0.3.1-linux-amd64.tar.gz
mv terraform-provider-ct-v0.3.1-linux-amd64/terraform-provider-ct ~/.terraform.d/plugins/terraform-provider-ct_v0.3.1
```
## Usage
Clone the [matchbox](https://github.com/poseidon/matchbox) project and take a look at the cluster examples.
```sh
$ git clone https://github.com/poseidon/matchbox.git
$ cd matchbox/examples/terraform/bootkube-install
```
Configure the Matchbox provider to use your Matchbox API endpoint and client certificate in a `providers.tf` file.
```
provider "matchbox" {
version = "0.2.3"
endpoint = "matchbox.example.com:8081"
client_cert = "${file("~/.matchbox/client.crt")}"
client_key = "${file("~/.matchbox/client.key")}"
ca = "${file("~/.matchbox/ca.crt")}"
}
provider "ct" {
version = "0.3.1"
}
...
```
Copy the `terraform.tfvars.example` file to `terraform.tfvars`. It defines a few variables needed for the examples. Set `ssh_authorized_key` to the SSH public key you want to use in the cluster definition.
Note: With `cached_install="true"`, machines will PXE boot and install Container Linux from matchbox [assets](https://github.com/poseidon/matchbox/blob/master/Documentation/api.md#assets). For convenience, `scripts/get-coreos` can download needed images.
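For example, a minimal download sketch (run from the matchbox repository root, assuming assets are served from `examples/assets` as in the development environment):
```sh
# Download CoreOS Container Linux (stable, 1967.3.0) into matchbox's assets directory
./scripts/get-coreos stable 1967.3.0 ./examples/assets
```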
## Terraform
Initialize Terraform from the `bootkube-install` directory.
```sh
terraform init
```
Plan the resources to be created.
```sh
$ terraform plan
Plan: 75 to add, 0 to change, 0 to destroy.
```
Terraform will configure matchbox with profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`) and add groups to match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `node3` provision themselves as workers.
The module referenced in `cluster.tf` will also generate bootkube assets to `assets_dir` (exactly like the [bootkube](https://github.com/kubernetes-incubator/bootkube) binary would). These assets include Kubernetes bootstrapping and control plane manifests as well as a kubeconfig you can use to access the cluster.
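Once `terraform apply` (below) has created these resources, you can sanity-check them: list the generated assets and ask matchbox to render the Ignition config it would serve to a machine. A rough sketch, assuming this version's asset layout and `node1`'s MAC address from `cluster.tf`:
```sh
# Generated cluster assets (layout varies by bootkube/Typhoon version)
ls assets/auth assets/manifests assets/tls
# Ignition config matchbox would serve for node1's MAC address
curl "http://matchbox.example.com:8080/ignition?mac=52:54:00:a1:9c:ae"
```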
### ssh-agent
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`, otherwise `terraform apply` will hang.
```sh
ssh-add ~/.ssh/id_rsa
ssh-add -L
```
### Apply
Apply the changes.
```sh
$ terraform apply
module.cluster.null_resource.copy-secrets.0: Still creating... (5m0s elapsed)
module.cluster.null_resource.copy-secrets.1: Still creating... (5m0s elapsed)
module.cluster.null_resource.copy-secrets.2: Still creating... (5m0s elapsed)
...
module.cluster.null_resource.bootkube-start: Still creating... (8m40s elapsed)
...
```
Apply will then loop until it can successfully copy credentials to each machine and start the one-time Kubernetes bootstrap service. Proceed to the next step while this loops.
## Machines
Power on each machine (with PXE boot device on next boot). Machines should network boot, install Container Linux to disk, reboot, and provision themselves as bootkube controllers or workers.
```sh
$ ipmitool -H node1.example.com -U USER -P PASS chassis bootdev pxe
$ ipmitool -H node1.example.com -U USER -P PASS power on
```
For local QEMU/KVM development, create the QEMU/KVM VMs.
```sh
$ sudo ./scripts/libvirt create
$ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
```
## Verify
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
```sh
$ export KUBECONFIG=assets/auth/kubeconfig
$ kubectl get nodes
NAME STATUS AGE VERSION
node1.example.com Ready 11m v1.14.1
node2.example.com Ready 11m v1.14.1
node3.example.com Ready 11m v1.14.1
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-1187388186-mx9rt 3/3 Running 0 11m
kube-system coredns-1187388186-dsfk3 3/3 Running 0 11m
kube-system flannel-fqp7f 2/2 Running 1 11m
kube-system flannel-gnjrm 2/2 Running 0 11m
kube-system flannel-llbgt 2/2 Running 0 11m
kube-system kube-apiserver-7336w 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-b9chx 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-v30js 1/1 Running 1 11m
kube-system kube-proxy-50sd4 1/1 Running 0 11m
kube-system kube-proxy-bczhp 1/1 Running 0 11m
kube-system kube-proxy-mp2fw 1/1 Running 0 11m
kube-system kube-scheduler-3895335239-fd3l7 1/1 Running 1 11m
kube-system kube-scheduler-3895335239-hfjv0 1/1 Running 0 11m
kube-system pod-checkpointer-wf65d 1/1 Running 0 11m
kube-system pod-checkpointer-wf65d-node1.example.com 1/1 Running 0 11m
```
## Optional
Several Terraform module variables can override cluster defaults. [Check upstream](https://typhoon.psdn.io/bare-metal/) for the full list of options.
```hcl
...
  cached_install = "false"
  install_disk   = "/dev/sda"
  networking     = "calico"
```
## Addons
Install **important** cluster [addons](../../../Documentation/cluster-addons.md).
## Going Further
Learn more about [matchbox](../../../Documentation/matchbox.md) or explore the other [example](../) clusters.


@@ -1,46 +0,0 @@
// Kubernetes cluster
module "cluster" {
  source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.14.1"

  providers = {
    local    = "local.default"
    null     = "null.default"
    template = "template.default"
    tls      = "tls.default"
  }

  # bare-metal
  cluster_name           = "example"
  matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
  os_channel             = "coreos-stable"
  os_version             = "1967.3.0"

  # default iPXE firmware (used in dnsmasq image) doesn't offer https
  download_protocol = "http"

  # configuration
  k8s_domain_name    = "cluster.example.com"
  ssh_authorized_key = "${var.ssh_authorized_key}"
  asset_dir          = "assets"
  cached_install     = "true"

  # machines
  controller_names   = ["node1"]
  controller_macs    = ["52:54:00:a1:9c:ae"]
  controller_domains = ["node1.example.com"]

  worker_names = [
    "node2",
    "node3",
  ]
  worker_macs = [
    "52:54:00:b2:2f:86",
    "52:54:00:c3:61:77",
  ]
  worker_domains = [
    "node2.example.com",
    "node3.example.com",
  ]
}


@@ -1,32 +0,0 @@
// Configure the matchbox provider
provider "matchbox" {
  version     = "0.2.3"
  endpoint    = "${var.matchbox_rpc_endpoint}"
  client_cert = "${file("~/.matchbox/client.crt")}"
  client_key  = "${file("~/.matchbox/client.key")}"
  ca          = "${file("~/.matchbox/ca.crt")}"
}

provider "ct" {
  version = "0.3.1"
}

provider "local" {
  version = "~> 1.0"
  alias   = "default"
}

provider "null" {
  version = "~> 1.0"
  alias   = "default"
}

provider "template" {
  version = "~> 1.0"
  alias   = "default"
}

provider "tls" {
  version = "~> 1.0"
  alias   = "default"
}


@@ -1,3 +0,0 @@
matchbox_http_endpoint = "http://matchbox.example.com:8080"
matchbox_rpc_endpoint = "matchbox.example.com:8081"
ssh_authorized_key = "ADD ME"


@@ -1,14 +0,0 @@
variable "matchbox_http_endpoint" {
type = "string"
description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
}
variable "matchbox_rpc_endpoint" {
type = "string"
description = "Matchbox gRPC API endpoint, without the protocol (e.g. matchbox.example.com:8081)"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key to set as an authorized_key on machines"
}