102 Commits

Author SHA1 Message Date
Dalton Hubble
581f24d11a Update README to correspond to bootkube v0.12.0 2018-04-12 20:09:05 -07:00
Dalton Hubble
15b380a471 Remove deprecated bootstrap apiserver flags
* Remove flags deprecated in Kubernetes v1.10.x
* https://github.com/poseidon/terraform-render-bootkube/pull/50
2018-04-12 19:50:25 -07:00
Dalton Hubble
33e00a6dc5 Use k8s.gcr.io instead of gcr.io/google_containers
* Kubernetes recommends using the alias to fetch images
from the nearest GCR regional mirror, to abstract the
use of GCR, and to drop names containing "google"
* https://groups.google.com/forum/#!msg/kubernetes-dev/ytjk_rNrTa0/3EFUHvovCAAJ
2018-04-08 11:41:48 -07:00
qbast
109ddd2dc1 Add flexvolume plugin mount to controller-manager
* Mount /var/lib/kubelet/volumeplugins by default
2018-04-08 11:37:21 -07:00
Dalton Hubble
b408d80c59 Update kube-dns from v1.14.8 to v1.14.9
* https://github.com/kubernetes/kubernetes/pull/61908
2018-04-04 20:49:59 -07:00
Dalton Hubble
61fb176647 Add optional trusted certs directory variable 2018-04-04 00:35:00 -07:00
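A minimal sketch of setting this variable from a consuming config, modeled on the module usage in the README below; the override value is illustrative, and the default is assumed to be `/usr/share/ca-certificates` (the host path this variable replaces in the manifests).

```hcl
module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=SHA"

  cluster_name = "example"
  api_servers  = ["node1.example.com"]
  etcd_servers = ["node1.example.com"]
  asset_dir    = "/home/core/clusters/mycluster"

  # Illustrative override; /usr/share/ca-certificates is the assumed default.
  trusted_certs_dir = "/etc/ssl/certs"
}
```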
Dalton Hubble
5f3546b66f Remove deprecated apiserver flags 2018-03-26 20:52:56 -07:00
Dalton Hubble
e01ff60e42 Update hyperkube from v1.9.6 to v1.10.0
* Update pod checkpointer from CRI v1alpha1 to v1alpha2
* https://github.com/kubernetes-incubator/bootkube/pull/940
* https://github.com/kubernetes-incubator/bootkube/pull/938
2018-03-26 19:45:14 -07:00
Dalton Hubble
88b361207d Update hyperkube from v1.9.5 to v1.9.6 2018-03-21 20:27:11 -07:00
Dalton Hubble
747603e90d Update Calico from v3.0.3 to v3.0.4
* Update cni-plugin from v2.0.0 to v2.0.1
* https://github.com/projectcalico/calico/releases/tag/v3.0.4
* https://github.com/projectcalico/cni-plugin/releases/tag/v2.0.1
2018-03-21 20:25:04 -07:00
Andy Cobaugh
366f751283 Change user-kubeconfig output to rendered content 2018-03-21 20:21:04 -07:00
Dalton Hubble
457b596fa0 Update hyperkube from v1.9.4 to v1.9.5 2018-03-18 17:10:15 -07:00
Dalton Hubble
36bf88af70 Add /var/lib/calico volume mount for Calico
* 73705b2cb3
2018-03-18 16:35:45 -07:00
Dalton Hubble
c5fc93d95f Update hyperkube from v1.9.3 to v1.9.4 2018-03-10 23:00:59 -08:00
Dalton Hubble
c92f3589db Update Calico from v3.0.2 to v3.0.3
* https://github.com/projectcalico/calico/releases/tag/v3.0.3
2018-02-24 19:10:49 -08:00
Dalton Hubble
13a20039f5 Update README to correspond to bootkube v0.11.0 2018-02-22 21:48:30 -08:00
Dalton Hubble
070d184644 Update pod-checkpointer image version
* No notable changes except a grace period flag we don't use
* https://github.com/kubernetes-incubator/bootkube/pull/826
2018-02-15 08:03:16 -08:00
Dalton Hubble
cd6f6fa20d Remove PersistentVolumeLabel admission controller flag
* PersistentVolumeLabel admission controller is deprecated in 1.9
2018-02-11 11:25:02 -08:00
Dalton Hubble
8159561165 Switch Deployments and DaemonSets to apps/v1 2018-02-11 11:22:52 -08:00
Dalton Hubble
203b90169e Add Calico GlobalNetworkSet CRD 2018-02-10 13:04:13 -08:00
Dalton Hubble
72ab2b6aa8 Update Calico from v3.0.1 to v3.0.2
* https://github.com/projectcalico/calico/releases/tag/v3.0.2
2018-02-10 12:58:07 -08:00
Dalton Hubble
5d8a9e8986 Remove deprecated apiserver --etcd-quorum-read flag 2018-02-09 17:53:55 -08:00
Dalton Hubble
27857322df Update hyperkube from v1.9.2 to v1.9.3 2018-02-09 16:44:54 -08:00
Dalton Hubble
27d5f62f6c Change DaemonSets to tolerate NoSchedule and NoExecute taints
* Change kube-proxy, flannel, and calico to tolerate any NoSchedule
or NoExecute taint, not just allow running on masters
* https://github.com/kubernetes-incubator/bootkube/pull/704
2018-02-03 05:58:23 +01:00
Dalton Hubble
20adb15d32 Add flannel service account and RBAC cluster role
* Define a limited ClusterRole and service account for flannel
* https://github.com/kubernetes-incubator/bootkube/pull/869
2018-02-03 05:46:31 +01:00
Dalton Hubble
8d40d6c64d Update flannel from v0.9.0 to v0.10.0
* https://github.com/coreos/flannel/releases/tag/v0.10.0
2018-01-28 22:19:42 -08:00
Dalton Hubble
f4ccbeee10 Migrate from Calico v2.6.6 to v3.0.1
* https://github.com/projectcalico/calico/releases/tag/v3.0.1
2018-01-19 23:04:57 -08:00
Dalton Hubble
b339254ed5 Update README to correspond to bootkube v0.10.0 2018-01-19 23:03:03 -08:00
Dalton Hubble
9ccedf7b1e Update Calico from v2.6.5 to v2.6.6
* https://github.com/projectcalico/calico/releases/tag/v2.6.6
2018-01-19 22:18:58 -08:00
Dalton Hubble
9795894004 Update hyperkube from v1.9.1 to v1.9.2 2018-01-19 08:19:28 -08:00
Dalton Hubble
bf07c3edad Update kube-dns from v1.14.7 to v1.14.8
* https://github.com/kubernetes/kubernetes/pull/57918
2018-01-12 09:57:01 -08:00
Dalton Hubble
41a16db127 Add separate service account for kube-dns 2018-01-12 09:15:36 -08:00
Dalton Hubble
b83e321b35 Enable portmap plugin to fix hostPort with Calico
* Ask the Calico sidecar to add a CNI conflist to each node
(for the calico and portmap plugins)
* Switch from CNI conf to conflist
* https://github.com/projectcalico/cni-plugin/blob/v1.11.2/k8s-install/scripts/install-cni.sh
* Related https://github.com/kubernetes-incubator/bootkube/pull/711
2018-01-06 13:33:17 -08:00
Dalton Hubble
28333ec9da Update Calico from v2.6.4 to v2.6.5 2018-01-06 13:17:46 -08:00
Dalton Hubble
891e88a70b Update apiserver --admission-control for v1.9.x
* https://kubernetes.io/docs/admin/admission-controllers
2018-01-06 13:16:27 -08:00
Dalton Hubble
5326239074 Update hyperkube from v1.9.0 to v1.9.1 2018-01-06 11:25:26 -08:00
Dalton Hubble
abe1f6dbf3 Update kube-dns from v1.14.6 to v1.14.7
* https://github.com/kubernetes/kubernetes/pull/54443
2018-01-06 11:24:55 -08:00
Dalton Hubble
4260d9ae87 Update kube-dns version and probe for SRV records
* https://github.com/kubernetes/kubernetes/pull/51378
2018-01-06 11:24:55 -08:00
Dalton Hubble
84c86ed81a Update hyperkube from v1.8.6 to v1.9.0 2018-01-06 11:24:55 -08:00
Dalton Hubble
a97f2ea8de Use an isolated service account for controller-manager
* https://github.com/kubernetes-incubator/bootkube/pull/795
2018-01-06 11:19:11 -08:00
Dalton Hubble
5072569bb7 Update calico/cni sidecar from v1.11.1 to v1.11.2 2017-12-21 11:16:55 -08:00
Dalton Hubble
7a52b30713 Update hyperkube image from v1.8.5 to v1.8.6 2017-12-21 10:26:06 -08:00
Dalton Hubble
73fcee2471 Switch kubeconfig-in-cluster from Secret to ConfigMap
* kubeconfig-in-cluster doesn't contain secrets, just references
to locations
2017-12-21 09:15:15 -08:00
Dalton Hubble
b25d802e3e Update Calico from v2.6.3 to v2.6.4
* https://github.com/projectcalico/calico/releases/tag/v2.6.4
2017-12-21 08:57:02 -08:00
Dalton Hubble
df22b04db7 Update README to correspond to bootkube v0.9.1 2017-12-15 01:40:25 -08:00
Dalton Hubble
6dc7630020 Fix Terraform formatting with fmt 2017-12-13 00:58:26 -08:00
Dalton Hubble
3ec47194ce Rename cluster_dns_fqdn variable to cluster_domain_suffix 2017-12-13 00:11:16 -08:00
Barak Michener
03ca146ef3 Add option for Cluster DNS having a FQDN other than cluster.local 2017-12-12 10:17:53 -08:00
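A sketch of using this option; the variable name follows the later rename to `cluster_domain_suffix` (two entries above), and the suffix value is illustrative ("cluster.local" remains the conventional default).

```hcl
module "bootkube" {
  # ... cluster_name, api_servers, etcd_servers, asset_dir as in the README ...

  # Illustrative; omit to keep the conventional cluster.local suffix.
  cluster_domain_suffix = "k8s.example.com"
}
```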
Dalton Hubble
5763b447de Remove self-hosted etcd TLS cert SANs
* Remove the self-hosted etcd service IP, now defunct
2017-12-12 00:30:04 -08:00
Dalton Hubble
36243ff89b Update pod-checkpointer and drop ClusterRole to Role
* pod-checkpointer no longer needs to watch pods in all namespaces;
it should only have permission to watch kube-system
* https://github.com/kubernetes-incubator/bootkube/pull/784
2017-12-12 00:10:55 -08:00
Dalton Hubble
810ddfad9f Add controller-manager flag for service_cidr
* controller-manager can handle overlapping pod and service CIDRs
to avoid address collisions, if it's informed of both ranges
* Still favor non-overlapping pod and service ranges, of course
(see the sketch below)
* https://github.com/kubernetes-incubator/bootkube/pull/797
2017-12-12 00:00:26 -08:00
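As referenced above, a hedged sketch of the related inputs; these CIDR values are examples only, chosen to stay disjoint.

```hcl
module "bootkube" {
  # ... cluster_name, api_servers, etcd_servers, asset_dir as in the README ...

  # Example ranges; keep the pod and service CIDRs non-overlapping even
  # though controller-manager can now avoid collisions when informed of both.
  pod_cidr     = "10.2.0.0/16"
  service_cidr = "10.3.0.0/16"
}
```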
Dalton Hubble
ec48758c5e Remove experimental self-hosted etcd options 2017-12-11 21:51:07 -08:00
Dalton Hubble
533e82f833 Update hyperkube from v1.8.4 to v1.8.5 2017-12-08 08:46:22 -08:00
Dalton Hubble
31cfae5789 Update README to correspond to v0.9.0 2017-12-01 22:13:33 -08:00
Dalton Hubble
680244706c Update Calico from v2.6.1 to v2.6.3
* Bug fixes for Calico 2.6.x
https://github.com/projectcalico/calico/releases/tag/v2.6.3
* Bug fixes for cni-plugin (i.e. cni) v1.11.x
https://github.com/projectcalico/cni-plugin/releases/tag/v1.11.1
2017-11-28 21:33:51 -08:00
Dalton Hubble
dbcf3b599f Remove flock from bootstrap-apiserver and kube-apiserver
* https://github.com/kubernetes-incubator/bootkube/pull/616
2017-11-28 21:13:15 -08:00
Dalton Hubble
b7b56a6e55 Update hyperkube from v1.8.3 to v1.8.4 2017-11-28 21:11:52 -08:00
Dalton Hubble
a613c7dfa6 Remove unused critical-pod annotations in manifests
* https://github.com/kubernetes-incubator/bootkube/pull/777
2017-11-28 21:10:05 -08:00
Dalton Hubble
ab4d7becce Disable Calico termination grace period
* Disable termination grace period to account for Kubernetes v1.8
changes to DaemonSet rolling behavior
* https://github.com/projectcalico/calico/pull/1293
* Fix IPIP mode casing https://github.com/projectcalico/calico/pull/1233
2017-11-17 00:40:25 -08:00
Dalton Hubble
4d85d9c0d1 Update flannel version from v0.9.0 to v0.9.1
* https://github.com/kubernetes-incubator/bootkube/pull/776
2017-11-17 00:38:37 -08:00
Dalton Hubble
ec5f86b014 Use service accounts for kube-proxy and pod-checkpointer
* Create separate service accounts for kube-proxy and pod-checkpointer
* Switch kube-proxy and pod-checkpointer to use a kubeconfig that
references the local service account, rather than the host kubeconfig
* https://github.com/kubernetes-incubator/bootkube/pull/767
2017-11-17 00:33:22 -08:00
Dalton Hubble
92ff0f253a Update README to correspond to bootkube v0.8.2 2017-11-10 19:54:35 -08:00
Dalton Hubble
4f6af5b811 Update hyperkube from v1.8.2 to v1.8.3
* https://github.com/kubernetes-incubator/bootkube/pull/765
2017-11-08 21:48:21 -08:00
Dalton Hubble
f76e58b56d Update checkpointer with state machine impl
* https://github.com/kubernetes-incubator/bootkube/pull/759
2017-11-08 21:45:01 -08:00
Dalton Hubble
383aba4e8e Add /lib/modules mount to kube-proxy
* Starting in Kubernetes v1.8, kube-proxy modprobes ipvs
* kube-proxy still uses iptables, but may switch to ipvs in the
future; this prepares the way for that to happen
* https://github.com/kubernetes-incubator/bootkube/issues/741
2017-11-08 21:39:07 -08:00
Dalton Hubble
aebb45e6e9 Update README to correspond to bootkube v0.8.1 2017-10-28 12:44:06 -07:00
Dalton Hubble
b6b320ef6a Update hyperkube from v1.8.1 to v1.8.2
* v1.8.2 includes an apiserver memory leak fix
2017-10-24 21:27:46 -07:00
Dalton Hubble
9f4ffe273b Switch hyperkube from quay.io/coreos to gcr.io/google_containers
* Use the Kubernetes official hyperkube image
* Patches in quay.io/coreos/hyperkube are no longer needed
for kubernetes-incubator/bootkube clusters starting in
Kubernetes 1.8
2017-10-22 17:05:52 -07:00
Dalton Hubble
74366f6076 Enable hairpinMode in flannel CNI config
* Allow pods to communicate with themselves via service IP
* https://github.com/coreos/flannel/pull/849
2017-10-22 13:51:46 -07:00
Dalton Hubble
db7c13f5ee Update flannel from v0.8.0-amd64 to v0.9.0-amd64 2017-10-22 13:48:14 -07:00
Dalton Hubble
3ac28c9210 Add --no-negcache flag to dnsmasq args
* e1d6bcc227
2017-10-21 17:15:19 -07:00
Dalton Hubble
64748203ba Update assets generation for bootkube v0.8.0
* Update from Kubernetes v1.7.7 to v1.8.1
2017-10-19 20:48:24 -07:00
Dalton Hubble
262cc49856 Update README intro, repo name, and links 2017-10-08 23:00:58 -07:00
Dalton Hubble
125f29d43d Render images from the container_images map variable
* Container images may be customized to facilitate using mirrored
images or developing with custom images (see the sketch below)
2017-10-08 22:29:26 -07:00
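As referenced above, a sketch of overriding one entry in the `container_images` map; the mirror registry is hypothetical. Depending on the Terraform version, a map passed to a module may replace the default map rather than merge with it, so supply every key you rely on.

```hcl
module "bootkube" {
  # ... cluster_name, api_servers, etcd_servers, asset_dir as in the README ...

  container_images = {
    # Hypothetical mirror image; add the remaining image keys if your
    # Terraform version replaces (rather than merges) the default map.
    hyperkube = "registry.example.com/mirror/hyperkube:v1.10.0"
  }
}
```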
Dalton Hubble
aded06a0a7 Update assets generation for bootkube v0.7.0 2017-10-03 09:27:30 -07:00
Dalton Hubble
cc2b45780a Add square brackets for lists to be explicit
* Terraform's "type system" sometimes doesn't identify list
types correctly so be explicit
* https://github.com/hashicorp/terraform/issues/12263#issuecomment-282571256
2017-10-03 09:23:25 -07:00
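As referenced above, the explicit list syntax in tfvars form, with names taken from terraform.tfvars.example:

```hcl
# Brackets make the list type unambiguous, even for single-element values.
api_servers  = ["node1.example.com"]
etcd_servers = ["node1.example.com"]
```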
Dalton Hubble
d93b7e4dc8 Update kube-dns image to address dnsmasq vulnerability
* https://security.googleblog.com/2017/10/behind-masq-yet-more-dns-and-dhcp.html
2017-10-02 10:23:22 -07:00
Dalton Hubble
48b33db1f1 Update Calico from v2.6.0 to v2.6.1 2017-09-30 16:12:29 -07:00
Dalton Hubble
8a9b6f1270 Update Calico from v2.5.1 to v2.6.0
* Update cni sidecar image from v1.10.0 to v1.11.0
* Lower log level in CNI config from debug to info
2017-09-28 20:43:15 -07:00
Dalton Hubble
3b8d762081 Merge pull request #16 from poseidon/etcd-network-checkpointer
Add kube-etcd-network-checkpointer for self-hosted etcd only
2017-09-27 18:06:19 -07:00
Dalton Hubble
9c144e6522 Add kube-etcd-network-checkpointer for self-hosted etcd only 2017-09-26 00:39:42 -07:00
Dalton Hubble
c0d4f56a4c Merge pull request #12 from cloudnativelabs/doc-fix-etcd_servers
Update etcd_servers variable description
2017-09-26 00:12:34 -07:00
bzub
62c887f41b Update etcd_servers variable description. 2017-09-16 16:12:40 -05:00
Dalton Hubble
dbfb11c6ea Update assets generation for bootkube v0.6.2
* Update hyperkube to v1.7.5_coreos.0
* Update etcd-operator to v0.5.0
* Update pod-checkpointer
* Update flannel-cni to v0.2.0
* Change etcd-operator TPR to CRD
2017-09-08 13:46:28 -07:00
Dalton Hubble
5ffbfec46d Configure the Calico MTU
* Add a network_mtu input variable (default 1500)
* Set the MTU in the Calico CNI config (i.e. workload network interfaces)
* Set the Calico IP-in-IP MTU (for tunnel network interfaces)
2017-09-05 10:50:26 -07:00
Dalton Hubble
a52f99e8cc Add support for calico networking
* Add support for using Calico pod networking instead of flannel
* Add variable "networking" which may be "calico" or "flannel"
* Users MUST move the contents of assets_dir/manifests-networking
into the assets_dir/manifests directory before running bootkube
start. This is needed because Terraform cannot conditionally
generate files into a template_dir while other resources write to
and delete from the same directory (see the sketch below).
https://github.com/terraform-providers/terraform-provider-template/issues/10
2017-09-01 10:27:43 -07:00
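As referenced above, and combining it with the `network_mtu` entry further up, a sketch of selecting Calico networking; the MTU value is illustrative. The manual move of `manifests-networking` described in the commit message still applies.

```hcl
module "bootkube" {
  # ... cluster_name, api_servers, etcd_servers, asset_dir as in the README ...

  networking  = "calico"  # or "flannel"
  network_mtu = 1440      # illustrative; leaves headroom for IP-in-IP overhead
}
```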
Dalton Hubble
1c1c4b36f8 Enable hairpin mode on cbr0 in kube-flannel-cfg 2017-08-16 18:22:42 -07:00
Dalton Hubble
c4e87f9695 Update assets generation for bootkube v0.6.1 2017-08-16 18:20:40 -07:00
Dalton Hubble
4cd0360a1a Add MIT License 2017-08-02 00:05:04 -07:00
Dalton Hubble
e7d2c1e597 Update assets generation for bootkube v0.6.0 2017-07-24 13:12:32 -07:00
Dalton Hubble
ce1cc6ae34 Update assets generation for bootkube v0.5.1 2017-07-19 10:46:24 -07:00
Dalton Hubble
498a7b0aea Merge pull request #5 from dghubble/bootkube-v0.5.0
Update assets generation for bootkube v0.5.0
2017-07-12 20:07:23 -07:00
Dalton Hubble
c8c56ca64a Update assets generation for bootkube v0.5.0 2017-07-12 19:17:11 -07:00
Dalton Hubble
99f50c5317 *: Upgrade manifests for Kubernetes v1.6.6 and bootkube v0.4.5
* Enable TLS for experimental self-hosted etcd
* Update the flannel Daemonset based on upstream
* Switch control plane components to run as non-root
* Add UpdateStrategy to control plane components
2017-06-24 14:05:32 -07:00
Dalton Hubble
dd26460395 Fix bootkube version mentioned in the README 2017-06-12 14:11:43 -07:00
Dalton Hubble
21131aa65e Add generated etcd credentials to kube-apiserver-secret.yaml 2017-06-07 16:11:19 -07:00
Dalton Hubble
f03b4c1c60 Update etcd_servers example in README.md 2017-06-07 13:55:11 -07:00
Dalton Hubble
99bf97aa79 Fix etcd_servers interpolation and example 2017-06-07 13:51:07 -07:00
Dalton Hubble
4cadd6f873 Output etcd TLS assets and fmt configs 2017-06-07 11:33:56 -07:00
Dalton Hubble
dc66e59fb2 Merge pull request #3 from dghubble/on-host-etcd
Generate on-host etcd CA, client, and peer TLS cert/key pairs
2017-06-07 10:56:24 -07:00
Dalton Hubble
6e8f0f9a1d Generate on-host etcd CA, client, and peer TLS cert/key pairs 2017-06-06 18:01:36 -07:00
Dalton Hubble
3720aff28a Bump hyperkube to v1.6.4_coreos.0 for bootkube v0.4.4 2017-05-19 16:26:52 -07:00
55 changed files with 1072 additions and 424 deletions

.gitignore (+2)

@@ -1,2 +1,4 @@
*.tfvars
.terraform
*.tfstate*
assets

LICENSE (new file, +21)

@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -1,58 +1,50 @@
# bootkube-terraform
# terraform-render-bootkube
`bootkube-terraform` is a Terraform module that renders [bootkube](https://github.com/kubernetes-incubator/bootkube) assets, just like running the binary `bootkube render`. It aims to provide the same variable names, defaults, features, and outputs.
`terraform-render-bootkube` is a Terraform module that renders [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) assets for bootstrapping a Kubernetes cluster.
## Audience
`terraform-render-bootkube` is a low-level component of the [Typhoon](https://github.com/poseidon/typhoon) Kubernetes distribution. Use Typhoon modules to create and manage Kubernetes clusters across supported platforms. Use the bootkube module if you'd like to customize a Kubernetes control plane or build your own distribution.
## Usage
Use the `bootkube-terraform` module within your existing Terraform configs. Provide the variables listed in `variables.tf` or check `terraform.tfvars.example` for examples.
Use the module to declare bootkube assets. Check [variables.tf](variables.tf) for options and [terraform.tfvars.example](terraform.tfvars.example) for examples.
```hcl
module "bootkube" {
source = "git://https://github.com/dghubble/bootkube-terraform.git"
source = "git://https://github.com/poseidon/terraform-render-bootkube.git?ref=SHA"
cluster_name = "example"
api_servers = ["node1.example.com"]
etcd_servers = ["http://127.0.0.1:2379"]
etcd_servers = ["node1.example.com"]
asset_dir = "/home/core/clusters/mycluster"
experimental_self_hosted_etcd = false
}
```
Alternatively, use a local checkout of this repo and copy `terraform.tfvars.example` to `terraform.tfvars` to generate assets without an existing terraform config repo.
Generate the bootkube assets.
Generate the assets.
```sh
terraform get
terraform init
terraform get --update
terraform plan
terraform apply
```
Find bootkube assets rendered to the `asset_dir` path. That's it.
### Comparison
Render bootkube assets directly with bootkube v0.4.2.
#### On-host etcd
Render bootkube assets directly with bootkube v0.12.0.
```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=http://127.0.0.1:2379
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379
```
Compare assets. The only diffs you should see are TLS credentials.
Compare assets. Rendered assets may differ slightly from bootkube assets to reflect decisions made by the [Typhoon](https://github.com/poseidon/typhoon) distribution.
```sh
diff -rw assets /home/core/cluster/mycluster
pushd /home/core/mycluster
mv manifests-networking/* manifests
popd
diff -rw assets /home/core/mycluster
```
#### Self-hosted etcd
```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --experimental-self-hosted-etcd
```
Compare assets. Note that experimental assets must be generated to a separate directory for terraform applies to sync. Move the experimental `bootstrap-manifests` and `manifests` files during deployment.
```sh
diff -rw assets /home/core/cluster/mycluster
```


@@ -1,15 +1,17 @@
# Self-hosted Kubernetes bootstrap manifests
# Self-hosted Kubernetes bootstrap-manifests
resource "template_dir" "bootstrap-manifests" {
source_dir = "${path.module}/resources/bootstrap-manifests"
destination_dir = "${var.asset_dir}/bootstrap-manifests"
vars {
hyperkube_image = "${var.container_images["hyperkube"]}"
etcd_servers = "${var.experimental_self_hosted_etcd ? format("http://%s:2379,http://127.0.0.1:12379", cidrhost(var.service_cidr, 15)) : join(",", var.etcd_servers)}"
etcd_servers = "${join(",", formatlist("https://%s:2379", var.etcd_servers))}"
cloud_provider = "${var.cloud_provider}"
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
trusted_certs_dir = "${var.trusted_certs_dir}"
}
}
@@ -19,25 +21,46 @@ resource "template_dir" "manifests" {
destination_dir = "${var.asset_dir}/manifests"
vars {
hyperkube_image = "${var.container_images["hyperkube"]}"
etcd_servers = "${var.experimental_self_hosted_etcd ? format("http://%s:2379", cidrhost(var.service_cidr, 15)) : join(",", var.etcd_servers)}"
hyperkube_image = "${var.container_images["hyperkube"]}"
pod_checkpointer_image = "${var.container_images["pod_checkpointer"]}"
kubedns_image = "${var.container_images["kubedns"]}"
kubedns_dnsmasq_image = "${var.container_images["kubedns_dnsmasq"]}"
kubedns_sidecar_image = "${var.container_images["kubedns_sidecar"]}"
cloud_provider = "${var.cloud_provider}"
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
kube_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
etcd_servers = "${join(",", formatlist("https://%s:2379", var.etcd_servers))}"
cloud_provider = "${var.cloud_provider}"
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
kube_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
trusted_certs_dir = "${var.trusted_certs_dir}"
ca_cert = "${base64encode(var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate)}"
server = "${format("https://%s:443", element(var.api_servers, 0))}"
apiserver_key = "${base64encode(tls_private_key.apiserver.private_key_pem)}"
apiserver_cert = "${base64encode(tls_locally_signed_cert.apiserver.cert_pem)}"
serviceaccount_pub = "${base64encode(tls_private_key.service-account.public_key_pem)}"
serviceaccount_key = "${base64encode(tls_private_key.service-account.private_key_pem)}"
etcd_ca_cert = "${base64encode(tls_self_signed_cert.etcd-ca.cert_pem)}"
etcd_client_cert = "${base64encode(tls_locally_signed_cert.client.cert_pem)}"
etcd_client_key = "${base64encode(tls_private_key.client.private_key_pem)}"
}
}
# Generated kubeconfig
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "${var.asset_dir}/auth/kubeconfig"
}
# Generated kubeconfig with user-context
resource "local_file" "user-kubeconfig" {
content = "${data.template_file.user-kubeconfig.rendered}"
filename = "${var.asset_dir}/auth/${var.cluster_name}-config"
}
# Generated kubeconfig (auth/kubeconfig)
data "template_file" "kubeconfig" {
template = "${file("${path.module}/resources/kubeconfig")}"
@@ -49,12 +72,6 @@ data "template_file" "kubeconfig" {
}
}
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "${var.asset_dir}/auth/kubeconfig"
}
# Generated kubeconfig with user-context (auth/<cluster_name>-config)
data "template_file" "user-kubeconfig" {
template = "${file("${path.module}/resources/user-kubeconfig")}"
@@ -66,8 +83,3 @@ data "template_file" "user-kubeconfig" {
server = "${format("https://%s:443", element(var.api_servers, 0))}"
}
}
resource "local_file" "user-kubeconfig" {
content = "${data.template_file.user-kubeconfig.rendered}"
filename = "${var.asset_dir}/auth/${var.cluster_name}-config"
}

conditional.tf (new file, +28)

@@ -0,0 +1,28 @@
# Assets generated only when certain options are chosen
resource "template_dir" "flannel-manifests" {
count = "${var.networking == "flannel" ? 1 : 0}"
source_dir = "${path.module}/resources/flannel"
destination_dir = "${var.asset_dir}/manifests-networking"
vars {
flannel_image = "${var.container_images["flannel"]}"
flannel_cni_image = "${var.container_images["flannel_cni"]}"
pod_cidr = "${var.pod_cidr}"
}
}
resource "template_dir" "calico-manifests" {
count = "${var.networking == "calico" ? 1 : 0}"
source_dir = "${path.module}/resources/calico"
destination_dir = "${var.asset_dir}/manifests-networking"
vars {
calico_image = "${var.container_images["calico"]}"
calico_cni_image = "${var.container_images["calico_cni"]}"
network_mtu = "${var.network_mtu}"
pod_cidr = "${var.pod_cidr}"
}
}


@@ -1,68 +0,0 @@
# Experimental self-hosted etcd
# etcd pod and service bootstrap-manifests
data "template_file" "bootstrap-etcd" {
template = "${file("${path.module}/resources/experimental/bootstrap-manifests/bootstrap-etcd.yaml")}"
vars {
etcd_image = "${var.container_images["etcd"]}"
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 200)}"
}
}
resource "local_file" "bootstrap-etcd" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
content = "${data.template_file.bootstrap-etcd.rendered}"
filename = "${var.asset_dir}/experimental/bootstrap-manifests/bootstrap-etcd.yaml"
}
data "template_file" "bootstrap-etcd-service" {
template = "${file("${path.module}/resources/etcd/bootstrap-etcd-service.json")}"
vars {
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 200)}"
}
}
resource "local_file" "bootstrap-etcd-service" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
content = "${data.template_file.bootstrap-etcd-service.rendered}"
filename = "${var.asset_dir}/etcd/bootstrap-etcd-service.json"
}
data "template_file" "etcd-tpr" {
template = "${file("${path.module}/resources/etcd/migrate-etcd-cluster.json")}"
vars {
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 200)}"
}
}
resource "local_file" "etcd-tpr" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
content = "${data.template_file.etcd-tpr.rendered}"
filename = "${var.asset_dir}/etcd/migrate-etcd-cluster.json"
}
# etcd operator deployment and service manifests
resource "local_file" "etcd-operator" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
depends_on = ["template_dir.manifests"]
content = "${file("${path.module}/resources/experimental/manifests/etcd-operator.yaml")}"
filename = "${var.asset_dir}/experimental/manifests/etcd-operator.yaml"
}
data "template_file" "etcd-service" {
template = "${file("${path.module}/resources/experimental/manifests/etcd-service.yaml")}"
vars {
etcd_service_ip = "${cidrhost(var.service_cidr, 15)}"
}
}
resource "local_file" "etcd-service" {
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
depends_on = ["template_dir.manifests"]
content = "${data.template_file.etcd-service.rendered}"
filename = "${var.asset_dir}/experimental/manifests/etcd-service.yaml"
}


@@ -10,16 +10,42 @@ output "kube_dns_service_ip" {
value = "${cidrhost(var.service_cidr, 10)}"
}
output "etcd_service_ip" {
value = "${cidrhost(var.service_cidr, 15)}"
}
output "kubeconfig" {
value = "${data.template_file.kubeconfig.rendered}"
}
output "user-kubeconfig" {
value = "${local_file.user-kubeconfig.filename}"
value = "${data.template_file.user-kubeconfig.rendered}"
}
# etcd TLS assets
output "etcd_ca_cert" {
value = "${tls_self_signed_cert.etcd-ca.cert_pem}"
}
output "etcd_client_cert" {
value = "${tls_locally_signed_cert.client.cert_pem}"
}
output "etcd_client_key" {
value = "${tls_private_key.client.private_key_pem}"
}
output "etcd_server_cert" {
value = "${tls_locally_signed_cert.server.cert_pem}"
}
output "etcd_server_key" {
value = "${tls_private_key.server.private_key_pem}"
}
output "etcd_peer_cert" {
value = "${tls_locally_signed_cert.peer.cert_pem}"
}
output "etcd_peer_key" {
value = "${tls_private_key.peer.private_key_pem}"
}
# Some platforms may need to reconstruct the kubeconfig directly in user-data.


@@ -8,20 +8,18 @@ spec:
- name: kube-apiserver
image: ${hyperkube_image}
command:
- /usr/bin/flock
- --exclusive
- --timeout=30
- /var/lock/api-server.lock
- /hyperkube
- apiserver
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- --advertise-address=$(POD_IP)
- --allow-privileged=true
- --authorization-mode=RBAC
- --bind-address=0.0.0.0
- --client-ca-file=/etc/kubernetes/secrets/ca.crt
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultTolerationSeconds,DefaultStorageClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota
- --etcd-cafile=/etc/kubernetes/secrets/etcd-client-ca.crt
- --etcd-certfile=/etc/kubernetes/secrets/etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/secrets/etcd-client.key
- --etcd-servers=${etcd_servers}
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
- --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
- --secure-port=443
@@ -29,7 +27,6 @@ spec:
- --service-cluster-ip-range=${service_cidr}
- --cloud-provider=${cloud_provider}
- --storage-backend=etcd3
- --tls-ca-file=/etc/kubernetes/secrets/ca.crt
- --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
env:
@@ -54,7 +51,7 @@ spec:
path: /etc/kubernetes/bootstrap-secrets
- name: ssl-certs-host
hostPath:
path: /usr/share/ca-certificates
path: ${trusted_certs_dir}
- name: var-lock
hostPath:
path: /var/lock


@@ -12,6 +12,7 @@ spec:
- controller-manager
- --allocate-node-cidrs=true
- --cluster-cidr=${pod_cidr}
- --service-cluster-ip-range=${service_cidr}
- --cloud-provider=${cloud_provider}
- --configure-cloud-routes=false
- --kubeconfig=/etc/kubernetes/kubeconfig
@@ -32,4 +33,4 @@ spec:
path: /etc/kubernetes
- name: ssl-host
hostPath:
path: /usr/share/ca-certificates
path: ${trusted_certs_dir}


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico BGP Configuration
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico BGP Peers
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system


@@ -0,0 +1,68 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: calico-node
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- patch
- apiGroups: [""]
resources:
- services
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- clusterinformations
verbs:
- create
- get
- list
- update
- watch


@@ -0,0 +1,40 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: calico-config
namespace: kube-system
data:
# Disable Typha for now.
typha_service_name: "none"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": ${network_mtu},
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system


@@ -0,0 +1,152 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
spec:
hostNetwork: true
serviceAccountName: calico-node
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
containers:
- name: calico-node
image: ${calico_image}
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix info logging.
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPV6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
value: "${network_mtu}"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# The Calico IPv4 pool CIDR (should match `--cluster-cidr`).
- name: CALICO_IPV4POOL_CIDR
value: "${pod_cidr}"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Enable IP-in-IP within Felix.
- name: FELIX_IPINIPENABLED
value: "true"
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
# Set node name based on k8s nodeName.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
# Install Calico CNI binaries and CNI network config file on nodes
- name: install-cni
image: ${calico_cni_image}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create on each node.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# Contents of the CNI config to create on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set node name based on k8s nodeName
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CNI_NET_DIR
value: "/etc/kubernetes/cni/net.d"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
terminationGracePeriodSeconds: 0
volumes:
# Used by calico/node
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
# Used by install-cni
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/kubernetes/cni/net.d


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Cluster Information
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Felix Configuration
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Network Policies
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Global Network Sets
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico IP Pools
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool


@@ -0,0 +1,13 @@
apiVersion: apiextensions.k8s.io/v1beta1
description: Calico Network Policies
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy


@@ -1,26 +0,0 @@
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "bootstrap-etcd-service",
"namespace": "kube-system"
},
"spec": {
"selector": {
"k8s-app": "boot-etcd"
},
"clusterIP": "${bootstrap_etcd_service_ip}",
"ports": [
{
"name": "client",
"port": 12379,
"protocol": "TCP"
},
{
"name": "peers",
"port": 12380,
"protocol": "TCP"
}
]
}
}


@@ -1,27 +0,0 @@
{
"apiVersion": "etcd.coreos.com/v1beta1",
"kind": "Cluster",
"metadata": {
"name": "kube-etcd",
"namespace": "kube-system"
},
"spec": {
"size": 1,
"version": "v3.1.6",
"pod": {
"nodeSelector": {
"node-role.kubernetes.io/master": ""
},
"tolerations": [
{
"key": "node-role.kubernetes.io/master",
"operator": "Exists",
"effect": "NoSchedule"
}
]
},
"selfHosted": {
"bootMemberClientEndpoint": "http://${bootstrap_etcd_service_ip}:12379"
}
}
}


@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-etcd
namespace: kube-system
labels:
k8s-app: boot-etcd
spec:
containers:
- name: etcd
image: ${etcd_image}
command:
- /usr/local/bin/etcd
- --name=boot-etcd
- --listen-client-urls=http://0.0.0.0:12379
- --listen-peer-urls=http://0.0.0.0:12380
- --advertise-client-urls=http://${bootstrap_etcd_service_ip}:12379
- --initial-advertise-peer-urls=http://${bootstrap_etcd_service_ip}:12380
- --initial-cluster=boot-etcd=http://${bootstrap_etcd_service_ip}:12380
- --initial-cluster-token=bootkube
- --initial-cluster-state=new
- --data-dir=/var/etcd/data
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
hostNetwork: true
restartPolicy: Never


@@ -1,35 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: etcd-operator
namespace: kube-system
labels:
k8s-app: etcd-operator
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: etcd-operator
spec:
containers:
- name: etcd-operator
image: quay.io/coreos/etcd-operator:v0.3.0
command:
- /usr/local/bin/etcd-operator
- --analytics=false
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule


@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: etcd-service
namespace: kube-system
spec:
selector:
app: etcd
etcd_cluster: kube-etcd
clusterIP: ${etcd_service_ip}
ports:
- name: client
port: 2379
protocol: TCP


@@ -0,0 +1,36 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
k8s-app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "${pod_cidr}",
"Backend": {
"Type": "vxlan"
}
}


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system


@@ -0,0 +1,24 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system


@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel
@@ -7,15 +7,20 @@ metadata:
tier: node
k8s-app: flannel
spec:
selector:
matchLabels:
tier: node
k8s-app: flannel
template:
metadata:
labels:
tier: node
k8s-app: flannel
spec:
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.7.1-amd64
image: ${flannel_image}
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface=$(POD_IP)"]
securityContext:
privileged: true
@@ -40,18 +45,25 @@ spec:
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: busybox
command: [ "/bin/sh", "-c", "set -e -x; TMP=/etc/cni/net.d/.tmp-flannel-cfg; cp /etc/kube-flannel/cni-conf.json $TMP; mv $TMP /etc/cni/net.d/10-flannel.conf; while :; do sleep 3600; done" ]
image: ${flannel_cni_image}
command: ["/install-cni.sh"]
env:
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
effect: NoSchedule
volumes:
- name: run
hostPath:
@@ -62,3 +74,10 @@ spec:
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: host-cni-bin
hostPath:
path: /opt/cni/bin
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate


@@ -9,3 +9,6 @@ data:
apiserver.crt: ${apiserver_cert}
service-account.pub: ${serviceaccount_pub}
ca.crt: ${ca_cert}
etcd-client-ca.crt: ${etcd_ca_cert}
etcd-client.crt: ${etcd_client_cert}
etcd-client.key: ${etcd_client_key}


@@ -1,4 +1,4 @@
apiVersion: "extensions/v1beta1"
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-apiserver
@@ -7,6 +7,10 @@ metadata:
tier: control-plane
k8s-app: kube-apiserver
spec:
selector:
matchLabels:
tier: control-plane
k8s-app: kube-apiserver
template:
metadata:
labels:
@@ -14,19 +18,13 @@ spec:
k8s-app: kube-apiserver
annotations:
checkpointer.alpha.coreos.com/checkpoint: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: kube-apiserver
image: ${hyperkube_image}
command:
- /usr/bin/flock
- --exclusive
- --timeout=30
- /var/lock/api-server.lock
- /hyperkube
- apiserver
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- --advertise-address=$(POD_IP)
- --allow-privileged=true
- --anonymous-auth=false
@@ -34,15 +32,17 @@ spec:
- --bind-address=0.0.0.0
- --client-ca-file=/etc/kubernetes/secrets/ca.crt
- --cloud-provider=${cloud_provider}
- --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultTolerationSeconds,DefaultStorageClass,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota
- --etcd-cafile=/etc/kubernetes/secrets/etcd-client-ca.crt
- --etcd-certfile=/etc/kubernetes/secrets/etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/secrets/etcd-client.key
- --etcd-servers=${etcd_servers}
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
- --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
- --secure-port=443
- --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
- --service-cluster-ip-range=${service_cidr}
- --storage-backend=etcd3
- --tls-ca-file=/etc/kubernetes/secrets/ca.crt
- --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
env:
@@ -64,18 +64,20 @@ spec:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: ssl-certs-host
hostPath:
path: /usr/share/ca-certificates
path: ${trusted_certs_dir}
- name: secrets
secret:
secretName: kube-apiserver
- name: var-lock
hostPath:
path: /var/lock
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-controller-manager
subjects:
- kind: ServiceAccount
name: kube-controller-manager
namespace: kube-system


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: kube-system
name: kube-controller-manager


@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-controller-manager
@@ -8,13 +8,15 @@ metadata:
k8s-app: kube-controller-manager
spec:
replicas: 2
selector:
matchLabels:
tier: control-plane
k8s-app: kube-controller-manager
template:
metadata:
labels:
tier: control-plane
k8s-app: kube-controller-manager
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
affinity:
podAntiAffinity:
@@ -30,7 +32,7 @@ spec:
- key: k8s-app
operator: In
values:
- kube-contoller-manager
- kube-controller-manager
topologyKey: kubernetes.io/hostname
containers:
- name: kube-controller-manager
@@ -38,11 +40,14 @@ spec:
command:
- ./hyperkube
- controller-manager
- --use-service-account-credentials
- --allocate-node-cidrs=true
- --cloud-provider=${cloud_provider}
- --cluster-cidr=${pod_cidr}
- --service-cluster-ip-range=${service_cidr}
- --configure-cloud-routes=false
- --leader-elect=true
- --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins
- --root-ca-file=/etc/kubernetes/secrets/ca.crt
- --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key
livenessProbe:
@@ -55,14 +60,19 @@ spec:
- name: secrets
mountPath: /etc/kubernetes/secrets
readOnly: true
- name: volumeplugins
mountPath: /var/lib/kubelet/volumeplugins
readOnly: true
- name: ssl-host
mountPath: /etc/ssl/certs
readOnly: true
nodeSelector:
node-role.kubernetes.io/master: ""
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: kube-controller-manager
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
@@ -72,5 +82,8 @@ spec:
secretName: kube-controller-manager
- name: ssl-host
hostPath:
path: /usr/share/ca-certificates
path: ${trusted_certs_dir}
- name: volumeplugins
hostPath:
path: /var/lib/kubelet/volumeplugins
dnsPolicy: Default # Don't use cluster DNS.


@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns
@@ -6,6 +6,7 @@ metadata:
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
@@ -22,12 +23,21 @@ spec:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
image: ${kubedns_image}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -57,7 +67,7 @@ spec:
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=cluster.local.
- --domain=${cluster_domain_suffix}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
@@ -78,7 +88,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
image: ${kubedns_dnsmasq_image}
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -96,8 +106,9 @@ spec:
- --
- -k
- --cache-size=1000
- --no-negcache
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/${cluster_domain_suffix}/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
@@ -116,7 +127,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
image: ${kubedns_sidecar_image}
livenessProbe:
httpGet:
path: /metrics
@@ -129,8 +140,8 @@ spec:
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cluster_domain_suffix},5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cluster_domain_suffix},5,SRV
ports:
- containerPort: 10054
name: metrics
@@ -140,16 +151,4 @@ spec:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
serviceAccountName: kube-dns


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system


@@ -1,48 +0,0 @@
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
name: kube-etcd-network-checkpointer
namespace: kube-system
labels:
tier: control-plane
k8s-app: kube-etcd-network-checkpointer
spec:
template:
metadata:
labels:
tier: control-plane
k8s-app: kube-etcd-network-checkpointer
annotations:
checkpointer.alpha.coreos.com/checkpoint: "true"
spec:
containers:
- image: quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035
name: kube-etcd-network-checkpointer
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/kubernetes/selfhosted-etcd
name: checkpoint-dir
readOnly: false
- mountPath: /var/lock
name: var-lock
readOnly: false
command:
- /usr/bin/flock
- /var/lock/kenc.lock
- -c
- "kenc -r -m iptables && kenc -m iptables"
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: checkpoint-dir
hostPath:
path: /etc/kubernetes/checkpoint-iptables
- name: var-lock
hostPath:
path: /var/lock


@@ -1,24 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
k8s-app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "${pod_cidr}",
"Backend": {
"Type": "vxlan"
}
}


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-proxy
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-proxier # Automatically created system role.
subjects:
- kind: ServiceAccount
name: kube-proxy
namespace: kube-system


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: kube-system
name: kube-proxy


@@ -1,4 +1,4 @@
apiVersion: "extensions/v1beta1"
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-proxy
@@ -7,19 +7,21 @@ metadata:
tier: node
k8s-app: kube-proxy
spec:
selector:
matchLabels:
tier: node
k8s-app: kube-proxy
template:
metadata:
labels:
tier: node
k8s-app: kube-proxy
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: kube-proxy
image: ${hyperkube_image}
command:
- /hyperkube
- ./hyperkube
- proxy
- --cluster-cidr=${pod_cidr}
- --hostname-override=$(NODE_NAME)
@@ -33,23 +35,33 @@ spec:
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- name: etc-kubernetes
- name: kubeconfig
mountPath: /etc/kubernetes
readOnly: true
hostNetwork: true
serviceAccountName: kube-proxy
tolerations:
- key: CriticalAddonsOnly
- effect: NoSchedule
operator: Exists
- key: node-role.kubernetes.io/master
- effect: NoExecute
operator: Exists
effect: NoSchedule
volumes:
- hostPath:
path: /usr/share/ca-certificates
name: ssl-certs-host
- name: etc-kubernetes
- name: lib-modules
hostPath:
path: /etc/kubernetes
path: /lib/modules
- name: ssl-certs-host
hostPath:
path: ${trusted_certs_dir}
- name: kubeconfig
configMap:
name: kubeconfig-in-cluster
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate


@@ -1,4 +1,4 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-scheduler
@@ -8,13 +8,15 @@ metadata:
k8s-app: kube-scheduler
spec:
replicas: 2
selector:
matchLabels:
tier: control-plane
k8s-app: kube-scheduler
template:
metadata:
labels:
tier: control-plane
k8s-app: kube-scheduler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
affinity:
podAntiAffinity:
@@ -47,9 +49,10 @@ spec:
timeoutSeconds: 15
nodeSelector:
node-role.kubernetes.io/master: ""
securityContext:
runAsNonRoot: true
runAsUser: 65534
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule


@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1alpha1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:default-sa


@@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeconfig-in-cluster
namespace: kube-system
data:
kubeconfig: |
apiVersion: v1
clusters:
- name: local
cluster:
server: ${server}
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
users:
- name: service-account
user:
# Use service account token
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
contexts:
- context:
cluster: local
user: service-account


@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: pod-checkpointer
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: pod-checkpointer
subjects:
- kind: ServiceAccount
name: pod-checkpointer
namespace: kube-system


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pod-checkpointer
namespace: kube-system
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["pods"]
verbs: ["get", "watch", "list"]
- apiGroups: [""] # "" indicates the core API group
resources: ["secrets", "configmaps"]
verbs: ["get"]


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: kube-system
name: pod-checkpointer


@@ -1,4 +1,4 @@
-apiVersion: "extensions/v1beta1"
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: pod-checkpointer
@@ -7,6 +7,10 @@ metadata:
     tier: control-plane
     k8s-app: pod-checkpointer
 spec:
+  selector:
+    matchLabels:
+      tier: control-plane
+      k8s-app: pod-checkpointer
   template:
     metadata:
       labels:
@@ -16,12 +20,12 @@ spec:
         checkpointer.alpha.coreos.com/checkpoint: "true"
     spec:
       containers:
-      - name: checkpoint
-        image: quay.io/coreos/pod-checkpointer:2cad4cac4186611a79de1969e3ea4924f02f459e
+      - name: pod-checkpointer
+        image: ${pod_checkpointer_image}
         command:
         - /checkpoint
-        - --v=4
         - --lock-file=/var/run/lock/pod-checkpointer.lock
+        - --kubeconfig=/etc/checkpointer/kubeconfig
         env:
         - name: NODE_NAME
           valueFrom:
@@ -37,10 +41,13 @@ spec:
             fieldPath: metadata.namespace
         imagePullPolicy: Always
         volumeMounts:
+        - mountPath: /etc/checkpointer
+          name: kubeconfig
         - mountPath: /etc/kubernetes
           name: etc-kubernetes
         - mountPath: /var/run
           name: var-run
+      serviceAccountName: pod-checkpointer
       hostNetwork: true
       nodeSelector:
         node-role.kubernetes.io/master: ""
@@ -50,9 +57,16 @@ spec:
         operator: Exists
         effect: NoSchedule
       volumes:
+      - name: kubeconfig
+        configMap:
+          name: kubeconfig-in-cluster
       - name: etc-kubernetes
         hostPath:
           path: /etc/kubernetes
       - name: var-run
         hostPath:
           path: /var/run
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate


@@ -1,5 +1,5 @@
 cluster_name = "example"
 api_servers = ["node1.example.com"]
-etcd_servers = ["http://127.0.0.1:2379"]
-asset_dir = "/home/core/clusters/mycluster"
-experimental_self_hosted_etcd = false
+etcd_servers = ["node1.example.com"]
+asset_dir = "/home/core/mycluster"
+networking = "flannel"

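For reference, the same values can be passed directly as module arguments instead of via terraform.tfvars; a minimal invocation sketch (the source ref is a placeholder, not taken from this diff):

module "bootkube" {
  source = "git::https://github.com/poseidon/terraform-render-bootkube.git?ref=<commit-or-tag>"

  cluster_name = "example"
  api_servers  = ["node1.example.com"]
  etcd_servers = ["node1.example.com"]
  asset_dir    = "/home/core/mycluster"
  networking   = "flannel"
}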
tls-etcd.tf (new file, 199 lines)

@@ -0,0 +1,199 @@
# etcd-client-ca.crt
resource "local_file" "etcd_client_ca_crt" {
  content  = "${tls_self_signed_cert.etcd-ca.cert_pem}"
  filename = "${var.asset_dir}/tls/etcd-client-ca.crt"
}

# etcd-client.crt
resource "local_file" "etcd_client_crt" {
  content  = "${tls_locally_signed_cert.client.cert_pem}"
  filename = "${var.asset_dir}/tls/etcd-client.crt"
}

# etcd-client.key
resource "local_file" "etcd_client_key" {
  content  = "${tls_private_key.client.private_key_pem}"
  filename = "${var.asset_dir}/tls/etcd-client.key"
}

# server-ca.crt
resource "local_file" "etcd_server_ca_crt" {
  content  = "${tls_self_signed_cert.etcd-ca.cert_pem}"
  filename = "${var.asset_dir}/tls/etcd/server-ca.crt"
}

# server.crt
resource "local_file" "etcd_server_crt" {
  content  = "${tls_locally_signed_cert.server.cert_pem}"
  filename = "${var.asset_dir}/tls/etcd/server.crt"
}

# server.key
resource "local_file" "etcd_server_key" {
  content  = "${tls_private_key.server.private_key_pem}"
  filename = "${var.asset_dir}/tls/etcd/server.key"
}

# peer-ca.crt
resource "local_file" "etcd_peer_ca_crt" {
  content  = "${tls_self_signed_cert.etcd-ca.cert_pem}"
  filename = "${var.asset_dir}/tls/etcd/peer-ca.crt"
}

# peer.crt
resource "local_file" "etcd_peer_crt" {
  content  = "${tls_locally_signed_cert.peer.cert_pem}"
  filename = "${var.asset_dir}/tls/etcd/peer.crt"
}

# peer.key
resource "local_file" "etcd_peer_key" {
  content  = "${tls_private_key.peer.private_key_pem}"
  filename = "${var.asset_dir}/tls/etcd/peer.key"
}

# certificates and keys

resource "tls_private_key" "etcd-ca" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_self_signed_cert" "etcd-ca" {
  key_algorithm   = "${tls_private_key.etcd-ca.algorithm}"
  private_key_pem = "${tls_private_key.etcd-ca.private_key_pem}"

  subject {
    common_name  = "etcd-ca"
    organization = "etcd"
  }

  is_ca_certificate     = true
  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "cert_signing",
  ]
}

# client certs are used for client (apiserver, locksmith, etcd-operator)
# to etcd communication
resource "tls_private_key" "client" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "client" {
  key_algorithm   = "${tls_private_key.client.algorithm}"
  private_key_pem = "${tls_private_key.client.private_key_pem}"

  subject {
    common_name  = "etcd-client"
    organization = "etcd"
  }

  ip_addresses = [
    "127.0.0.1",
  ]

  dns_names = ["${concat(
    var.etcd_servers,
    list(
      "localhost",
    ))}"]
}

resource "tls_locally_signed_cert" "client" {
  cert_request_pem   = "${tls_cert_request.client.cert_request_pem}"
  ca_key_algorithm   = "${join(" ", tls_self_signed_cert.etcd-ca.*.key_algorithm)}"
  ca_private_key_pem = "${join(" ", tls_private_key.etcd-ca.*.private_key_pem)}"
  ca_cert_pem        = "${join(" ", tls_self_signed_cert.etcd-ca.*.cert_pem)}"

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

resource "tls_private_key" "server" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "server" {
  key_algorithm   = "${tls_private_key.server.algorithm}"
  private_key_pem = "${tls_private_key.server.private_key_pem}"

  subject {
    common_name  = "etcd-server"
    organization = "etcd"
  }

  ip_addresses = [
    "127.0.0.1",
  ]

  dns_names = ["${concat(
    var.etcd_servers,
    list(
      "localhost",
    ))}"]
}

resource "tls_locally_signed_cert" "server" {
  cert_request_pem   = "${tls_cert_request.server.cert_request_pem}"
  ca_key_algorithm   = "${join(" ", tls_self_signed_cert.etcd-ca.*.key_algorithm)}"
  ca_private_key_pem = "${join(" ", tls_private_key.etcd-ca.*.private_key_pem)}"
  ca_cert_pem        = "${join(" ", tls_self_signed_cert.etcd-ca.*.cert_pem)}"

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

resource "tls_private_key" "peer" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "peer" {
  key_algorithm   = "${tls_private_key.peer.algorithm}"
  private_key_pem = "${tls_private_key.peer.private_key_pem}"

  subject {
    common_name  = "etcd-peer"
    organization = "etcd"
  }

  dns_names = ["${var.etcd_servers}"]
}

resource "tls_locally_signed_cert" "peer" {
  cert_request_pem   = "${tls_cert_request.peer.cert_request_pem}"
  ca_key_algorithm   = "${join(" ", tls_self_signed_cert.etcd-ca.*.key_algorithm)}"
  ca_private_key_pem = "${join(" ", tls_private_key.etcd-ca.*.private_key_pem)}"
  ca_cert_pem        = "${join(" ", tls_self_signed_cert.etcd-ca.*.cert_pem)}"

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

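All three leaf certificates (client, server, peer) are signed by the single self-signed etcd-ca, and every certificate is valid for 8760 hours (one year). Callers that need the PEM contents rather than the files written under asset_dir could expose them as outputs; a sketch with hypothetical output names, not taken from this repo:

output "etcd_ca_cert" {
  value = "${tls_self_signed_cert.etcd-ca.cert_pem}"
}

output "etcd_client_cert" {
  value = "${tls_locally_signed_cert.client.cert_pem}"
}

output "etcd_client_key" {
  value     = "${tls_private_key.client.private_key_pem}"
  sensitive = true
}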

@@ -70,7 +70,7 @@ resource "tls_cert_request" "apiserver" {
     "kubernetes",
     "kubernetes.default",
     "kubernetes.default.svc",
-    "kubernetes.default.svc.cluster.local",
+    "kubernetes.default.svc.${var.cluster_domain_suffix}",
   ]

   ip_addresses = [

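With this change, the apiserver certificate's in-cluster DNS SANs follow cluster_domain_suffix instead of hardcoding cluster.local, so a cluster with a custom DNS suffix still gets a certificate matching the kubernetes.default.svc.<suffix> name; with the default value the rendered tail of the list is unchanged:

dns_names = [
  "kubernetes",
  "kubernetes.default",
  "kubernetes.default.svc",
  "kubernetes.default.svc.${var.cluster_domain_suffix}",
]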

@@ -4,20 +4,15 @@ variable "cluster_name" {
 }

 variable "api_servers" {
-  description = "URL used to reach kube-apiserver"
+  description = "List of URLs used to reach kube-apiserver"
   type        = "list"
 }

 variable "etcd_servers" {
-  description = "List of etcd server URLs including protocol, host, and port"
+  description = "List of URLs used to reach etcd servers."
   type        = "list"
 }

-variable "experimental_self_hosted_etcd" {
-  description = "(Experimental) Create self-hosted etcd assets"
-  default     = false
-}
-
 variable "asset_dir" {
   description = "Path to a directory where generated assets should be placed (contains secrets)"
   type        = "string"
@@ -29,6 +24,18 @@ variable "cloud_provider" {
   default     = ""
 }

+variable "networking" {
+  description = "Choice of networking provider (flannel or calico)"
+  type        = "string"
+  default     = "flannel"
+}
+
+variable "network_mtu" {
+  description = "CNI interface MTU (applies to calico only)"
+  type        = "string"
+  default     = "1500"
+}
+
 variable "pod_cidr" {
   description = "CIDR IP range to assign Kubernetes pods"
   type        = "string"
@@ -38,10 +45,17 @@ variable "pod_cidr" {
 variable "service_cidr" {
   description = <<EOD
 CIDR IP range to assign Kubernetes services.
-The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
+The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
 EOD
   type        = "string"
   default     = "10.3.0.0/24"
 }
+
+variable "cluster_domain_suffix" {
+  description = "Queries for domains with the suffix will be answered by kube-dns"
+  type        = "string"
+  default     = "cluster.local"
+}

 variable "container_images" {
@@ -49,11 +63,24 @@ variable "container_images" {
type = "map"
default = {
hyperkube = "quay.io/coreos/hyperkube:v1.6.2_coreos.0"
etcd = "quay.io/coreos/etcd:v3.1.6"
calico = "quay.io/calico/node:v3.0.4"
calico_cni = "quay.io/calico/cni:v2.0.1"
flannel = "quay.io/coreos/flannel:v0.10.0-amd64"
flannel_cni = "quay.io/coreos/flannel-cni:v0.3.0"
hyperkube = "k8s.gcr.io/hyperkube:v1.10.0"
kubedns = "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.9"
kubedns_dnsmasq = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.9"
kubedns_sidecar = "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.9"
pod_checkpointer = "quay.io/coreos/pod-checkpointer:9dc83e1ab3bc36ca25c9f7c18ddef1b91d4a0558"
}
}
variable "trusted_certs_dir" {
description = "Path to the directory on cluster nodes where trust TLS certs are kept"
type = "string"
default = "/usr/share/ca-certificates"
}
variable "ca_certificate" {
description = "Existing PEM-encoded CA certificate (generated if blank)"
type = "string"