Mirror of https://github.com/outbackdingo/terraform-render-bootstrap.git (synced 2026-01-27 18:20:40 +00:00)
Compare commits
241 Commits
Commit SHAs:

0ddd90fd05 4369c706e2 7df6bd8d1e dce49114a0 50a221e042 4d7484f72a 6c7ba3864f 8005052cfb
0f1f16c612 43e1230c55 1bba891d95 0daa1276c6 a2b1dbe2c0 3c7334ab55 e09d6bef33 0fcc067476
6f2734bb3c 10d9cec5c2 1f8b634652 586d6e36f6 18b7a74d30 539b725093 d6206abedd e839ec5a2b
3dade188f2 97bbed6c3a 6e59af7113 98cc19f80f 248675e7a9 8b3738b2cc c21da02249 83dd5a7cfc
ed94836925 5b9faa9031 119cb00fa7 4caca47776 3bfd1253ec 95f6fc7fa5 62df9ad69c 89c3ab4e27
0103bc06bb 33d033f1a6 082921d679 efd1cfd9bf 85571f6dae eca7c49fe1 42b9e782b2 fc7a6fb20a
b96d641f6d 614defe090 a80eed2b6a 53b2520d70 feb6e4cb3e 88fd15c2f6 b9bef14a0b a693381400
bcb015e105 da0321287b 9862888bb2 23f81a5e8c 6cda319b9d e6e051ef47 1528266595 953521dbba
0a7c4fda35 593f0e3655 c5f5aacce9 4d315afd41 c12a11c800 1de56ef7c8 7dc8f8bf8c c5bc23ef7a
54f15b6c8c 7b06557b7a ef99293eb2 e892e291b5 2353c586a1 bcbdddd8d0 f1e69f1d93 48730c0f12
0e65e3567e 4f8952a956 ea30087577 847ec5929b f5ea389e8c 3431a12ac1 a7bd306679 f382415f2b
7bcca25043 fa4c2d8a68 d14348a368 51e3323a6d 95e568935c b101fddf6e cff13f9248 9d6f0c31d3
7dc6e199f9 bfb3d23d1b 4021467b7f bffb5d5d23 dbf67da1cb 3d9f957aec 39f9afb336 3f3ab6b5c0
1cb00c8270 d045a8e6b8 8742024bbf 365d089610 f39f8294c4 6a77775e52 e0e5577d37 79065baa8c
81f19507fa 2437023c10 4e0ad77f96 f7c2f8d590 7797377d50 bccf3da096 9929abef7d 5378e166ef
6f024c457e 70c2839970 9e6fc7e697 81ba300e71 eb2dfa64de 34992426f6 1d4db824f0 2bcf61b2b5
0e98e89e14 24e900af46 3fa3c2d73b 2a776e7054 28f68db28e 305c813234 911f411508 a43af2562c
dc721063af 6ec5e3c3af db36b92abc 581f24d11a 15b380a471 33e00a6dc5 109ddd2dc1 b408d80c59
61fb176647 5f3546b66f e01ff60e42 88b361207d 747603e90d 366f751283 457b596fa0 36bf88af70
c5fc93d95f c92f3589db 13a20039f5 070d184644 cd6f6fa20d 8159561165 203b90169e 72ab2b6aa8
5d8a9e8986 27857322df 27d5f62f6c 20adb15d32 8d40d6c64d f4ccbeee10 b339254ed5 9ccedf7b1e
9795894004 bf07c3edad 41a16db127 b83e321b35 28333ec9da 891e88a70b 5326239074 abe1f6dbf3
4260d9ae87 84c86ed81a a97f2ea8de 5072569bb7 7a52b30713 73fcee2471 b25d802e3e df22b04db7
6dc7630020 3ec47194ce 03ca146ef3 5763b447de 36243ff89b 810ddfad9f ec48758c5e 533e82f833
31cfae5789 680244706c dbcf3b599f b7b56a6e55 a613c7dfa6 ab4d7becce 4d85d9c0d1 ec5f86b014
92ff0f253a 4f6af5b811 f76e58b56d 383aba4e8e aebb45e6e9 b6b320ef6a 9f4ffe273b 74366f6076
db7c13f5ee 3ac28c9210 64748203ba 262cc49856 125f29d43d aded06a0a7 cc2b45780a d93b7e4dc8
48b33db1f1 8a9b6f1270 3b8d762081 9c144e6522 c0d4f56a4c 62c887f41b dbfb11c6ea 5ffbfec46d
a52f99e8cc 1c1c4b36f8 c4e87f9695 4cd0360a1a e7d2c1e597 ce1cc6ae34 498a7b0aea c8c56ca64a
99f50c5317 dd26460395 21131aa65e f03b4c1c60 99bf97aa79 4cadd6f873 dc66e59fb2 6e8f0f9a1d
3720aff28a
2 .gitignore (vendored)

@@ -1,2 +1,4 @@
*.tfvars
.terraform
*.tfstate*
assets
21 LICENSE (Normal file)

@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2017 Dalton Hubble

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
52 README.md

@@ -1,58 +1,32 @@
# bootkube-terraform
# terraform-render-bootstrap

`bootkube-terraform` is a Terraform module that renders [bootkube](https://github.com/kubernetes-incubator/bootkube) assets, just like running the binary `bootkube render`. It aims to provide the same variable names, defaults, features, and outputs.
`terraform-render-bootstrap` is a Terraform module that renders TLS certificates, static pods, and manifests for bootstrapping a Kubernetes cluster.

## Audience

`terraform-render-bootstrap` is a low-level component of the [Typhoon](https://github.com/poseidon/typhoon) Kubernetes distribution. Use Typhoon modules to create and manage Kubernetes clusters across supported platforms. Use the bootstrap module if you'd like to customize a Kubernetes control plane or build your own distribution.

## Usage

Use the `bootkube-terraform` module within your existing Terraform configs. Provide the variables listed in `variables.tf` or check `terraform.tfvars.example` for examples.
Use the module to declare bootstrap assets. Check [variables.tf](variables.tf) for options and [terraform.tfvars.example](terraform.tfvars.example) for examples.

```hcl
module "bootkube" {
  source = "git://https://github.com/dghubble/bootkube-terraform.git"
module "bootstrap" {
  source = "git::https://github.com/poseidon/terraform-render-bootstrap.git?ref=SHA"

  cluster_name = "example"
  api_servers  = ["node1.example.com"]
  etcd_servers = ["http://127.0.0.1:2379"]
  asset_dir = "/home/core/clusters/mycluster"
  experimental_self_hosted_etcd = false
  etcd_servers = ["node1.example.com"]
}
```

Alternately, use a local checkout of this repo and copy `terraform.tfvars.example` to `terraform.tfvars` to generate assets without an existing terraform config repo.

Generate the bootkube assets.
Generate the assets.

```sh
terraform get
terraform init
terraform plan
terraform apply
```

### Comparison

Render bootkube assets directly with bootkube v0.4.2.

#### On-host etcd

```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=http://127.0.0.1:2379
```

Compare assets. The only diffs you should see are TLS credentials.

```sh
diff -rw assets /home/core/cluster/mycluster
```

#### Self-hosted etcd

```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --experimental-self-hosted-etcd
```

Compare assets. Note that experimental must be generated to a separate directory for terraform applies to sync. Move the experimental `bootstrap-manifests` and `manifests` files during deployment.

```sh
diff -rw assets /home/core/cluster/mycluster
```
Find bootstrap assets rendered to the `asset_dir` path. That's it.
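Beyond files under `asset_dir`, the module also exposes the rendered material as Terraform outputs (see the outputs.tf changes further down in this diff). A minimal sketch of a wrapping config that re-exports two of the output names added here (`kubeconfig-admin` and `assets_dist`), assuming the `module "bootstrap"` block from the README example above:

```hcl
# Sketch only: re-export selected bootstrap outputs from the calling config.
# Output names match those added in outputs.tf in this diff.
output "kubeconfig-admin" {
  description = "Rendered admin kubeconfig"
  value       = module.bootstrap.kubeconfig-admin
  sensitive   = true
}

output "assets_dist" {
  description = "Map of { path => content } assets to distribute to controllers"
  value       = module.bootstrap.assets_dist
  sensitive   = true
}
```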
73 assets.tf
@@ -1,73 +0,0 @@
|
||||
# Self-hosted Kubernetes bootstrap manifests
|
||||
resource "template_dir" "bootstrap-manifests" {
|
||||
source_dir = "${path.module}/resources/bootstrap-manifests"
|
||||
destination_dir = "${var.asset_dir}/bootstrap-manifests"
|
||||
|
||||
vars {
|
||||
hyperkube_image = "${var.container_images["hyperkube"]}"
|
||||
etcd_servers = "${var.experimental_self_hosted_etcd ? format("http://%s:2379,http://127.0.0.1:12379", cidrhost(var.service_cidr, 15)) : join(",", var.etcd_servers)}"
|
||||
|
||||
cloud_provider = "${var.cloud_provider}"
|
||||
pod_cidr = "${var.pod_cidr}"
|
||||
service_cidr = "${var.service_cidr}"
|
||||
}
|
||||
}
|
||||
|
||||
# Self-hosted Kubernetes manifests
|
||||
resource "template_dir" "manifests" {
|
||||
source_dir = "${path.module}/resources/manifests"
|
||||
destination_dir = "${var.asset_dir}/manifests"
|
||||
|
||||
vars {
|
||||
hyperkube_image = "${var.container_images["hyperkube"]}"
|
||||
etcd_servers = "${var.experimental_self_hosted_etcd ? format("http://%s:2379", cidrhost(var.service_cidr, 15)) : join(",", var.etcd_servers)}"
|
||||
|
||||
cloud_provider = "${var.cloud_provider}"
|
||||
|
||||
pod_cidr = "${var.pod_cidr}"
|
||||
service_cidr = "${var.service_cidr}"
|
||||
kube_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
|
||||
|
||||
ca_cert = "${base64encode(var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate)}"
|
||||
apiserver_key = "${base64encode(tls_private_key.apiserver.private_key_pem)}"
|
||||
apiserver_cert = "${base64encode(tls_locally_signed_cert.apiserver.cert_pem)}"
|
||||
serviceaccount_pub = "${base64encode(tls_private_key.service-account.public_key_pem)}"
|
||||
serviceaccount_key = "${base64encode(tls_private_key.service-account.private_key_pem)}"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Generated kubeconfig (auth/kubeconfig)
|
||||
data "template_file" "kubeconfig" {
|
||||
template = "${file("${path.module}/resources/kubeconfig")}"
|
||||
|
||||
vars {
|
||||
ca_cert = "${base64encode(var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate)}"
|
||||
kubelet_cert = "${base64encode(tls_locally_signed_cert.kubelet.cert_pem)}"
|
||||
kubelet_key = "${base64encode(tls_private_key.kubelet.private_key_pem)}"
|
||||
server = "${format("https://%s:443", element(var.api_servers, 0))}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "kubeconfig" {
|
||||
content = "${data.template_file.kubeconfig.rendered}"
|
||||
filename = "${var.asset_dir}/auth/kubeconfig"
|
||||
}
|
||||
|
||||
# Generated kubeconfig (auth/kubeconfig)
|
||||
data "template_file" "user-kubeconfig" {
|
||||
template = "${file("${path.module}/resources/user-kubeconfig")}"
|
||||
|
||||
vars {
|
||||
name = "${var.cluster_name}"
|
||||
ca_cert = "${base64encode(var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate)}"
|
||||
kubelet_cert = "${base64encode(tls_locally_signed_cert.kubelet.cert_pem)}"
|
||||
kubelet_key = "${base64encode(tls_private_key.kubelet.private_key_pem)}"
|
||||
server = "${format("https://%s:443", element(var.api_servers, 0))}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "user-kubeconfig" {
|
||||
content = "${data.template_file.user-kubeconfig.rendered}"
|
||||
filename = "${var.asset_dir}/auth/${var.cluster_name}-config"
|
||||
}
|
||||
55 auth.tf (Normal file)

@@ -0,0 +1,55 @@
locals {
  # auth kubeconfig assets map
  auth_kubeconfigs = {
    "auth/kubeconfig" = data.template_file.kubeconfig-admin.rendered,
  }
}

# Generated kubeconfig for Kubelets
data "template_file" "kubeconfig-kubelet" {
  template = file("${path.module}/resources/kubeconfig-kubelet")

  vars = {
    ca_cert      = base64encode(tls_self_signed_cert.kube-ca.cert_pem)
    kubelet_cert = base64encode(tls_locally_signed_cert.kubelet.cert_pem)
    kubelet_key  = base64encode(tls_private_key.kubelet.private_key_pem)
    server       = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port)
  }
}

# Generated admin kubeconfig to bootstrap control plane
data "template_file" "kubeconfig-admin" {
  template = file("${path.module}/resources/kubeconfig-admin")

  vars = {
    name         = var.cluster_name
    ca_cert      = base64encode(tls_self_signed_cert.kube-ca.cert_pem)
    kubelet_cert = base64encode(tls_locally_signed_cert.admin.cert_pem)
    kubelet_key  = base64encode(tls_private_key.admin.private_key_pem)
    server       = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port)
  }
}

# Generated kubeconfig for Kubelets
resource "local_file" "kubeconfig-kubelet" {
  count = var.asset_dir == "" ? 0 : 1

  content  = data.template_file.kubeconfig-kubelet.rendered
  filename = "${var.asset_dir}/auth/kubeconfig-kubelet"
}

# Generated admin kubeconfig to bootstrap control plane
resource "local_file" "kubeconfig-admin" {
  count = var.asset_dir == "" ? 0 : 1

  content  = data.template_file.kubeconfig-admin.rendered
  filename = "${var.asset_dir}/auth/kubeconfig"
}

# Generated admin kubeconfig in a file named after the cluster
resource "local_file" "kubeconfig-admin-named" {
  count = var.asset_dir == "" ? 0 : 1

  content  = data.template_file.kubeconfig-admin.rendered
  filename = "${var.asset_dir}/auth/${var.cluster_name}-config"
}
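The `local_file` resources in auth.tf above are guarded by `count = var.asset_dir == "" ? 0 : 1`, so kubeconfigs are written to disk only when an `asset_dir` is set; otherwise callers consume the rendered content through the module outputs. A hedged sketch of a caller writing the admin kubeconfig itself when `asset_dir` is left empty (the destination filename is illustrative):

```hcl
# Sketch only: write the rendered admin kubeconfig from the module output
# when the module was invoked with asset_dir = "".
resource "local_file" "admin_kubeconfig" {
  content  = module.bootstrap.kubeconfig-admin
  filename = "${path.root}/generated/auth/kubeconfig"
}
```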
79 conditional.tf (Normal file)
@@ -0,0 +1,79 @@
|
||||
# Assets generated only when certain options are chosen
|
||||
|
||||
locals {
|
||||
# flannel manifests map
|
||||
# { manifests-networking/manifest.yaml => content }
|
||||
flannel_manifests = {
|
||||
for name in fileset("${path.module}/resources/flannel", "*.yaml") :
|
||||
"manifests-networking/${name}" => templatefile(
|
||||
"${path.module}/resources/flannel/${name}",
|
||||
{
|
||||
flannel_image = var.container_images["flannel"]
|
||||
flannel_cni_image = var.container_images["flannel_cni"]
|
||||
pod_cidr = var.pod_cidr
|
||||
}
|
||||
)
|
||||
if var.networking == "flannel"
|
||||
}
|
||||
|
||||
# calico manifests map
|
||||
# { manifests-networking/manifest.yaml => content }
|
||||
calico_manifests = {
|
||||
for name in fileset("${path.module}/resources/calico", "*.yaml") :
|
||||
"manifests-networking/${name}" => templatefile(
|
||||
"${path.module}/resources/calico/${name}",
|
||||
{
|
||||
calico_image = var.container_images["calico"]
|
||||
calico_cni_image = var.container_images["calico_cni"]
|
||||
network_mtu = var.network_mtu
|
||||
network_encapsulation = indent(2, var.network_encapsulation == "vxlan" ? "vxlanMode: Always" : "ipipMode: Always")
|
||||
ipip_enabled = var.network_encapsulation == "ipip" ? true : false
|
||||
ipip_readiness = var.network_encapsulation == "ipip" ? indent(16, "- --bird-ready") : ""
|
||||
vxlan_enabled = var.network_encapsulation == "vxlan" ? true : false
|
||||
network_ip_autodetection_method = var.network_ip_autodetection_method
|
||||
pod_cidr = var.pod_cidr
|
||||
enable_reporting = var.enable_reporting
|
||||
}
|
||||
)
|
||||
if var.networking == "calico"
|
||||
}
|
||||
|
||||
# kube-router manifests map
|
||||
# { manifests-networking/manifest.yaml => content }
|
||||
kube_router_manifests = {
|
||||
for name in fileset("${path.module}/resources/kube-router", "*.yaml") :
|
||||
"manifests-networking/${name}" => templatefile(
|
||||
"${path.module}/resources/kube-router/${name}",
|
||||
{
|
||||
kube_router_image = var.container_images["kube_router"]
|
||||
flannel_cni_image = var.container_images["flannel_cni"]
|
||||
network_mtu = var.network_mtu
|
||||
}
|
||||
)
|
||||
if var.networking == "kube-router"
|
||||
}
|
||||
}
|
||||
|
||||
# flannel manifests
|
||||
resource "local_file" "flannel-manifests" {
|
||||
for_each = var.asset_dir == "" ? {} : local.flannel_manifests
|
||||
|
||||
filename = "${var.asset_dir}/${each.key}"
|
||||
content = each.value
|
||||
}
|
||||
|
||||
# Calico manifests
|
||||
resource "local_file" "calico-manifests" {
|
||||
for_each = var.asset_dir == "" ? {} : local.calico_manifests
|
||||
|
||||
filename = "${var.asset_dir}/${each.key}"
|
||||
content = each.value
|
||||
}
|
||||
|
||||
# kube-router manifests
|
||||
resource "local_file" "kube-router-manifests" {
|
||||
for_each = var.asset_dir == "" ? {} : local.kube_router_manifests
|
||||
|
||||
filename = "${var.asset_dir}/${each.key}"
|
||||
content = each.value
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
# Experimental self-hosted etcd
|
||||
|
||||
# etcd pod and service bootstrap-manifests
|
||||
|
||||
data "template_file" "bootstrap-etcd" {
|
||||
template = "${file("${path.module}/resources/experimental/bootstrap-manifests/bootstrap-etcd.yaml")}"
|
||||
vars {
|
||||
etcd_image = "${var.container_images["etcd"]}"
|
||||
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 200)}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "bootstrap-etcd" {
|
||||
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
|
||||
content = "${data.template_file.bootstrap-etcd.rendered}"
|
||||
filename = "${var.asset_dir}/experimental/bootstrap-manifests/bootstrap-etcd.yaml"
|
||||
}
|
||||
|
||||
data "template_file" "bootstrap-etcd-service" {
|
||||
template = "${file("${path.module}/resources/etcd/bootstrap-etcd-service.json")}"
|
||||
vars {
|
||||
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 200)}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "bootstrap-etcd-service" {
|
||||
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
|
||||
content = "${data.template_file.bootstrap-etcd-service.rendered}"
|
||||
filename = "${var.asset_dir}/etcd/bootstrap-etcd-service.json"
|
||||
}
|
||||
|
||||
data "template_file" "etcd-tpr" {
|
||||
template = "${file("${path.module}/resources/etcd/migrate-etcd-cluster.json")}"
|
||||
vars {
|
||||
bootstrap_etcd_service_ip = "${cidrhost(var.service_cidr, 200)}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "etcd-tpr" {
|
||||
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
|
||||
content = "${data.template_file.etcd-tpr.rendered}"
|
||||
filename = "${var.asset_dir}/etcd/migrate-etcd-cluster.json"
|
||||
}
|
||||
|
||||
# etcd operator deployment and service manifests
|
||||
|
||||
resource "local_file" "etcd-operator" {
|
||||
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
|
||||
depends_on = ["template_dir.manifests"]
|
||||
|
||||
content = "${file("${path.module}/resources/experimental/manifests/etcd-operator.yaml")}"
|
||||
filename = "${var.asset_dir}/experimental/manifests/etcd-operator.yaml"
|
||||
}
|
||||
|
||||
data "template_file" "etcd-service" {
|
||||
template = "${file("${path.module}/resources/experimental/manifests/etcd-service.yaml")}"
|
||||
vars {
|
||||
etcd_service_ip = "${cidrhost(var.service_cidr, 15)}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "local_file" "etcd-service" {
|
||||
count = "${var.experimental_self_hosted_etcd ? 1 : 0}"
|
||||
depends_on = ["template_dir.manifests"]
|
||||
|
||||
content = "${data.template_file.etcd-service.rendered}"
|
||||
filename = "${var.asset_dir}/experimental/manifests/etcd-service.yaml"
|
||||
}
|
||||
67 manifests.tf (Normal file)
@@ -0,0 +1,67 @@
|
||||
locals {
|
||||
# Kubernetes static pod manifests map
|
||||
# {static-manifests/manifest.yaml => content }
|
||||
static_manifests = {
|
||||
for name in fileset("${path.module}/resources/static-manifests", "*.yaml") :
|
||||
"static-manifests/${name}" => templatefile(
|
||||
"${path.module}/resources/static-manifests/${name}",
|
||||
{
|
||||
hyperkube_image = var.container_images["hyperkube"]
|
||||
etcd_servers = join(",", formatlist("https://%s:2379", var.etcd_servers))
|
||||
cloud_provider = var.cloud_provider
|
||||
pod_cidr = var.pod_cidr
|
||||
service_cidr = var.service_cidr
|
||||
trusted_certs_dir = var.trusted_certs_dir
|
||||
aggregation_flags = var.enable_aggregation ? indent(4, local.aggregation_flags) : ""
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
# Kubernetes control plane manifests map
|
||||
# { manifests/manifest.yaml => content }
|
||||
manifests = {
|
||||
for name in fileset("${path.module}/resources/manifests", "**/*.yaml") :
|
||||
"manifests/${name}" => templatefile(
|
||||
"${path.module}/resources/manifests/${name}",
|
||||
{
|
||||
hyperkube_image = var.container_images["hyperkube"]
|
||||
coredns_image = var.container_images["coredns"]
|
||||
control_plane_replicas = max(2, length(var.etcd_servers))
|
||||
pod_cidr = var.pod_cidr
|
||||
cluster_domain_suffix = var.cluster_domain_suffix
|
||||
cluster_dns_service_ip = cidrhost(var.service_cidr, 10)
|
||||
trusted_certs_dir = var.trusted_certs_dir
|
||||
server = format("https://%s:%s", var.api_servers[0], var.external_apiserver_port)
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
# Kubernetes static pod manifests
|
||||
resource "local_file" "static-manifests" {
|
||||
for_each = var.asset_dir == "" ? {} : local.static_manifests
|
||||
|
||||
content = each.value
|
||||
filename = "${var.asset_dir}/${each.key}"
|
||||
}
|
||||
|
||||
# Kubernetes control plane manifests
|
||||
resource "local_file" "manifests" {
|
||||
for_each = var.asset_dir == "" ? {} : local.manifests
|
||||
|
||||
content = each.value
|
||||
filename = "${var.asset_dir}/${each.key}"
|
||||
}
|
||||
|
||||
locals {
|
||||
aggregation_flags = <<EOF
|
||||
|
||||
- --proxy-client-cert-file=/etc/kubernetes/secrets/aggregation-client.crt
|
||||
- --proxy-client-key-file=/etc/kubernetes/secrets/aggregation-client.key
|
||||
- --requestheader-client-ca-file=/etc/kubernetes/secrets/aggregation-ca.crt
|
||||
- --requestheader-extra-headers-prefix=X-Remote-Extra-
|
||||
- --requestheader-group-headers=X-Remote-Group
|
||||
- --requestheader-username-headers=X-Remote-User
|
||||
EOF
|
||||
}
|
||||
|
||||
76 outputs.tf
@@ -1,43 +1,71 @@
|
||||
output "id" {
|
||||
value = "${sha1("${template_dir.bootstrap-manifests.id} ${local_file.kubeconfig.id}")}"
|
||||
|
||||
output "cluster_dns_service_ip" {
|
||||
value = cidrhost(var.service_cidr, 10)
|
||||
}
|
||||
|
||||
output "content_hash" {
|
||||
value = "${sha1("${template_dir.bootstrap-manifests.id} ${template_dir.manifests.id}")}"
|
||||
// Generated kubeconfig for Kubelets (i.e. lower privilege than admin)
|
||||
output "kubeconfig-kubelet" {
|
||||
value = data.template_file.kubeconfig-kubelet.rendered
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "kube_dns_service_ip" {
|
||||
value = "${cidrhost(var.service_cidr, 10)}"
|
||||
// Generated kubeconfig for admins (i.e. human super-user)
|
||||
output "kubeconfig-admin" {
|
||||
value = data.template_file.kubeconfig-admin.rendered
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "etcd_service_ip" {
|
||||
value = "${cidrhost(var.service_cidr, 15)}"
|
||||
# assets to distribute to controllers
|
||||
# { some/path => content }
|
||||
output "assets_dist" {
|
||||
# combine maps of assets
|
||||
value = merge(
|
||||
local.auth_kubeconfigs,
|
||||
local.etcd_tls,
|
||||
local.kubernetes_tls,
|
||||
local.aggregation_tls,
|
||||
local.static_manifests,
|
||||
local.manifests,
|
||||
local.flannel_manifests,
|
||||
local.calico_manifests,
|
||||
local.kube_router_manifests,
|
||||
)
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "kubeconfig" {
|
||||
value = "${data.template_file.kubeconfig.rendered}"
|
||||
# etcd TLS assets
|
||||
|
||||
output "etcd_ca_cert" {
|
||||
value = tls_self_signed_cert.etcd-ca.cert_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "user-kubeconfig" {
|
||||
value = "${local_file.user-kubeconfig.filename}"
|
||||
output "etcd_client_cert" {
|
||||
value = tls_locally_signed_cert.client.cert_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
# Some platforms may need to reconstruct the kubeconfig directly in user-data.
|
||||
# That can't be done with the way template_file interpolates multi-line
|
||||
# contents so the raw components of the kubeconfig may be needed.
|
||||
|
||||
output "ca_cert" {
|
||||
value = "${base64encode(var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate)}"
|
||||
output "etcd_client_key" {
|
||||
value = tls_private_key.client.private_key_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "kubelet_cert" {
|
||||
value = "${base64encode(tls_locally_signed_cert.kubelet.cert_pem)}"
|
||||
output "etcd_server_cert" {
|
||||
value = tls_locally_signed_cert.server.cert_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "kubelet_key" {
|
||||
value = "${base64encode(tls_private_key.kubelet.private_key_pem)}"
|
||||
output "etcd_server_key" {
|
||||
value = tls_private_key.server.private_key_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "server" {
|
||||
value = "${format("https://%s:443", element(var.api_servers, 0))}"
|
||||
output "etcd_peer_cert" {
|
||||
value = tls_locally_signed_cert.peer.cert_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "etcd_peer_key" {
|
||||
value = tls_private_key.peer.private_key_pem
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
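The `assets_dist` output above merges the kubeconfig, TLS, and manifest maps into a single `{ some/path => content }` map intended for distribution to controllers. A hedged sketch of consuming that map with `for_each` (the target directory is illustrative, and `nonsensitive()` assumes Terraform 0.15+ since the output is marked sensitive):

```hcl
# Sketch only: materialize the distributed asset map on the local machine.
# Platform modules would typically copy these paths to controller nodes instead.
resource "local_file" "bootstrap_assets" {
  # for_each cannot iterate a sensitive value directly, so unwrap it here.
  for_each = nonsensitive(module.bootstrap.assets_dist)

  filename = "/var/lib/bootstrap/${each.key}"
  content  = each.value
}
```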
@@ -1,35 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: bootstrap-kube-controller-manager
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-controller-manager
|
||||
image: ${hyperkube_image}
|
||||
command:
|
||||
- ./hyperkube
|
||||
- controller-manager
|
||||
- --allocate-node-cidrs=true
|
||||
- --cluster-cidr=${pod_cidr}
|
||||
- --cloud-provider=${cloud_provider}
|
||||
- --configure-cloud-routes=false
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --leader-elect=true
|
||||
- --root-ca-file=/etc/kubernetes/bootstrap-secrets/ca.crt
|
||||
- --service-account-private-key-file=/etc/kubernetes/bootstrap-secrets/service-account.key
|
||||
volumeMounts:
|
||||
- name: kubernetes
|
||||
mountPath: /etc/kubernetes
|
||||
readOnly: true
|
||||
- name: ssl-host
|
||||
mountPath: /etc/ssl/certs
|
||||
readOnly: true
|
||||
hostNetwork: true
|
||||
volumes:
|
||||
- name: kubernetes
|
||||
hostPath:
|
||||
path: /etc/kubernetes
|
||||
- name: ssl-host
|
||||
hostPath:
|
||||
path: /usr/share/ca-certificates
|
||||
@@ -1,23 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: bootstrap-kube-scheduler
|
||||
namespace: kube-system
|
||||
spec:
|
||||
containers:
|
||||
- name: kube-scheduler
|
||||
image: ${hyperkube_image}
|
||||
command:
|
||||
- ./hyperkube
|
||||
- scheduler
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --leader-elect=true
|
||||
volumeMounts:
|
||||
- name: kubernetes
|
||||
mountPath: /etc/kubernetes
|
||||
readOnly: true
|
||||
hostNetwork: true
|
||||
volumes:
|
||||
- name: kubernetes
|
||||
hostPath:
|
||||
path: /etc/kubernetes
|
||||
12 resources/calico/cluster-role-binding.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: calico-node
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: calico-node
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
109 resources/calico/cluster-role.yaml (Normal file)
@@ -0,0 +1,109 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: calico-node
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
# Used by Calico for policy information
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
# Calico patches the node NetworkUnavilable status
|
||||
- patch
|
||||
# Calico updates some info in node annotations
|
||||
- update
|
||||
# CNI plugin patches pods/status
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- patch
|
||||
# Calico reads some info on nodes
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
# Calico monitors Kubernetes NetworkPolicies
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
# Calico monitors its CRDs
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- globalfelixconfigs
|
||||
- felixconfigurations
|
||||
- bgppeers
|
||||
- globalbgpconfigs
|
||||
- bgpconfigurations
|
||||
- ippools
|
||||
- ipamblocks
|
||||
- globalnetworkpolicies
|
||||
- globalnetworksets
|
||||
- networkpolicies
|
||||
- networksets
|
||||
- clusterinformations
|
||||
- hostendpoints
|
||||
- blockaffinities
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- ippools
|
||||
- felixconfigurations
|
||||
- clusterinformations
|
||||
verbs:
|
||||
- create
|
||||
- update
|
||||
# Calico may perform IPAM allocations
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- blockaffinities
|
||||
- ipamblocks
|
||||
- ipamhandles
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- ipamconfigs
|
||||
verbs:
|
||||
- get
|
||||
# Watch block affinities for route aggregation
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- blockaffinities
|
||||
verbs:
|
||||
- watch
|
||||
45 resources/calico/config.yaml (Normal file)
@@ -0,0 +1,45 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: calico-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
# Disable Typha for now.
|
||||
typha_service_name: "none"
|
||||
# Calico backend to use
|
||||
calico_backend: "bird"
|
||||
# Calico MTU
|
||||
veth_mtu: "${network_mtu}"
|
||||
# The CNI network configuration to install on each node.
|
||||
cni_network_config: |-
|
||||
{
|
||||
"name": "k8s-pod-network",
|
||||
"cniVersion": "0.3.1",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "calico",
|
||||
"log_level": "info",
|
||||
"datastore_type": "kubernetes",
|
||||
"nodename": "__KUBERNETES_NODE_NAME__",
|
||||
"mtu": __CNI_MTU__,
|
||||
"ipam": {
|
||||
"type": "calico-ipam"
|
||||
},
|
||||
"policy": {
|
||||
"type": "k8s"
|
||||
},
|
||||
"kubernetes": {
|
||||
"kubeconfig": "__KUBECONFIG_FILEPATH__"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "portmap",
|
||||
"snat": true,
|
||||
"capabilities": {"portMappings": true}
|
||||
},
|
||||
{
|
||||
"type": "bandwidth",
|
||||
"capabilities": {"bandwidth": true}
|
||||
}
|
||||
]
|
||||
}
|
||||
12 resources/calico/crd-bgpconfigurations.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: bgpconfigurations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BGPConfiguration
|
||||
plural: bgpconfigurations
|
||||
singular: bgpconfiguration
|
||||
12 resources/calico/crd-bgppeers.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: bgppeers.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BGPPeer
|
||||
plural: bgppeers
|
||||
singular: bgppeer
|
||||
12 resources/calico/crd-blockaffinities.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: blockaffinities.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BlockAffinity
|
||||
plural: blockaffinities
|
||||
singular: blockaffinity
|
||||
12 resources/calico/crd-clusterinformations.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterinformations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: ClusterInformation
|
||||
plural: clusterinformations
|
||||
singular: clusterinformation
|
||||
12 resources/calico/crd-felixconfigurations.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: felixconfigurations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: FelixConfiguration
|
||||
plural: felixconfigurations
|
||||
singular: felixconfiguration
|
||||
12 resources/calico/crd-globalnetworkpolicies.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: globalnetworkpolicies.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: GlobalNetworkPolicy
|
||||
plural: globalnetworkpolicies
|
||||
singular: globalnetworkpolicy
|
||||
12 resources/calico/crd-globalnetworksets.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: globalnetworksets.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: GlobalNetworkSet
|
||||
plural: globalnetworksets
|
||||
singular: globalnetworkset
|
||||
12 resources/calico/crd-hostendpoints.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: hostendpoints.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: HostEndpoint
|
||||
plural: hostendpoints
|
||||
singular: hostendpoint
|
||||
12 resources/calico/crd-ipamblocks.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ipamblocks.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPAMBlock
|
||||
plural: ipamblocks
|
||||
singular: ipamblock
|
||||
12 resources/calico/crd-ipamconfigs.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ipamconfigs.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPAMConfig
|
||||
plural: ipamconfigs
|
||||
singular: ipamconfig
|
||||
12 resources/calico/crd-ipamhandles.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ipamhandles.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPAMHandle
|
||||
plural: ipamhandles
|
||||
singular: ipamhandle
|
||||
12 resources/calico/crd-ippools.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ippools.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPPool
|
||||
plural: ippools
|
||||
singular: ippool
|
||||
12 resources/calico/crd-networkpolicies.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: networkpolicies.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Namespaced
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: NetworkPolicy
|
||||
plural: networkpolicies
|
||||
singular: networkpolicy
|
||||
12 resources/calico/crd-networksets.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: networksets.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Namespaced
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: NetworkSet
|
||||
plural: networksets
|
||||
singular: networkset
|
||||
191 resources/calico/daemonset.yaml (Normal file)
@@ -0,0 +1,191 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-node
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: calico-node
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: calico-node
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
|
||||
spec:
|
||||
hostNetwork: true
|
||||
priorityClassName: system-node-critical
|
||||
serviceAccountName: calico-node
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
initContainers:
|
||||
# Install Calico CNI binaries and CNI network config file on nodes
|
||||
- name: install-cni
|
||||
image: ${calico_cni_image}
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
# Name of the CNI config file to create on each node.
|
||||
- name: CNI_CONF_NAME
|
||||
value: "10-calico.conflist"
|
||||
# Set node name based on k8s nodeName
|
||||
- name: KUBERNETES_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
# Contents of the CNI config to create on each node.
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: cni_network_config
|
||||
- name: CNI_NET_DIR
|
||||
value: "/etc/kubernetes/cni/net.d"
|
||||
- name: CNI_MTU
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: veth_mtu
|
||||
- name: SLEEP
|
||||
value: "false"
|
||||
volumeMounts:
|
||||
- name: cni-bin-dir
|
||||
mountPath: /host/opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
mountPath: /host/etc/cni/net.d
|
||||
containers:
|
||||
- name: calico-node
|
||||
image: ${calico_image}
|
||||
env:
|
||||
# Use Kubernetes API as the backing datastore.
|
||||
- name: DATASTORE_TYPE
|
||||
value: "kubernetes"
|
||||
# Wait for datastore
|
||||
- name: WAIT_FOR_DATASTORE
|
||||
value: "true"
|
||||
# Typha support: controlled by the ConfigMap.
|
||||
- name: FELIX_TYPHAK8SSERVICENAME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: typha_service_name
|
||||
- name: FELIX_USAGEREPORTINGENABLED
|
||||
value: "${enable_reporting}"
|
||||
# Set node name based on k8s nodeName.
|
||||
- name: NODENAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
# Calico network backend
|
||||
- name: CALICO_NETWORKING_BACKEND
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: calico_backend
|
||||
# Cluster type to identify the deployment type
|
||||
- name: CLUSTER_TYPE
|
||||
value: "k8s,bgp"
|
||||
# Auto-detect the BGP IP address.
|
||||
- name: IP
|
||||
value: "autodetect"
|
||||
- name: IP_AUTODETECTION_METHOD
|
||||
value: "${network_ip_autodetection_method}"
|
||||
# Whether Felix should enable IP-in-IP tunnel
|
||||
- name: FELIX_IPINIPENABLED
|
||||
value: "${ipip_enabled}"
|
||||
# MTU to set on the IPIP tunnel (if enabled)
|
||||
- name: FELIX_IPINIPMTU
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: veth_mtu
|
||||
# Whether Felix should enable VXLAN tunnel
|
||||
- name: FELIX_VXLANENABLED
|
||||
value: "${vxlan_enabled}"
|
||||
# MTU to set on the VXLAN tunnel (if enabled)
|
||||
- name: FELIX_VXLANMTU
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: veth_mtu
|
||||
- name: NO_DEFAULT_POOLS
|
||||
value: "true"
|
||||
# Disable file logging so `kubectl logs` works.
|
||||
- name: CALICO_DISABLE_FILE_LOGGING
|
||||
value: "true"
|
||||
# Set Felix endpoint to host default action to ACCEPT.
|
||||
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
|
||||
value: "ACCEPT"
|
||||
# Disable IPV6 on Kubernetes.
|
||||
- name: FELIX_IPV6SUPPORT
|
||||
value: "false"
|
||||
# Enable felix info logging.
|
||||
- name: FELIX_LOGSEVERITYSCREEN
|
||||
value: "info"
|
||||
- name: FELIX_HEALTHENABLED
|
||||
value: "true"
|
||||
securityContext:
|
||||
privileged: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /bin/calico-node
|
||||
- -felix-ready
|
||||
periodSeconds: 10
|
||||
initialDelaySeconds: 10
|
||||
failureThreshold: 6
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /bin/calico-node
|
||||
- -felix-ready
|
||||
${ipip_readiness}
|
||||
periodSeconds: 10
|
||||
volumeMounts:
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: var-lib-calico
|
||||
mountPath: /var/lib/calico
|
||||
readOnly: false
|
||||
- name: var-run-calico
|
||||
mountPath: /var/run/calico
|
||||
readOnly: false
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
readOnly: false
|
||||
terminationGracePeriodSeconds: 0
|
||||
volumes:
|
||||
# Used by calico/node
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: var-lib-calico
|
||||
hostPath:
|
||||
path: /var/lib/calico
|
||||
- name: var-run-calico
|
||||
hostPath:
|
||||
path: /var/run/calico
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
type: FileOrCreate
|
||||
path: /run/xtables.lock
|
||||
# Used by install-cni
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
hostPath:
|
||||
path: /etc/kubernetes/cni/net.d
|
||||
10 resources/calico/ippools-default-ipv4.yaml (Normal file)
@@ -0,0 +1,10 @@
|
||||
apiVersion: crd.projectcalico.org/v1
|
||||
kind: IPPool
|
||||
metadata:
|
||||
name: default-ipv4-ippool
|
||||
spec:
|
||||
blockSize: 24
|
||||
cidr: ${pod_cidr}
|
||||
${network_encapsulation}
|
||||
natOutgoing: true
|
||||
nodeSelector: all()
|
||||
5 resources/calico/service-account.yaml (Normal file)
@@ -0,0 +1,5 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
@@ -1,26 +0,0 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"name": "bootstrap-etcd-service",
|
||||
"namespace": "kube-system"
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"k8s-app": "boot-etcd"
|
||||
},
|
||||
"clusterIP": "${bootstrap_etcd_service_ip}",
|
||||
"ports": [
|
||||
{
|
||||
"name": "client",
|
||||
"port": 12379,
|
||||
"protocol": "TCP"
|
||||
},
|
||||
{
|
||||
"name": "peers",
|
||||
"port": 12380,
|
||||
"protocol": "TCP"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
{
|
||||
"apiVersion": "etcd.coreos.com/v1beta1",
|
||||
"kind": "Cluster",
|
||||
"metadata": {
|
||||
"name": "kube-etcd",
|
||||
"namespace": "kube-system"
|
||||
},
|
||||
"spec": {
|
||||
"size": 1,
|
||||
"version": "v3.1.6",
|
||||
"pod": {
|
||||
"nodeSelector": {
|
||||
"node-role.kubernetes.io/master": ""
|
||||
},
|
||||
"tolerations": [
|
||||
{
|
||||
"key": "node-role.kubernetes.io/master",
|
||||
"operator": "Exists",
|
||||
"effect": "NoSchedule"
|
||||
}
|
||||
]
|
||||
},
|
||||
"selfHosted": {
|
||||
"bootMemberClientEndpoint": "http://${bootstrap_etcd_service_ip}:12379"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: bootstrap-etcd
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: boot-etcd
|
||||
spec:
|
||||
containers:
|
||||
- name: etcd
|
||||
image: ${etcd_image}
|
||||
command:
|
||||
- /usr/local/bin/etcd
|
||||
- --name=boot-etcd
|
||||
- --listen-client-urls=http://0.0.0.0:12379
|
||||
- --listen-peer-urls=http://0.0.0.0:12380
|
||||
- --advertise-client-urls=http://${bootstrap_etcd_service_ip}:12379
|
||||
- --initial-advertise-peer-urls=http://${bootstrap_etcd_service_ip}:12380
|
||||
- --initial-cluster=boot-etcd=http://${bootstrap_etcd_service_ip}:12380
|
||||
- --initial-cluster-token=bootkube
|
||||
- --initial-cluster-state=new
|
||||
- --data-dir=/var/etcd/data
|
||||
env:
|
||||
- name: MY_POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
hostNetwork: true
|
||||
restartPolicy: Never
|
||||
@@ -1,35 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: etcd-operator
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: etcd-operator
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: etcd-operator
|
||||
spec:
|
||||
containers:
|
||||
- name: etcd-operator
|
||||
image: quay.io/coreos/etcd-operator:v0.3.0
|
||||
command:
|
||||
- /usr/local/bin/etcd-operator
|
||||
- --analytics=false
|
||||
env:
|
||||
- name: MY_POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: MY_POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: etcd-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
app: etcd
|
||||
etcd_cluster: kube-etcd
|
||||
clusterIP: ${etcd_service_ip}
|
||||
ports:
|
||||
- name: client
|
||||
port: 2379
|
||||
protocol: TCP
|
||||
12 resources/flannel/cluster-role-binding.yaml (Normal file)
@@ -0,0 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: flannel
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: flannel
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
24 resources/flannel/cluster-role.yaml (Normal file)
@@ -0,0 +1,24 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: flannel
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
37 resources/flannel/config.yaml (Normal file)
@@ -0,0 +1,37 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: flannel-config
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
k8s-app: flannel
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"name": "cbr0",
|
||||
"cniVersion": "0.3.1",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "flannel",
|
||||
"delegate": {
|
||||
"hairpinMode": true,
|
||||
"isDefaultGateway": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "portmap",
|
||||
"capabilities": {
|
||||
"portMappings": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
net-conf.json: |
|
||||
{
|
||||
"Network": "${pod_cidr}",
|
||||
"Backend": {
|
||||
"Type": "vxlan",
|
||||
"Port": 4789
|
||||
}
|
||||
}
|
||||
85 resources/flannel/daemonset.yaml (Normal file)
@@ -0,0 +1,85 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: flannel
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: flannel
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: flannel
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
|
||||
spec:
|
||||
hostNetwork: true
|
||||
priorityClassName: system-node-critical
|
||||
serviceAccountName: flannel
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: flannel
|
||||
image: ${flannel_image}
|
||||
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface=$(POD_IP)"]
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
securityContext:
|
||||
privileged: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
volumeMounts:
|
||||
- name: flannel-config
|
||||
mountPath: /etc/kube-flannel/
|
||||
- name: run-flannel
|
||||
mountPath: /run/flannel
|
||||
- name: install-cni
|
||||
image: ${flannel_cni_image}
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: flannel-config
|
||||
key: cni-conf.json
|
||||
volumeMounts:
|
||||
- name: cni-bin-dir
|
||||
mountPath: /host/opt/cni/bin/
|
||||
- name: cni-conf-dir
|
||||
mountPath: /host/etc/cni/net.d
|
||||
volumes:
|
||||
- name: flannel-config
|
||||
configMap:
|
||||
name: flannel-config
|
||||
- name: run-flannel
|
||||
hostPath:
|
||||
path: /run/flannel
|
||||
# Used by install-cni
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
hostPath:
|
||||
path: /etc/kubernetes/cni/net.d
|
||||
5 resources/flannel/service-account.yaml (Normal file)
@@ -0,0 +1,5 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
@@ -1,12 +1,12 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:default-sa
|
||||
name: kube-router
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kube-router
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
name: kube-router
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
33 resources/kube-router/cluster-role.yaml (Normal file)
@@ -0,0 +1,33 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: kube-router
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
- pods
|
||||
- services
|
||||
- nodes
|
||||
- endpoints
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "networking.k8s.io"
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
30 resources/kube-router/config.yaml (Normal file)
@@ -0,0 +1,30 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kube-router-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"name": "pod-network",
|
||||
"cniVersion": "0.3.1",
|
||||
"plugins":[
|
||||
{
|
||||
"name": "kube-router",
|
||||
"type": "bridge",
|
||||
"bridge": "kube-bridge",
|
||||
"isDefaultGateway": true,
|
||||
"mtu": ${network_mtu},
|
||||
"ipam": {
|
||||
"type": "host-local"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "portmap",
|
||||
"snat": true,
|
||||
"capabilities": {
|
||||
"portMappings": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
90 resources/kube-router/daemonset.yaml (Normal file)
@@ -0,0 +1,90 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kube-router
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-router
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-router
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-router
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
|
||||
spec:
|
||||
hostNetwork: true
|
||||
priorityClassName: system-node-critical
|
||||
serviceAccountName: kube-router
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
containers:
|
||||
- name: kube-router
|
||||
image: ${kube_router_image}
|
||||
args:
|
||||
- --kubeconfig=/etc/kubernetes/kubeconfig
|
||||
- --run-router=true
|
||||
- --run-firewall=true
|
||||
- --run-service-proxy=false
|
||||
- --v=5
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: KUBE_ROUTER_CNI_CONF_FILE
|
||||
value: /etc/cni/net.d/10-kuberouter.conflist
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
readOnly: true
|
||||
- name: cni-conf-dir
|
||||
mountPath: /etc/cni/net.d
|
||||
- name: kubeconfig
|
||||
mountPath: /etc/kubernetes
|
||||
readOnly: true
|
||||
- name: install-cni
|
||||
image: ${flannel_cni_image}
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
- name: CNI_OLD_NAME
|
||||
value: 10-flannel.conflist
|
||||
- name: CNI_CONF_NAME
|
||||
value: 10-kuberouter.conflist
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: kube-router-config
|
||||
key: cni-conf.json
|
||||
volumeMounts:
|
||||
- name: cni-bin-dir
|
||||
mountPath: /host/opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
mountPath: /host/etc/cni/net.d
|
||||
volumes:
|
||||
# Used by kube-router
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: kubeconfig
|
||||
configMap:
|
||||
name: kubeconfig-in-cluster
|
||||
# Used by install-cni
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-conf-dir
|
||||
hostPath:
|
||||
path: /etc/kubernetes/cni/net.d
|
||||
5  resources/kube-router/service-account.yaml  Normal file
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-router
  namespace: kube-system
@@ -10,6 +10,7 @@ users:
user:
client-certificate-data: ${kubelet_cert}
client-key-data: ${kubelet_key}
current-context: ${name}-context
contexts:
- name: ${name}-context
context:
16  resources/manifests/coredns/cluster-role-binding.yaml  Normal file
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:coredns
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
21  resources/manifests/coredns/cluster-role.yaml  Normal file
@@ -0,0 +1,21 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:coredns
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
rules:
- apiGroups: [""]
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups: [""]
  resources:
  - nodes
  verbs:
  - get
27  resources/manifests/coredns/config.yaml  Normal file
@@ -0,0 +1,27 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
      errors
      health {
        lameduck 5s
      }
      ready
      log . {
        class error
      }
      kubernetes ${cluster_domain_suffix} in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
      }
      prometheus :9153
      forward . /etc/resolv.conf
      cache 30
      loop
      reload
      loadbalance
    }
101  resources/manifests/coredns/deployment.yaml  Normal file
@@ -0,0 +1,101 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: ${control_plane_replicas}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      tier: control-plane
      k8s-app: coredns
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: coredns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: tier
                  operator: In
                  values:
                  - control-plane
                - key: k8s-app
                  operator: In
                  values:
                  - coredns
              topologyKey: kubernetes.io/hostname
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: coredns
        image: ${coredns_image}
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - name: dns
          protocol: UDP
          containerPort: 53
        - name: dns-tcp
          protocol: TCP
          containerPort: 53
        - name: metrics
          protocol: TCP
          containerPort: 9153
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
5  resources/manifests/coredns/service-account.yaml  Normal file
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
22  resources/manifests/coredns/service.yaml  Normal file
@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "9153"
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: ${cluster_dns_service_ip}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
@@ -1,11 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: kube-apiserver
  namespace: kube-system
type: Opaque
data:
  apiserver.key: ${apiserver_key}
  apiserver.crt: ${apiserver_cert}
  service-account.pub: ${serviceaccount_pub}
  ca.crt: ${ca_cert}
@@ -1,81 +0,0 @@
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: kube-apiserver
  namespace: kube-system
  labels:
    tier: control-plane
    k8s-app: kube-apiserver
spec:
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: kube-apiserver
      annotations:
        checkpointer.alpha.coreos.com/checkpoint: "true"
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: kube-apiserver
        image: ${hyperkube_image}
        command:
        - /usr/bin/flock
        - --exclusive
        - --timeout=30
        - /var/lock/api-server.lock
        - /hyperkube
        - apiserver
        - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
        - --advertise-address=$(POD_IP)
        - --allow-privileged=true
        - --anonymous-auth=false
        - --authorization-mode=RBAC
        - --bind-address=0.0.0.0
        - --client-ca-file=/etc/kubernetes/secrets/ca.crt
        - --cloud-provider=${cloud_provider}
        - --etcd-servers=${etcd_servers}
        - --insecure-port=0
        - --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
        - --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
        - --secure-port=443
        - --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
        - --service-cluster-ip-range=${service_cidr}
        - --storage-backend=etcd3
        - --tls-ca-file=/etc/kubernetes/secrets/ca.crt
        - --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
        - --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
          readOnly: true
        - mountPath: /etc/kubernetes/secrets
          name: secrets
          readOnly: true
        - mountPath: /var/lock
          name: var-lock
          readOnly: false
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: ssl-certs-host
        hostPath:
          path: /usr/share/ca-certificates
      - name: secrets
        secret:
          secretName: kube-apiserver
      - name: var-lock
        hostPath:
          path: /var/lock
@@ -1,11 +0,0 @@
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  minAvailable: 1
  selector:
    matchLabels:
      tier: control-plane
      k8s-app: kube-controller-manager
@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: kube-controller-manager
  namespace: kube-system
type: Opaque
data:
  service-account.key: ${serviceaccount_key}
  ca.crt: ${ca_cert}
@@ -1,76 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-controller-manager
  namespace: kube-system
  labels:
    tier: control-plane
    k8s-app: kube-controller-manager
spec:
  replicas: 2
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: kube-controller-manager
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: tier
                  operator: In
                  values:
                  - control-plane
                - key: k8s-app
                  operator: In
                  values:
                  - kube-contoller-manager
              topologyKey: kubernetes.io/hostname
      containers:
      - name: kube-controller-manager
        image: ${hyperkube_image}
        command:
        - ./hyperkube
        - controller-manager
        - --allocate-node-cidrs=true
        - --cloud-provider=${cloud_provider}
        - --cluster-cidr=${pod_cidr}
        - --configure-cloud-routes=false
        - --leader-elect=true
        - --root-ca-file=/etc/kubernetes/secrets/ca.crt
        - --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10252 # Note: Using default port. Update if --port option is set differently.
          initialDelaySeconds: 15
          timeoutSeconds: 15
        volumeMounts:
        - name: secrets
          mountPath: /etc/kubernetes/secrets
          readOnly: true
        - name: ssl-host
          mountPath: /etc/ssl/certs
          readOnly: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: secrets
        secret:
          secretName: kube-controller-manager
      - name: ssl-host
        hostPath:
          path: /usr/share/ca-certificates
      dnsPolicy: Default # Don't use cluster DNS.
@@ -1,155 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      containers:
      - name: kubedns
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default # Don't use cluster DNS.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
@@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: ${kube_dns_service_ip}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
@@ -1,48 +0,0 @@
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: kube-etcd-network-checkpointer
  namespace: kube-system
  labels:
    tier: control-plane
    k8s-app: kube-etcd-network-checkpointer
spec:
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: kube-etcd-network-checkpointer
      annotations:
        checkpointer.alpha.coreos.com/checkpoint: "true"
    spec:
      containers:
      - image: quay.io/coreos/kenc:48b6feceeee56c657ea9263f47b6ea091e8d3035
        name: kube-etcd-network-checkpointer
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /etc/kubernetes/selfhosted-etcd
          name: checkpoint-dir
          readOnly: false
        - mountPath: /var/lock
          name: var-lock
          readOnly: false
        command:
        - /usr/bin/flock
        - /var/lock/kenc.lock
        - -c
        - "kenc -r -m iptables && kenc -m iptables"
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: checkpoint-dir
        hostPath:
          path: /etc/kubernetes/checkpoint-iptables
      - name: var-lock
        hostPath:
          path: /var/lock
@@ -1,24 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    k8s-app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "${pod_cidr}",
      "Backend": {
        "Type": "vxlan"
      }
    }
@@ -1,64 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel
  namespace: kube-system
  labels:
    tier: node
    k8s-app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        k8s-app: flannel
    spec:
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.7.1-amd64
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface=$(POD_IP)"]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: run
          mountPath: /run
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: busybox
        command: [ "/bin/sh", "-c", "set -e -x; TMP=/etc/cni/net.d/.tmp-flannel-cfg; cp /etc/kube-flannel/cni-conf.json $TMP; mv $TMP /etc/cni/net.d/10-flannel.conf; while :; do sleep 3600; done" ]
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/kubernetes/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
12  resources/manifests/kube-proxy-role-binding.yaml  Normal file
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-proxy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-proxier # Automatically created system role.
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system
5  resources/manifests/kube-proxy-sa.yaml  Normal file
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: kube-system
  name: kube-proxy
@@ -1,4 +1,4 @@
apiVersion: "extensions/v1beta1"
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-proxy
@@ -7,49 +7,70 @@ metadata:
tier: node
k8s-app: kube-proxy
spec:
selector:
matchLabels:
tier: node
k8s-app: kube-proxy
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
tier: node
k8s-app: kube-proxy
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
hostNetwork: true
priorityClassName: system-node-critical
serviceAccountName: kube-proxy
tolerations:
- effect: NoSchedule
  operator: Exists
- effect: NoExecute
  operator: Exists
containers:
- name: kube-proxy
image: ${hyperkube_image}
command:
- /hyperkube
- proxy
- ./hyperkube
- kube-proxy
- --cluster-cidr=${pod_cidr}
- --hostname-override=$(NODE_NAME)
- --kubeconfig=/etc/kubernetes/kubeconfig
- --proxy-mode=iptables
- --proxy-mode=ipvs
env:
- name: NODE_NAME
  valueFrom:
    fieldRef:
      fieldPath: spec.nodeName
livenessProbe:
  httpGet:
    path: /healthz
    port: 10256
  initialDelaySeconds: 15
  timeoutSeconds: 15
securityContext:
  privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
  name: ssl-certs-host
  readOnly: true
- name: etc-kubernetes
- name: kubeconfig
  mountPath: /etc/kubernetes
  readOnly: true
hostNetwork: true
tolerations:
- key: CriticalAddonsOnly
  operator: Exists
- key: node-role.kubernetes.io/master
  operator: Exists
  effect: NoSchedule
- name: lib-modules
  mountPath: /lib/modules
  readOnly: true
- name: ssl-certs-host
  mountPath: /etc/ssl/certs
  readOnly: true
volumes:
- hostPath:
    path: /usr/share/ca-certificates
  name: ssl-certs-host
- name: etc-kubernetes
- name: kubeconfig
  configMap:
    name: kubeconfig-in-cluster
- name: lib-modules
  hostPath:
    path: /etc/kubernetes
    path: /lib/modules
- name: ssl-certs-host
  hostPath:
    path: ${trusted_certs_dir}
@@ -1,11 +0,0 @@
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  minAvailable: 1
  selector:
    matchLabels:
      tier: control-plane
      k8s-app: kube-scheduler
@@ -1,55 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-scheduler
  namespace: kube-system
  labels:
    tier: control-plane
    k8s-app: kube-scheduler
spec:
  replicas: 2
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: kube-scheduler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: tier
                  operator: In
                  values:
                  - control-plane
                - key: k8s-app
                  operator: In
                  values:
                  - kube-scheduler
              topologyKey: kubernetes.io/hostname
      containers:
      - name: kube-scheduler
        image: ${hyperkube_image}
        command:
        - ./hyperkube
        - scheduler
        - --leader-elect=true
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10251 # Note: Using default port. Update if --port option is set differently.
          initialDelaySeconds: 15
          timeoutSeconds: 15
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
24  resources/manifests/kubeconfig-in-cluster.yaml  Normal file
@@ -0,0 +1,24 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubeconfig-in-cluster
  namespace: kube-system
data:
  kubeconfig: |
    apiVersion: v1
    clusters:
    - name: local
      cluster:
        # kubeconfig-in-cluster is for control plane components that must reach
        # kube-apiserver before service IPs are available (e.g. 10.3.0.1)
        server: ${server}
        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
    users:
    - name: service-account
      user:
        # Use service account token
        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    contexts:
    - context:
        cluster: local
        user: service-account
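For reference, the manifests in this change consume the ConfigMap above by mounting kubeconfig-in-cluster at /etc/kubernetes and pointing their --kubeconfig flag at the rendered file (kube-proxy and kube-router both follow this pattern). A minimal usage sketch, not part of the diff; the container name and image are placeholders:

      containers:
      - name: example-component          # hypothetical consumer of kubeconfig-in-cluster
        image: example.com/component     # placeholder image
        command:
        - /component
        - --kubeconfig=/etc/kubernetes/kubeconfig
        volumeMounts:
        - name: kubeconfig
          mountPath: /etc/kubernetes
          readOnly: true
      volumes:
      - name: kubeconfig
        configMap:
          name: kubeconfig-in-cluster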
12  resources/manifests/kubelet-delete-cluster-role-binding.yaml  Normal file
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-delete
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubelet-delete
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
10  resources/manifests/kubelet-delete-cluster-role.yaml  Normal file
@@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kubelet-delete
rules:
- apiGroups: [""]
  resources:
  - nodes
  verbs:
  - delete
12  resources/manifests/kubelet-nodes-cluster-role-binding.yaml  Normal file
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system-nodes
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- kind: Group
  name: system:nodes
  apiGroup: rbac.authorization.k8s.io
@@ -1,58 +0,0 @@
apiVersion: "extensions/v1beta1"
kind: DaemonSet
metadata:
  name: pod-checkpointer
  namespace: kube-system
  labels:
    tier: control-plane
    k8s-app: pod-checkpointer
spec:
  template:
    metadata:
      labels:
        tier: control-plane
        k8s-app: pod-checkpointer
      annotations:
        checkpointer.alpha.coreos.com/checkpoint: "true"
    spec:
      containers:
      - name: checkpoint
        image: quay.io/coreos/pod-checkpointer:2cad4cac4186611a79de1969e3ea4924f02f459e
        command:
        - /checkpoint
        - --v=4
        - --lock-file=/var/run/lock/pod-checkpointer.lock
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        imagePullPolicy: Always
        volumeMounts:
        - mountPath: /etc/kubernetes
          name: etc-kubernetes
        - mountPath: /var/run
          name: var-run
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      restartPolicy: Always
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
      - name: etc-kubernetes
        hostPath:
          path: /etc/kubernetes
      - name: var-run
        hostPath:
          path: /var/run
@@ -1,35 +1,42 @@
apiVersion: v1
kind: Pod
metadata:
name: bootstrap-kube-apiserver
name: kube-apiserver
namespace: kube-system
labels:
k8s-app: kube-apiserver
tier: control-plane
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
hostNetwork: true
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
runAsUser: 65534
containers:
- name: kube-apiserver
image: ${hyperkube_image}
command:
- /usr/bin/flock
- --exclusive
- --timeout=30
- /var/lock/api-server.lock
- /hyperkube
- apiserver
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- kube-apiserver
- --advertise-address=$(POD_IP)
- --allow-privileged=true
- --anonymous-auth=false
- --authorization-mode=RBAC
- --bind-address=0.0.0.0
- --client-ca-file=/etc/kubernetes/secrets/ca.crt
- --cloud-provider=${cloud_provider}
- --etcd-cafile=/etc/kubernetes/secrets/etcd-client-ca.crt
- --etcd-certfile=/etc/kubernetes/secrets/etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/secrets/etcd-client.key
- --etcd-servers=${etcd_servers}
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/secrets/apiserver.crt
- --kubelet-client-key=/etc/kubernetes/secrets/apiserver.key
- --secure-port=443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname${aggregation_flags}
- --secure-port=6443
- --service-account-key-file=/etc/kubernetes/secrets/service-account.pub
- --service-cluster-ip-range=${service_cidr}
- --cloud-provider=${cloud_provider}
- --storage-backend=etcd3
- --tls-ca-file=/etc/kubernetes/secrets/ca.crt
- --tls-cert-file=/etc/kubernetes/secrets/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/secrets/apiserver.key
env:
@@ -37,24 +44,20 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
resources:
requests:
cpu: 150m
volumeMounts:
- mountPath: /etc/ssl/certs
  name: ssl-certs-host
- name: secrets
  mountPath: /etc/kubernetes/secrets
  readOnly: true
- mountPath: /etc/kubernetes/secrets
  name: secrets
- name: ssl-certs-host
  mountPath: /etc/ssl/certs
  readOnly: true
- mountPath: /var/lock
  name: var-lock
  readOnly: false
hostNetwork: true
volumes:
- name: secrets
  hostPath:
    path: /etc/kubernetes/bootstrap-secrets
- name: ssl-certs-host
  hostPath:
    path: /usr/share/ca-certificates
- name: var-lock
  hostPath:
    path: /var/lock
    path: ${trusted_certs_dir}
60  resources/static-manifests/kube-controller-manager.yaml  Normal file
@@ -0,0 +1,60 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
  labels:
    k8s-app: kube-controller-manager
    tier: control-plane
  annotations:
    seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
  hostNetwork: true
  priorityClassName: system-cluster-critical
  securityContext:
    runAsNonRoot: true
    runAsUser: 65534
  containers:
  - name: kube-controller-manager
    image: ${hyperkube_image}
    command:
    - /hyperkube
    - kube-controller-manager
    - --allocate-node-cidrs=true
    - --cloud-provider=${cloud_provider}
    - --cluster-cidr=${pod_cidr}
    - --cluster-signing-cert-file=/etc/kubernetes/secrets/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/secrets/ca.key
    - --configure-cloud-routes=false
    - --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins
    - --kubeconfig=/etc/kubernetes/secrets/kubeconfig
    - --leader-elect=true
    - --pod-eviction-timeout=1m
    - --root-ca-file=/etc/kubernetes/secrets/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/secrets/service-account.key
    - --service-cluster-ip-range=${service_cidr}
    livenessProbe:
      httpGet:
        scheme: HTTPS
        host: 127.0.0.1
        path: /healthz
        port: 10257
      initialDelaySeconds: 15
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 150m
    volumeMounts:
    - name: secrets
      mountPath: /etc/kubernetes/secrets
      readOnly: true
    - name: ssl-host
      mountPath: /etc/ssl/certs
      readOnly: true
  volumes:
  - name: secrets
    hostPath:
      path: /etc/kubernetes/bootstrap-secrets
  - name: ssl-host
    hostPath:
      path: ${trusted_certs_dir}
43  resources/static-manifests/kube-scheduler.yaml  Normal file
@@ -0,0 +1,43 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
  labels:
    k8s-app: kube-scheduler
    tier: control-plane
  annotations:
    seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
  hostNetwork: true
  priorityClassName: system-cluster-critical
  securityContext:
    runAsNonRoot: true
    runAsUser: 65534
  containers:
  - name: kube-scheduler
    image: ${hyperkube_image}
    command:
    - /hyperkube
    - kube-scheduler
    - --kubeconfig=/etc/kubernetes/secrets/kubeconfig
    - --leader-elect=true
    livenessProbe:
      httpGet:
        scheme: HTTPS
        host: 127.0.0.1
        path: /healthz
        port: 10259
      initialDelaySeconds: 15
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - name: secrets
      mountPath: /etc/kubernetes/secrets
      readOnly: true
  volumes:
  - name: secrets
    hostPath:
      path: /etc/kubernetes/bootstrap-secrets
@@ -1,5 +1,4 @@
cluster_name = "example"
api_servers = ["node1.example.com"]
etcd_servers = ["http://127.0.0.1:2379"]
asset_dir = "/home/core/clusters/mycluster"
experimental_self_hosted_etcd = false
etcd_servers = ["node1.example.com"]
networking = "flannel"
108  tls-aggregation.tf  Normal file
@@ -0,0 +1,108 @@
locals {
  # Kubernetes Aggregation TLS assets map
  aggregation_tls = var.enable_aggregation ? {
    "tls/k8s/aggregation-ca.crt"     = tls_self_signed_cert.aggregation-ca[0].cert_pem,
    "tls/k8s/aggregation-client.crt" = tls_locally_signed_cert.aggregation-client[0].cert_pem,
    "tls/k8s/aggregation-client.key" = tls_private_key.aggregation-client[0].private_key_pem,
  } : {}
}

# Kubernetes Aggregation CA (i.e. front-proxy-ca)
# Files: tls/{aggregation-ca.crt,aggregation-ca.key}

resource "tls_private_key" "aggregation-ca" {
  count = var.enable_aggregation ? 1 : 0

  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_self_signed_cert" "aggregation-ca" {
  count = var.enable_aggregation ? 1 : 0

  key_algorithm   = tls_private_key.aggregation-ca[0].algorithm
  private_key_pem = tls_private_key.aggregation-ca[0].private_key_pem

  subject {
    common_name = "kubernetes-front-proxy-ca"
  }

  is_ca_certificate     = true
  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "cert_signing",
  ]
}

resource "local_file" "aggregation-ca-key" {
  count = var.enable_aggregation && var.asset_dir != "" ? 1 : 0

  content  = tls_private_key.aggregation-ca[0].private_key_pem
  filename = "${var.asset_dir}/tls/aggregation-ca.key"
}

resource "local_file" "aggregation-ca-crt" {
  count = var.enable_aggregation && var.asset_dir != "" ? 1 : 0

  content  = tls_self_signed_cert.aggregation-ca[0].cert_pem
  filename = "${var.asset_dir}/tls/aggregation-ca.crt"
}

# Kubernetes apiserver (i.e. front-proxy-client)
# Files: tls/{aggregation-client.crt,aggregation-client.key}

resource "tls_private_key" "aggregation-client" {
  count = var.enable_aggregation ? 1 : 0

  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "aggregation-client" {
  count = var.enable_aggregation ? 1 : 0

  key_algorithm   = tls_private_key.aggregation-client[0].algorithm
  private_key_pem = tls_private_key.aggregation-client[0].private_key_pem

  subject {
    common_name = "kube-apiserver"
  }
}

resource "tls_locally_signed_cert" "aggregation-client" {
  count = var.enable_aggregation ? 1 : 0

  cert_request_pem = tls_cert_request.aggregation-client[0].cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.aggregation-ca[0].key_algorithm
  ca_private_key_pem = tls_private_key.aggregation-ca[0].private_key_pem
  ca_cert_pem        = tls_self_signed_cert.aggregation-ca[0].cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "client_auth",
  ]
}

resource "local_file" "aggregation-client-key" {
  count = var.enable_aggregation && var.asset_dir != "" ? 1 : 0

  content  = tls_private_key.aggregation-client[0].private_key_pem
  filename = "${var.asset_dir}/tls/aggregation-client.key"
}

resource "local_file" "aggregation-client-crt" {
  count = var.enable_aggregation && var.asset_dir != "" ? 1 : 0

  content  = tls_locally_signed_cert.aggregation-client[0].cert_pem
  filename = "${var.asset_dir}/tls/aggregation-client.crt"
}
245  tls-etcd.tf  Normal file
@@ -0,0 +1,245 @@
locals {
  # etcd TLS assets map
  etcd_tls = {
    "tls/etcd/etcd-client-ca.crt" = tls_self_signed_cert.etcd-ca.cert_pem,
    "tls/etcd/etcd-client.crt"    = tls_locally_signed_cert.client.cert_pem,
    "tls/etcd/etcd-client.key"    = tls_private_key.client.private_key_pem
    "tls/etcd/server-ca.crt"      = tls_self_signed_cert.etcd-ca.cert_pem,
    "tls/etcd/server.crt"         = tls_locally_signed_cert.server.cert_pem
    "tls/etcd/server.key"         = tls_private_key.server.private_key_pem
    "tls/etcd/peer-ca.crt"        = tls_self_signed_cert.etcd-ca.cert_pem,
    "tls/etcd/peer.crt"           = tls_locally_signed_cert.peer.cert_pem
    "tls/etcd/peer.key"           = tls_private_key.peer.private_key_pem
  }
}

# etcd CA

resource "tls_private_key" "etcd-ca" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_self_signed_cert" "etcd-ca" {
  key_algorithm   = tls_private_key.etcd-ca.algorithm
  private_key_pem = tls_private_key.etcd-ca.private_key_pem

  subject {
    common_name  = "etcd-ca"
    organization = "etcd"
  }

  is_ca_certificate     = true
  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "cert_signing",
  ]
}

# etcd-ca.crt
resource "local_file" "etcd_ca_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_self_signed_cert.etcd-ca.cert_pem
  filename = "${var.asset_dir}/tls/etcd-ca.crt"
}

# etcd-client-ca.crt
resource "local_file" "etcd_client_ca_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_self_signed_cert.etcd-ca.cert_pem
  filename = "${var.asset_dir}/tls/etcd-client-ca.crt"
}

# etcd-ca.key
resource "local_file" "etcd_ca_key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.etcd-ca.private_key_pem
  filename = "${var.asset_dir}/tls/etcd-ca.key"
}

# etcd Client (apiserver to etcd communication)

resource "tls_private_key" "client" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "client" {
  key_algorithm   = tls_private_key.client.algorithm
  private_key_pem = tls_private_key.client.private_key_pem

  subject {
    common_name  = "etcd-client"
    organization = "etcd"
  }

  ip_addresses = [
    "127.0.0.1",
  ]

  dns_names = concat(var.etcd_servers, ["localhost"])
}

resource "tls_locally_signed_cert" "client" {
  cert_request_pem = tls_cert_request.client.cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.etcd-ca.key_algorithm
  ca_private_key_pem = tls_private_key.etcd-ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.etcd-ca.cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

# etcd-client.crt
resource "local_file" "etcd_client_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_locally_signed_cert.client.cert_pem
  filename = "${var.asset_dir}/tls/etcd-client.crt"
}

# etcd-client.key
resource "local_file" "etcd_client_key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.client.private_key_pem
  filename = "${var.asset_dir}/tls/etcd-client.key"
}

# etcd Server

resource "tls_private_key" "server" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "server" {
  key_algorithm   = tls_private_key.server.algorithm
  private_key_pem = tls_private_key.server.private_key_pem

  subject {
    common_name  = "etcd-server"
    organization = "etcd"
  }

  ip_addresses = [
    "127.0.0.1",
  ]

  dns_names = concat(var.etcd_servers, ["localhost"])
}

resource "tls_locally_signed_cert" "server" {
  cert_request_pem = tls_cert_request.server.cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.etcd-ca.key_algorithm
  ca_private_key_pem = tls_private_key.etcd-ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.etcd-ca.cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

# server-ca.crt
resource "local_file" "etcd_server_ca_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_self_signed_cert.etcd-ca.cert_pem
  filename = "${var.asset_dir}/tls/etcd/server-ca.crt"
}

# server.crt
resource "local_file" "etcd_server_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_locally_signed_cert.server.cert_pem
  filename = "${var.asset_dir}/tls/etcd/server.crt"
}

# server.key
resource "local_file" "etcd_server_key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.server.private_key_pem
  filename = "${var.asset_dir}/tls/etcd/server.key"
}

# etcd Peer

resource "tls_private_key" "peer" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "peer" {
  key_algorithm   = tls_private_key.peer.algorithm
  private_key_pem = tls_private_key.peer.private_key_pem

  subject {
    common_name  = "etcd-peer"
    organization = "etcd"
  }

  dns_names = var.etcd_servers
}

resource "tls_locally_signed_cert" "peer" {
  cert_request_pem = tls_cert_request.peer.cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.etcd-ca.key_algorithm
  ca_private_key_pem = tls_private_key.etcd-ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.etcd-ca.cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

# peer-ca.crt
resource "local_file" "etcd_peer_ca_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_self_signed_cert.etcd-ca.cert_pem
  filename = "${var.asset_dir}/tls/etcd/peer-ca.crt"
}

# peer.crt
resource "local_file" "etcd_peer_crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_locally_signed_cert.peer.cert_pem
  filename = "${var.asset_dir}/tls/etcd/peer.crt"
}

# peer.key
resource "local_file" "etcd_peer_key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.peer.private_key_pem
  filename = "${var.asset_dir}/tls/etcd/peer.key"
}
228  tls-k8s.tf  Normal file
@@ -0,0 +1,228 @@
locals {
  # Kubernetes TLS assets map
  kubernetes_tls = {
    "tls/k8s/ca.crt"              = tls_self_signed_cert.kube-ca.cert_pem,
    "tls/k8s/ca.key"              = tls_private_key.kube-ca.private_key_pem,
    "tls/k8s/apiserver.crt"       = tls_locally_signed_cert.apiserver.cert_pem,
    "tls/k8s/apiserver.key"       = tls_private_key.apiserver.private_key_pem,
    "tls/k8s/service-account.pub" = tls_private_key.service-account.public_key_pem
    "tls/k8s/service-account.key" = tls_private_key.service-account.private_key_pem
  }
}

# Kubernetes CA (tls/{ca.crt,ca.key})

resource "tls_private_key" "kube-ca" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_self_signed_cert" "kube-ca" {
  key_algorithm   = tls_private_key.kube-ca.algorithm
  private_key_pem = tls_private_key.kube-ca.private_key_pem

  subject {
    common_name  = "kubernetes-ca"
    organization = "typhoon"
  }

  is_ca_certificate     = true
  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "cert_signing",
  ]
}

resource "local_file" "kube-ca-key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.kube-ca.private_key_pem
  filename = "${var.asset_dir}/tls/ca.key"
}

resource "local_file" "kube-ca-crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_self_signed_cert.kube-ca.cert_pem
  filename = "${var.asset_dir}/tls/ca.crt"
}

# Kubernetes API Server (tls/{apiserver.key,apiserver.crt})

resource "tls_private_key" "apiserver" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "apiserver" {
  key_algorithm   = tls_private_key.apiserver.algorithm
  private_key_pem = tls_private_key.apiserver.private_key_pem

  subject {
    common_name  = "kube-apiserver"
    organization = "system:masters"
  }

  dns_names = flatten([
    var.api_servers,
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.${var.cluster_domain_suffix}",
  ])

  ip_addresses = [
    cidrhost(var.service_cidr, 1),
  ]
}

resource "tls_locally_signed_cert" "apiserver" {
  cert_request_pem = tls_cert_request.apiserver.cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.kube-ca.key_algorithm
  ca_private_key_pem = tls_private_key.kube-ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.kube-ca.cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

resource "local_file" "apiserver-key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.apiserver.private_key_pem
  filename = "${var.asset_dir}/tls/apiserver.key"
}

resource "local_file" "apiserver-crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_locally_signed_cert.apiserver.cert_pem
  filename = "${var.asset_dir}/tls/apiserver.crt"
}

# Kubernetes Admin (tls/{admin.key,admin.crt})

resource "tls_private_key" "admin" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "admin" {
  key_algorithm   = tls_private_key.admin.algorithm
  private_key_pem = tls_private_key.admin.private_key_pem

  subject {
    common_name  = "kubernetes-admin"
    organization = "system:masters"
  }
}

resource "tls_locally_signed_cert" "admin" {
  cert_request_pem = tls_cert_request.admin.cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.kube-ca.key_algorithm
  ca_private_key_pem = tls_private_key.kube-ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.kube-ca.cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "client_auth",
  ]
}

resource "local_file" "admin-key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.admin.private_key_pem
  filename = "${var.asset_dir}/tls/admin.key"
}

resource "local_file" "admin-crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_locally_signed_cert.admin.cert_pem
  filename = "${var.asset_dir}/tls/admin.crt"
}

# Kubernetes Service Account (tls/{service-account.key,service-account.pub})

resource "tls_private_key" "service-account" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "local_file" "service-account-key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.service-account.private_key_pem
  filename = "${var.asset_dir}/tls/service-account.key"
}

resource "local_file" "service-account-crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.service-account.public_key_pem
  filename = "${var.asset_dir}/tls/service-account.pub"
}

# Kubelet

resource "tls_private_key" "kubelet" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "kubelet" {
  key_algorithm   = tls_private_key.kubelet.algorithm
  private_key_pem = tls_private_key.kubelet.private_key_pem

  subject {
    common_name  = "kubelet"
    organization = "system:nodes"
  }
}

resource "tls_locally_signed_cert" "kubelet" {
  cert_request_pem = tls_cert_request.kubelet.cert_request_pem

  ca_key_algorithm   = tls_self_signed_cert.kube-ca.key_algorithm
  ca_private_key_pem = tls_private_key.kube-ca.private_key_pem
  ca_cert_pem        = tls_self_signed_cert.kube-ca.cert_pem

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

resource "local_file" "kubelet-key" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_private_key.kubelet.private_key_pem
  filename = "${var.asset_dir}/tls/kubelet.key"
}

resource "local_file" "kubelet-crt" {
  count = var.asset_dir == "" ? 0 : 1

  content  = tls_locally_signed_cert.kubelet.cert_pem
  filename = "${var.asset_dir}/tls/kubelet.crt"
}
165  tls.tf
@@ -1,165 +0,0 @@
# NOTE: Across this module, the following syntax is used at various places:
# `"${var.ca_certificate == "" ? join(" ", tls_private_key.kube-ca.*.private_key_pem) : var.ca_private_key}"`
#
# Due to https://github.com/hashicorp/hil/issues/50, both sides of conditions
# are evaluated, until one of them is discarded. Unfortunately, the
# `{tls_private_key/tls_self_signed_cert}.kube-ca` resources are created
# conditionally and might not be present - in which case an error is
# generated. Because a `count` is used on these resources, the resources can be
# referenced as lists with the `.*` notation, and arrays are allowed to be
# empty. The `join()` interpolation function is then used to cast them back to
# a string. Since `count` can only be 0 or 1, the returned value is either empty
# (and discarded anyways) or the desired value.
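The NOTE above describes the conditional-resource workaround used throughout the removed tls.tf. A minimal sketch of that pattern, not part of the diff; the `example` resource and `override` variable are hypothetical:

  variable "override" {
    default = ""
  }

  # Created only when no override is supplied, so count is 0 or 1.
  resource "tls_private_key" "example" {
    count     = "${var.override == "" ? 1 : 0}"
    algorithm = "RSA"
    rsa_bits  = "2048"
  }

  # The splat turns the conditionally-created resource into a 0- or 1-element
  # list and join() casts it back to a string, so the discarded branch never
  # references a resource that was not created.
  output "key_pem" {
    value = "${var.override == "" ? join(" ", tls_private_key.example.*.private_key_pem) : var.override}"
  }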
|
||||

# Kubernetes CA (tls/{ca.crt,ca.key})
resource "tls_private_key" "kube-ca" {
  count = "${var.ca_certificate == "" ? 1 : 0}"

  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_self_signed_cert" "kube-ca" {
  count = "${var.ca_certificate == "" ? 1 : 0}"

  key_algorithm   = "${tls_private_key.kube-ca.algorithm}"
  private_key_pem = "${tls_private_key.kube-ca.private_key_pem}"

  subject {
    common_name  = "kube-ca"
    organization = "bootkube"
  }

  is_ca_certificate     = true
  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "cert_signing",
  ]
}

resource "local_file" "kube-ca-key" {
  content  = "${var.ca_certificate == "" ? join(" ", tls_private_key.kube-ca.*.private_key_pem) : var.ca_private_key}"
  filename = "${var.asset_dir}/tls/ca.key"
}

resource "local_file" "kube-ca-crt" {
  content  = "${var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate}"
  filename = "${var.asset_dir}/tls/ca.crt"
}

# Kubernetes API Server (tls/{apiserver.key,apiserver.crt})
resource "tls_private_key" "apiserver" {
  algorithm = "RSA"
  rsa_bits  = "2048"
}

resource "tls_cert_request" "apiserver" {
  key_algorithm   = "${tls_private_key.apiserver.algorithm}"
  private_key_pem = "${tls_private_key.apiserver.private_key_pem}"

  subject {
    common_name  = "kube-apiserver"
    organization = "kube-master"
  }

  dns_names = [
    "${var.api_servers}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster.local",
  ]

  ip_addresses = [
    "${cidrhost(var.service_cidr, 1)}",
  ]
}

resource "tls_locally_signed_cert" "apiserver" {
  cert_request_pem = "${tls_cert_request.apiserver.cert_request_pem}"

  ca_key_algorithm   = "${var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.key_algorithm) : var.ca_key_alg}"
  ca_private_key_pem = "${var.ca_certificate == "" ? join(" ", tls_private_key.kube-ca.*.private_key_pem) : var.ca_private_key}"
  ca_cert_pem        = "${var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate}"

  validity_period_hours = 8760

  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
    "client_auth",
  ]
}

resource "local_file" "apiserver-key" {
  content  = "${tls_private_key.apiserver.private_key_pem}"
  filename = "${var.asset_dir}/tls/apiserver.key"
}

resource "local_file" "apiserver-crt" {
  content  = "${tls_locally_signed_cert.apiserver.cert_pem}"
  filename = "${var.asset_dir}/tls/apiserver.crt"
}

# Kubernetes Service Account (tls/{service-account.key,service-account.pub})
resource "tls_private_key" "service-account" {
|
||||
algorithm = "RSA"
|
||||
rsa_bits = "2048"
|
||||
}
|
||||
|
||||
resource "local_file" "service-account-key" {
|
||||
content = "${tls_private_key.service-account.private_key_pem}"
|
||||
filename = "${var.asset_dir}/tls/service-account.key"
|
||||
}
|
||||
|
||||
resource "local_file" "service-account-crt" {
|
||||
content = "${tls_private_key.service-account.public_key_pem}"
|
||||
filename = "${var.asset_dir}/tls/service-account.pub"
|
||||
}
|
||||
|
||||
# Kubelet
|
||||
resource "tls_private_key" "kubelet" {
|
||||
algorithm = "RSA"
|
||||
rsa_bits = "2048"
|
||||
}
|
||||
|
||||
resource "tls_cert_request" "kubelet" {
|
||||
key_algorithm = "${tls_private_key.kubelet.algorithm}"
|
||||
private_key_pem = "${tls_private_key.kubelet.private_key_pem}"
|
||||
|
||||
subject {
|
||||
common_name = "kubelet"
|
||||
organization = "system:masters"
|
||||
}
|
||||
}
|
||||
|
||||
resource "tls_locally_signed_cert" "kubelet" {
|
||||
cert_request_pem = "${tls_cert_request.kubelet.cert_request_pem}"
|
||||
|
||||
ca_key_algorithm = "${var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.key_algorithm) : var.ca_key_alg}"
|
||||
ca_private_key_pem = "${var.ca_certificate == "" ? join(" ", tls_private_key.kube-ca.*.private_key_pem) : var.ca_private_key}"
|
||||
ca_cert_pem = "${var.ca_certificate == "" ? join(" ", tls_self_signed_cert.kube-ca.*.cert_pem) : var.ca_certificate}"
|
||||
|
||||
validity_period_hours = 8760
|
||||
|
||||
allowed_uses = [
|
||||
"key_encipherment",
|
||||
"digital_signature",
|
||||
"server_auth",
|
||||
"client_auth",
|
||||
]
|
||||
}
|
||||
|
||||
resource "local_file" "kubelet-key" {
|
||||
content = "${tls_private_key.kubelet.private_key_pem}"
|
||||
filename = "${var.asset_dir}/tls/kubelet.key"
|
||||
}
|
||||
|
||||
resource "local_file" "kubelet-crt" {
|
||||
content = "${tls_locally_signed_cert.kubelet.cert_pem}"
|
||||
filename = "${var.asset_dir}/tls/kubelet.crt"
|
||||
}
|
||||

112  variables.tf
@@ -1,73 +1,115 @@
variable "cluster_name" {
|
||||
type = string
|
||||
description = "Cluster name"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "api_servers" {
|
||||
description = "URL used to reach kube-apiserver"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
description = "List of URLs used to reach kube-apiserver"
|
||||
}
|
||||
|
||||
variable "etcd_servers" {
|
||||
description = "List of etcd server URLs including protocol, host, and port"
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "experimental_self_hosted_etcd" {
|
||||
description = "(Experimental) Create self-hosted etcd assets"
|
||||
default = false
|
||||
type = list(string)
|
||||
description = "List of URLs used to reach etcd servers."
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "cloud_provider" {
|
||||
description = "The provider for cloud services (empty string for no provider)"
|
||||
type = "string"
|
||||
type = string
|
||||
description = "Absolute path to a directory where generated assets should be placed (contains secrets)"
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "cloud_provider" {
|
||||
type = string
|
||||
description = "The provider for cloud services (empty string for no provider)"
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "networking" {
|
||||
type = string
|
||||
description = "Choice of networking provider (flannel or calico or kube-router)"
|
||||
default = "flannel"
|
||||
}
|
||||
|
||||
variable "network_mtu" {
|
||||
type = number
|
||||
description = "CNI interface MTU (only applies to calico and kube-router)"
|
||||
default = 1500
|
||||
}
|
||||
|
||||
variable "network_encapsulation" {
|
||||
type = string
|
||||
description = "Network encapsulation mode either ipip or vxlan (only applies to calico)"
|
||||
default = "ipip"
|
||||
}
|
||||
|
||||
variable "network_ip_autodetection_method" {
|
||||
type = string
|
||||
description = "Method to autodetect the host IPv4 address (only applies to calico)"
|
||||
default = "first-found"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
type = string
|
||||
description = "CIDR IP range to assign Kubernetes pods"
|
||||
type = "string"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
type = string
|
||||
description = <<EOD
|
||||
CIDR IP range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns.
|
||||
EOD
|
||||
type = "string"
|
||||
default = "10.3.0.0/24"
|
||||
}
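
# Editor's illustration (not part of variables.tf): the reserved addresses in
# the service_cidr description are computed elsewhere in this module with
# Terraform's cidrhost() function. With the default "10.3.0.0/24" range:
#
#   cidrhost("10.3.0.0/24", 1)  # => "10.3.0.1"  (kube-apiserver service IP)
#   cidrhost("10.3.0.0/24", 10) # => "10.3.0.10" (kube-dns service IP)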

variable "container_images" {
  type        = map(string)
  description = "Container images to use"
  type = "map"

  default = {
    hyperkube = "quay.io/coreos/hyperkube:v1.6.2_coreos.0"
    etcd = "quay.io/coreos/etcd:v3.1.6"
    calico      = "quay.io/calico/node:v3.10.1"
    calico_cni  = "quay.io/calico/cni:v3.10.1"
    flannel     = "quay.io/coreos/flannel:v0.11.0-amd64"
    flannel_cni = "quay.io/coreos/flannel-cni:v0.3.0"
    kube_router = "cloudnativelabs/kube-router:v0.3.2"
    hyperkube   = "k8s.gcr.io/hyperkube:v1.17.0"
    coredns     = "k8s.gcr.io/coredns:1.6.5"
  }
}

variable "ca_certificate" {
  description = "Existing PEM-encoded CA certificate (generated if blank)"
  type = "string"
  default = ""

variable "trusted_certs_dir" {
  type        = string
  description = "Path to the directory on cluster nodes where trust TLS certs are kept"
  default     = "/usr/share/ca-certificates"
}

variable "ca_key_alg" {
  description = "Algorithm used to generate ca_key (required if ca_cert is specified)"
  type = "string"
  default = "RSA"

variable "enable_reporting" {
  type        = bool
  description = "Enable usage or analytics reporting to upstream component owners (Tigera: Calico)"
  default     = false
}

variable "ca_private_key" {
  description = "Existing Certificate Authority private key (required if ca_certificate is set)"
  type = "string"
  default = ""

variable "enable_aggregation" {
  type        = bool
  description = "Enable the Kubernetes Aggregation Layer (defaults to false, recommended)"
  default     = false
}

# unofficial, temporary, may be removed without notice

variable "external_apiserver_port" {
  type        = number
  description = "External kube-apiserver port (e.g. 6443 to match internal kube-apiserver port)"
  default     = 6443
}

variable "cluster_domain_suffix" {
  type        = string
  description = "Queries for domains with the suffix will be answered by kube-dns"
  default     = "cluster.local"
}

10  versions.tf  Normal file
@@ -0,0 +1,10 @@
# Terraform version and plugin versions

terraform {
  required_version = "~> 0.12.0"
  required_providers {
    local    = "~> 1.2"
    template = "~> 2.1"
    tls      = "~> 2.0"
  }
}