examples/k8s: Remove fleet from Kubernetes nodes

* fleet is not a requirement, let's keep it simple

Author: Dalton Hubble
Date:   2016-07-08 22:17:12 -07:00
parent 9dc256abd2
commit 76da59c504
11 changed files with 9 additions and 32 deletions
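
Across the group files below, the change is the same in every node: the display name is updated and the fleet_metadata key is dropped. For orientation, a representative group definition after this commit looks roughly as follows (a sketch assembled from the node1 hunks below; any metadata keys and closing braces outside the shown diff context are omitted):

{
  "id": "node1",
  "name": "k8s controller",
  "profile": "k8s-master",
  "selector": {
    "mac": "52:54:00:a1:9c:ae"
  },
  "metadata": {
    "etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
    "etcd_name": "node1",
    "ipv4_address": "172.17.0.21",
    "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
    "k8s_dns_service_ip": "10.3.0.10"
  }
}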


@@ -1,6 +1,6 @@
 {
   "id": "node1",
-  "name": "Master Node",
+  "name": "k8s controller",
   "profile": "k8s-master",
   "selector": {
     "mac": "52:54:00:a1:9c:ae"
@@ -8,7 +8,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
     "etcd_name": "node1",
-    "fleet_metadata": "role=etcd,name=node1",
     "ipv4_address": "172.17.0.21",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_dns_service_ip": "10.3.0.10",


@@ -1,6 +1,6 @@
 {
   "id": "node2",
-  "name": "Worker 1",
+  "name": "k8s worker",
   "profile": "k8s-worker",
   "selector": {
     "mac": "52:54:00:b2:2f:86"
@@ -8,7 +8,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
     "etcd_name": "node2",
-    "fleet_metadata": "role=etcd,name=node2",
     "ipv4_address": "172.17.0.22",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_controller_endpoint": "https://172.17.0.21",


@@ -1,6 +1,6 @@
 {
   "id": "node3",
-  "name": "Worker 2",
+  "name": "k8s worker",
   "profile": "k8s-worker",
   "selector": {
     "mac": "52:54:00:c3:61:77"
@@ -8,7 +8,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
     "etcd_name": "node3",
-    "fleet_metadata": "role=etcd,name=node3",
     "ipv4_address": "172.17.0.23",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_controller_endpoint": "https://172.17.0.21",


@@ -1,6 +1,6 @@
 {
   "id": "node1",
-  "name": "Master Node",
+  "name": "k8s controller",
   "profile": "k8s-master-install",
   "selector": {
     "os": "installed",
@@ -9,7 +9,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
     "etcd_name": "node1",
-    "fleet_metadata": "role=etcd,name=node1",
     "ipv4_address": "172.15.0.21",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_dns_service_ip": "10.3.0.10",


@@ -1,6 +1,6 @@
 {
   "id": "node2",
-  "name": "Worker 1",
+  "name": "k8s worker",
   "profile": "k8s-worker-install",
   "selector": {
     "os": "installed",
@@ -9,7 +9,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
     "etcd_name": "node2",
-    "fleet_metadata": "role=etcd,name=node2",
     "ipv4_address": "172.15.0.22",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_controller_endpoint": "https://172.15.0.21",


@@ -1,6 +1,6 @@
 {
   "id": "node3",
-  "name": "Worker 2",
+  "name": "k8s worker",
   "profile": "k8s-worker-install",
   "selector": {
     "os": "installed",
@@ -9,7 +9,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
     "etcd_name": "node3",
-    "fleet_metadata": "role=etcd,name=node3",
     "ipv4_address": "172.15.0.23",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_controller_endpoint": "https://172.15.0.21",


@@ -1,6 +1,6 @@
 {
   "id": "node1",
-  "name": "Master Node",
+  "name": "k8s controller",
   "profile": "k8s-master",
   "selector": {
     "mac": "52:54:00:a1:9c:ae"
@@ -8,7 +8,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
     "etcd_name": "node1",
-    "fleet_metadata": "role=etcd,name=node1",
     "ipv4_address": "172.15.0.21",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_dns_service_ip": "10.3.0.10",


@@ -1,6 +1,6 @@
 {
   "id": "node2",
-  "name": "Worker 1",
+  "name": "k8s worker",
   "profile": "k8s-worker",
   "selector": {
     "mac": "52:54:00:b2:2f:86"
@@ -8,7 +8,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
     "etcd_name": "node2",
-    "fleet_metadata": "role=etcd,name=node2",
     "ipv4_address": "172.15.0.22",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_controller_endpoint": "https://172.15.0.21",


@@ -1,6 +1,6 @@
 {
   "id": "node3",
-  "name": "Worker 2",
+  "name": "k8s worker",
   "profile": "k8s-worker",
   "selector": {
     "mac": "52:54:00:c3:61:77"
@@ -8,7 +8,6 @@
   "metadata": {
     "etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
     "etcd_name": "node3",
-    "fleet_metadata": "role=etcd,name=node3",
     "ipv4_address": "172.15.0.23",
     "k8s_cert_endpoint": "http://bootcfg.foo:8080/assets",
     "k8s_controller_endpoint": "https://172.15.0.21",


@@ -14,13 +14,6 @@ systemd:
             Environment="ETCD_LISTEN_PEER_URLS=http://{{.ipv4_address}}:2380"
             Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
             Environment="ETCD_STRICT_RECONFIG_CHECK=true"
-    - name: fleet.service
-      enable: true
-      dropins:
-        - name: 40-fleet-metadata.conf
-          contents: |
-            [Service]
-            Environment="FLEET_METADATA={{.fleet_metadata}}"
     - name: flanneld.service
       dropins:
         - name: 40-ExecStartPre-symlink.conf


@@ -14,13 +14,6 @@ systemd:
             Environment="ETCD_LISTEN_PEER_URLS=http://{{.ipv4_address}}:2380"
             Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
-    - name: fleet.service
-      enable: true
-      dropins:
-        - name: 40-fleet-metadata.conf
-          contents: |
-            [Service]
-            Environment="FLEET_METADATA={{.fleet_metadata}}"
     - name: flanneld.service
       dropins:
         - name: 40-ExecStartPre-symlink.conf
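
With fleet.service and its metadata drop-in removed from both profiles, the systemd section reduces to the etcd and flannel units, and nothing left in the templates references {{.fleet_metadata}}, which is why the group files above can drop that key. A minimal sketch of the remaining structure, assuming unit and drop-in names that are not visible in these hunks (etcd2.service, 40-etcd-cluster.conf) and omitting drop-in contents this commit does not touch:

systemd:
  units:
    - name: etcd2.service            # unit name assumed; only its drop-in lines appear in the hunks above
      enable: true                   # assumed, not visible in the shown context
      dropins:
        - name: 40-etcd-cluster.conf # drop-in name assumed
          contents: |
            [Service]
            Environment="ETCD_LISTEN_PEER_URLS=http://{{.ipv4_address}}:2380"
            Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
            Environment="ETCD_STRICT_RECONFIG_CHECK=true"
    - name: flanneld.service
      dropins:
        - name: 40-ExecStartPre-symlink.conf
          # contents unchanged by this commit and omitted here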