mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-10-31 10:18:13 +00:00 
			
		
		
		
	AWS: Kill bash deployment
cf. #38772, #42194, and https://k8s-testgrid.appspot.com/google-aws#aws. cluster/kube-up.sh with KUBERNETES_PROVIDER=aws has been broken on 1.6 for a couple of months now. No one is supporting it. Nuke.
This commit is contained in:
		| @@ -1,49 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2015 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
|  | ||||
| # A library of common helper functions for Ubuntus & Debians. | ||||
|  | ||||
# Detects the AMI to use for minions, deferring to a caller-supplied override.
#
# Vars read:  KUBE_NODE_IMAGE (optional override), AWS_IMAGE (set by detect-image)
# Vars set:   KUBE_NODE_IMAGE
function detect-minion-image() {
  # BUG FIX: the original tested "${KUBE_NODE_IMAGE=-}", which *assigns* the
  # literal string "-" when the variable is unset, so the -z check never
  # succeeded and detect-image was never called.  "${KUBE_NODE_IMAGE-}" merely
  # expands to the empty string when unset, which is what was intended.
  if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
    detect-image
    KUBE_NODE_IMAGE=$AWS_IMAGE
  fi
}
|  | ||||
# Emits the minion bootstrap script on stdout.  This is passed to the AMI via
# the EC2 user-data field, so it requires a compatible AMI.
function generate-minion-user-data {
  # Shebang plus the environment the templates expect, then the templates
  # themselves with comment lines stripped to keep user-data small.
  printf '%s\n' \
    '#! /bin/bash' \
    "SALT_MASTER='${MASTER_INTERNAL_IP}'" \
    "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'" \
    "readonly NON_MASQUERADE_CIDR='${NON_MASQUERADE_CIDR:-}'" \
    "readonly DOCKER_STORAGE='${DOCKER_STORAGE:-}'"
  local template
  for template in common.sh format-disks.sh salt-minion.sh; do
    grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/${template}"
  done
}
|  | ||||
# Probes a minion over ssh to see whether Docker is responding; if not, tries
# to start the docker service so a later retry can succeed.
#
# Arguments:  $1 - minion IP address (or resolvable hostname)
# Vars read:  AWS_SSH_KEY, SSH_USER, LOG
# Outputs:    "working" or "not working yet" on stdout (callers poll on this)
function check-minion() {
  local minion_ip=$1
  local output

  # Declaration split from assignment so the exit status of ssh is not masked
  # by 'local'; all expansions quoted so odd IPs/paths can't word-split.
  output=$(ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${minion_ip}" sudo docker ps -a 2>/dev/null)
  if [[ -z "${output}" ]]; then
    # Docker gave no output (or ssh failed): kick the service, best-effort.
    ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${minion_ip}" sudo service docker start > "${LOG}" 2>&1
    echo "not working yet"
  else
    echo "working"
  fi
}
| @@ -1,167 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2014 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
| ZONE=${KUBE_AWS_ZONE:-us-west-2a} | ||||
| MASTER_SIZE=${MASTER_SIZE:-} | ||||
| NODE_SIZE=${NODE_SIZE:-} | ||||
| NUM_NODES=${NUM_NODES:-4} | ||||
|  | ||||
# Dynamically set node sizes so that Heapster has enough space to run
# (t2.micro up to 50 nodes, t2.small up to 150, t2.medium beyond that).
# Only applies when the caller did not set NODE_SIZE explicitly.
if [[ -z ${NODE_SIZE} ]]; then
  if (( ${NUM_NODES} < 50 )); then
    NODE_SIZE="t2.micro"
  elif (( ${NUM_NODES} < 150 )); then
    NODE_SIZE="t2.small"
  else
    NODE_SIZE="t2.medium"
  fi
fi
|  | ||||
# Dynamically set the master size by the number of nodes; these are guesses.
# Only applies when the caller did not set MASTER_SIZE explicitly.
if [[ -z ${MASTER_SIZE} ]]; then
  # BUG FIX: the original assigned the larger sizes to 'suggested_master_size'
  # and then never read that variable, so every auto-sized cluster got an
  # m3.medium master regardless of node count.  Assign MASTER_SIZE directly,
  # as the comment above (and the e2e config's equivalent block) intends.
  MASTER_SIZE="m3.medium"
  if [[ "${NUM_NODES}" -gt "5" ]]; then
    MASTER_SIZE="m3.large"
  fi
  if [[ "${NUM_NODES}" -gt "10" ]]; then
    MASTER_SIZE="m3.xlarge"
  fi
  if [[ "${NUM_NODES}" -gt "100" ]]; then
    MASTER_SIZE="m3.2xlarge"
  fi
  if [[ "${NUM_NODES}" -gt "250" ]]; then
    MASTER_SIZE="c4.4xlarge"
  fi
  if [[ "${NUM_NODES}" -gt "500" ]]; then
    MASTER_SIZE="c4.8xlarge"
  fi
fi
|  | ||||
| # Optional: Set AWS_S3_BUCKET to the name of an S3 bucket to use for uploading binaries | ||||
| # (otherwise a unique bucket name will be generated for you) | ||||
| #  AWS_S3_BUCKET=kubernetes-artifacts | ||||
|  | ||||
| # Because regions are globally named, we want to create in a single region; default to us-east-1 | ||||
| AWS_S3_REGION=${AWS_S3_REGION:-us-east-1} | ||||
|  | ||||
| # Which docker storage mechanism to use. | ||||
| DOCKER_STORAGE=${DOCKER_STORAGE:-aufs} | ||||
|  | ||||
| # Extra docker options for nodes. | ||||
| EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}" | ||||
|  | ||||
| INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-kubernetes}" | ||||
| CLUSTER_ID=${INSTANCE_PREFIX} | ||||
| VPC_NAME=${VPC_NAME:-kubernetes-vpc} | ||||
| AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa} | ||||
| CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}" | ||||
|  | ||||
| LOG="/dev/null" | ||||
|  | ||||
| MASTER_DISK_TYPE="${MASTER_DISK_TYPE:-gp2}" | ||||
| MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20} | ||||
| # The master root EBS volume size (typically does not need to be very large) | ||||
| MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}" | ||||
| MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8} | ||||
| # The minions root EBS volume size (used to house Docker images) | ||||
| NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}" | ||||
| NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} | ||||
|  | ||||
| MASTER_NAME="${INSTANCE_PREFIX}-master" | ||||
| MASTER_TAG="${INSTANCE_PREFIX}-master" | ||||
| NODE_TAG="${INSTANCE_PREFIX}-minion" | ||||
| NODE_SCOPES="" | ||||
| NON_MASQUERADE_CIDR="${NON_MASQUERADE_CIDR:-10.0.0.0/8}" # Traffic to IPs outside this range will use IP masquerade | ||||
| SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}"  # formerly PORTAL_NET | ||||
| CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" | ||||
| MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" | ||||
| SSH_CIDR="${SSH_CIDR:-0.0.0.0/0}" # IP to restrict ssh access to nodes/master | ||||
| HTTP_API_CIDR="${HTTP_API_CIDR:-0.0.0.0/0}" # IP to restrict HTTP API access | ||||
| # If set to an Elastic IP address, the master instance will be associated with this IP. | ||||
| # Otherwise a new Elastic IP will be acquired | ||||
| # (We used to accept 'auto' to mean 'allocate elastic ip', but that is now the default) | ||||
| MASTER_RESERVED_IP="${MASTER_RESERVED_IP:-}" | ||||
|  | ||||
| # Runtime config | ||||
| RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" | ||||
|  | ||||
| # Optional: Cluster monitoring to setup as part of the cluster bring up: | ||||
| #   none     - No cluster monitoring setup | ||||
| #   influxdb - Heapster, InfluxDB, and Grafana | ||||
| ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" | ||||
|  | ||||
| # Optional: Enable node logging. | ||||
| ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}" | ||||
| LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-elasticsearch}" # options: elasticsearch, gcp | ||||
|  | ||||
| # Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. | ||||
| ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}" | ||||
| ELASTICSEARCH_LOGGING_REPLICAS=1 | ||||
|  | ||||
| # Optional: Don't require https for registries in our local RFC1918 network | ||||
| if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then | ||||
|   EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry ${NON_MASQUERADE_CIDR}" | ||||
| fi | ||||
|  | ||||
| # Optional: Install cluster DNS. | ||||
| ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" | ||||
| DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}" | ||||
| DNS_DOMAIN="cluster.local" | ||||
|  | ||||
| # Optional: Enable DNS horizontal autoscaler | ||||
| ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" | ||||
|  | ||||
| # Optional: Install Kubernetes UI | ||||
| ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" | ||||
|  | ||||
| # Optional: Create autoscaler for cluster's nodes. | ||||
| ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" | ||||
| if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then | ||||
|   # TODO: actually configure ASG or similar | ||||
|   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" | ||||
|   AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" | ||||
|   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" | ||||
| fi | ||||
|  | ||||
| # Admission Controllers to invoke prior to persisting objects in cluster | ||||
| # If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely. | ||||
| ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds | ||||
|  | ||||
| # Optional: Enable/disable public IP assignment for minions. | ||||
| # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! | ||||
| ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} | ||||
|  | ||||
| # OS options for minions | ||||
| KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-jessie}" | ||||
| MASTER_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" | ||||
| NODE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" | ||||
| KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}" | ||||
| COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}" | ||||
| CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}" | ||||
| RKT_VERSION="${KUBE_RKT_VERSION:-1.23.0}" | ||||
|  | ||||
| NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # kubenet, opencontrail, flannel | ||||
|  | ||||
| # OpenContrail networking plugin specific settings | ||||
| OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}" | ||||
| OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}" | ||||
| OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" | ||||
|  | ||||
| # Optional: if set to true, kube-up will configure the cluster to run e2e tests. | ||||
| E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} | ||||
|  | ||||
| # Optional: install a default StorageClass | ||||
| ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}" | ||||
| @@ -1,153 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2014 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
| ZONE=${KUBE_AWS_ZONE:-us-west-2a} | ||||
|  | ||||
| MASTER_SIZE=${MASTER_SIZE:-} | ||||
| NODE_SIZE=${NODE_SIZE:-} | ||||
| NUM_NODES=${NUM_NODES:-2} | ||||
|  | ||||
# Dynamically set node sizes so that Heapster has enough space to run
# (t2.micro up to 50 nodes, t2.small up to 150, t2.medium beyond that).
# Only applies when the caller did not set NODE_SIZE explicitly.
if [[ -z ${NODE_SIZE} ]]; then
  if (( ${NUM_NODES} < 50 )); then
    NODE_SIZE="t2.micro"
  elif (( ${NUM_NODES} < 150 )); then
    NODE_SIZE="t2.small"
  else
    NODE_SIZE="t2.medium"
  fi
fi
|  | ||||
# Guess a master instance type from the node count; these are guesses.
# TODO: gather some data
if [[ -z ${MASTER_SIZE} ]]; then
  # Default to the larger size, then downgrade for small clusters.
  MASTER_SIZE="m3.large"
  if (( ${NUM_NODES} < 150 )); then
    MASTER_SIZE="m3.medium"
  fi
fi
|  | ||||
|  | ||||
| # Because regions are globally named, we want to create in a single region; default to us-east-1 | ||||
| AWS_S3_REGION=${AWS_S3_REGION:-us-east-1} | ||||
|  | ||||
| # Which docker storage mechanism to use. | ||||
| DOCKER_STORAGE=${DOCKER_STORAGE:-aufs} | ||||
|  | ||||
| # Extra docker options for nodes. | ||||
| EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}" | ||||
|  | ||||
| INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-e2e-test-${USER}}" | ||||
| CONFIG_CONTEXT="${KUBE_CONFIG_CONTEXT:-aws_${INSTANCE_PREFIX}}" | ||||
| CLUSTER_ID=${INSTANCE_PREFIX} | ||||
| VPC_NAME=${VPC_NAME:-kubernetes-vpc} | ||||
| AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa} | ||||
|  | ||||
| LOG="/dev/null" | ||||
|  | ||||
| MASTER_DISK_TYPE="${MASTER_DISK_TYPE:-gp2}" | ||||
| MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20} | ||||
| # The master root EBS volume size (typically does not need to be very large) | ||||
| MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}" | ||||
| MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8} | ||||
| # The minions root EBS volume size (used to house Docker images) | ||||
| NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}" | ||||
| NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32} | ||||
|  | ||||
| MASTER_NAME="${INSTANCE_PREFIX}-master" | ||||
| MASTER_TAG="${INSTANCE_PREFIX}-master" | ||||
| NODE_TAG="${INSTANCE_PREFIX}-minion" | ||||
| NODE_SCOPES="" | ||||
| NON_MASQUERADE_CIDR="${NON_MASQUERADE_CIDR:-10.0.0.0/8}" # Traffic to IPs outside this range will use IP masquerade | ||||
| SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}"  # formerly PORTAL_NET | ||||
| CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}" | ||||
| MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" | ||||
| SSH_CIDR="${SSH_CIDR:-0.0.0.0/0}" # IP to restrict ssh access to nodes/master | ||||
| HTTP_API_CIDR="${HTTP_API_CIDR:-0.0.0.0/0}" # IP to restrict HTTP API access | ||||
| # If set to an Elastic IP address, the master instance will be associated with this IP. | ||||
| # Otherwise a new Elastic IP will be acquired | ||||
| # (We used to accept 'auto' to mean 'allocate elastic ip', but that is now the default) | ||||
| MASTER_RESERVED_IP="${MASTER_RESERVED_IP:-}" | ||||
| RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}" | ||||
|  | ||||
| # Optional: Cluster monitoring to setup as part of the cluster bring up: | ||||
| #   none     - No cluster monitoring setup | ||||
| #   influxdb - Heapster, InfluxDB, and Grafana | ||||
| ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-none}" | ||||
|  | ||||
| # Optional: Enable node logging. | ||||
| ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}" | ||||
| LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-elasticsearch}" # options: elasticsearch, gcp | ||||
|  | ||||
| # Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up. | ||||
| ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-false}" | ||||
| ELASTICSEARCH_LOGGING_REPLICAS=1 | ||||
|  | ||||
| # Optional: Don't require https for registries in our local RFC1918 network | ||||
| if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then | ||||
|   EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry ${NON_MASQUERADE_CIDR}" | ||||
| fi | ||||
|  | ||||
| # Optional: Install cluster DNS. | ||||
| ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" | ||||
| DNS_SERVER_IP="${DNS_SERVER_IP:-10.0.0.10}" | ||||
| DNS_DOMAIN="cluster.local" | ||||
|  | ||||
| # Optional: Enable DNS horizontal autoscaler | ||||
| ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}" | ||||
|  | ||||
| # Optional: Install Kubernetes UI | ||||
| ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" | ||||
|  | ||||
| # Optional: Create autoscaler for cluster's nodes. | ||||
| ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" | ||||
| if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then | ||||
|   # TODO: actually configure ASG or similar | ||||
|   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}" | ||||
|   AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}" | ||||
|   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" | ||||
| fi | ||||
|  | ||||
| # Admission Controllers to invoke prior to persisting objects in cluster | ||||
| # If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely. | ||||
| ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds | ||||
|  | ||||
| # Optional: Enable/disable public IP assignment for minions. | ||||
| # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! | ||||
| ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true} | ||||
|  | ||||
| # OS options for minions | ||||
| KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-jessie}" | ||||
| MASTER_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" | ||||
| NODE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION}" | ||||
| KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}" | ||||
| COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}" | ||||
| CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}" | ||||
| RKT_VERSION="${KUBE_RKT_VERSION:-1.23.0}" | ||||
|  | ||||
| NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # kubenet, opencontrail, flannel | ||||
|  | ||||
| # OpenContrail networking plugin specific settings | ||||
| OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}" | ||||
| OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}" | ||||
| OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}" | ||||
|  | ||||
| # Optional: if set to true, kube-up will configure the cluster to run e2e tests. | ||||
| E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false} | ||||
|  | ||||
| # Optional: install a default StorageClass | ||||
| ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}" | ||||
| @@ -1,46 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2015 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
|  | ||||
| # A library of helper functions for Jessie. | ||||
|  | ||||
| source "${KUBE_ROOT}/cluster/aws/common/common.sh" | ||||
|  | ||||
| SSH_USER=admin | ||||
|  | ||||
| # Detects the AMI to use for jessie (considering the region) | ||||
| # | ||||
| # Vars set: | ||||
| #   AWS_IMAGE | ||||
# Detects the AMI to use for jessie (considering the region), honoring an
# AWS_IMAGE override from the caller.
#
# Vars read:  AWS_IMAGE (optional override), AWS_IMAGE_NAME (optional),
#             AWS_REGION (used in the error message only)
# Vars set:   AWS_IMAGE, AWS_IMAGE_NAME, aws_account
function detect-jessie-image () {
  if [[ -z "${AWS_IMAGE-}" ]]; then
    # These images are built using the imagebuilder tool, in the kube-deploy github repo
    # https://github.com/kubernetes/kube-deploy/tree/master/imagebuilder

    # 282335181503: images published by kope.io
    aws_account="282335181503"
    # TODO: we could use a tag for the latest image, instead of bumping it every time
    # e.g. family = k8s-1.3-debian-jessie-amd64-hvm-ebs latest/1.3=true
    if [[ -z "${AWS_IMAGE_NAME:-}" ]]; then
      AWS_IMAGE_NAME="k8s-1.3-debian-jessie-amd64-hvm-ebs-2016-06-18"
    fi
    # $(...) instead of backticks; quote the owner and filter arguments so an
    # image name containing shell metacharacters cannot split or glob.
    AWS_IMAGE=$(aws ec2 describe-images --owner "${aws_account}" --filters "Name=name,Values=${AWS_IMAGE_NAME}" --query "Images[].ImageId" --output text)
    if [[ -z "${AWS_IMAGE-}" ]]; then
      # Diagnostics belong on stderr; nothing downstream can work without an AMI.
      echo "Please specify AWS_IMAGE directly (image ${AWS_IMAGE_NAME} not found in region ${AWS_REGION})" >&2
      exit 1
    fi
  fi
}
| @@ -1,157 +0,0 @@ | ||||
| # AWS specific configuration options | ||||
|  | ||||
| These options can be set as environment variables to customize how your cluster is created.  Only options | ||||
| specific to AWS are documented here, for cross-provider options see [this document](../options.md). | ||||
|  | ||||
| This is a work-in-progress; not all options are documented yet! | ||||
|  | ||||
| **KUBE_AWS_ZONE** | ||||
|  | ||||
| The AWS availability zone to deploy to.  Defaults to us-west-2a. | ||||
|  | ||||
| **AWS_IMAGE** | ||||
|  | ||||
| The AMI to use.  If not specified, the image will be selected based on the AWS region. | ||||
|  | ||||
| **AWS_S3_BUCKET**, **AWS_S3_REGION** | ||||
|  | ||||
| The bucket name to use, and the region where the bucket should be created, or where the bucket is located if it exists already. | ||||
|  | ||||
| If not specified, defaults to AWS_S3_REGION us-east-1, because buckets are globally named and you probably | ||||
| want to share a bucket across all regions; us-east-1 is a sensible (relatively arbitrary) default. | ||||
|  | ||||
| AWS_S3_BUCKET will default to a uniquely generated name, so you won't collide with other kubernetes users. | ||||
| (Currently this uses the hash of your AWS Access key to produce a per-user unique value). | ||||
|  | ||||
| It is not a bad idea to set AWS_S3_BUCKET to something more human friendly. | ||||
|  | ||||
| AWS_S3_REGION is useful for people that want to control their data location, because of regulatory restrictions for example. | ||||
|  | ||||
| **MASTER_SIZE**, **NODE_SIZE** | ||||
|  | ||||
| The instance type to use for creating the master/minion.  Defaults to auto-sizing based on the number of nodes (see below). | ||||
|  | ||||
| For production usage, we recommend bigger instances, for example: | ||||
|  | ||||
| ``` | ||||
| export MASTER_SIZE=c4.large | ||||
| export NODE_SIZE=r3.large | ||||
| ``` | ||||
|  | ||||
| If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker | ||||
| nodes based on `${NUM_NODES}`. See [Getting started on AWS EC2](../../docs/getting-started-guides/aws.md) for details. | ||||
|  | ||||
| Please note: `kube-up` utilizes ephemeral storage available on instances for docker storage. EBS-only instance types do not | ||||
| support ephemeral storage and will default to docker storage on the root disk which is usually only 8GB. | ||||
| EBS-only instance types include `t2`, `c4`, and `m4`. | ||||
|  | ||||
| **KUBE_ENABLE_NODE_PUBLIC_IP** | ||||
|  | ||||
Should a public IP be automatically assigned to the minions? "true" or "false"
| Defaults to: "true" | ||||
|  | ||||
| Please note: Do not set this to "false" unless you... | ||||
|  | ||||
| - ... already configured a NAT instance in the kubernetes VPC that will enable internet access for the new minions | ||||
| - ... already configured a route for "0.0.0.0/0" to this NAT instance | ||||
| - ... already configured a route for "YOUR_IP/32" to an AWS internet gateway (for the master instance to reach your | ||||
|   client directly during setup) | ||||
|  | ||||
| **DOCKER_STORAGE** | ||||
|  | ||||
| Choose the docker storage driver to use.  This is an advanced option; most people should leave it as the default aufs | ||||
| for parity with GCE. | ||||
|  | ||||
| Supported values: btrfs, aufs, devicemapper, aufs-nolvm | ||||
|  | ||||
| This will also configure your ephemeral storage in a compatible way, and your Docker containers | ||||
| will run on this storage if available, as typically the root disk is comparatively small. | ||||
|  | ||||
| * `btrfs` will combine your ephemeral disks into a btrfs volume.  This is a good option if you have a recent kernel | ||||
|   with a reliable btrfs. | ||||
| * `aufs` uses the aufs driver, but also installs LVM to combine your disks. `aufs-nolvm` will not use LVM, | ||||
|  meaning that only your first ephemeral disk will be used. | ||||
| * `devicemapper` sets up LVM across all your ephemeral disks and sets Docker to drive it directly.  This is a | ||||
|   similar option to btrfs, but without relying on the btrfs filesystem.  Sadly, it does not work with most | ||||
|   configurations - see [this docker bug](https://github.com/docker/docker/issues/4036) | ||||
|  | ||||
| If your machines don't have any ephemeral disks, this will default to the aufs driver on your root disk (with no LVM). | ||||
|  | ||||
| **KUBE_OS_DISTRIBUTION** | ||||
|  | ||||
| The distribution to use.  Defaults to `jessie` | ||||
|  | ||||
| Supported options: | ||||
|  | ||||
| * `jessie`: Debian Jessie, running a custom kubernetes-optimized image.  Should | ||||
|   be supported until 2018 by the debian-security team, and until 2020 by the | ||||
|   debian-LTS team. | ||||
| * `wily`: Ubuntu Wily.  Wily is not an LTS release, and OS support is due to | ||||
|   end in July 2016. | ||||
|  | ||||
| No longer supported as of 1.3: | ||||
|  | ||||
| * `vivid`: Ubuntu Vivid.  Vivid OS support ended in early February 2016. | ||||
|   Docker no longer provides packages for vivid. | ||||
|  | ||||
| Given the support situation, we recommend using Debian Jessie.  In Kubernetes | ||||
| 1.3 Ubuntu should have their next LTS release out, so we should be able to | ||||
| recommend Ubuntu again at that time. | ||||
|  | ||||
| Using kube-up with other operating systems is neither supported nor | ||||
| recommended.  But we would welcome increased OS support for kube-up, so please | ||||
| contribute! | ||||
|  | ||||
| **NON_MASQUERADE_CIDR** | ||||
|  | ||||
| The 'internal' IP range which Kubernetes will use, which will therefore not | ||||
| use IP masquerade.  By default kubernetes runs an internal network for traffic | ||||
| between pods (and between pods and services), and by default this uses the | ||||
| `10.0.0.0/8` range.  However, this sometimes overlaps with a range that you may | ||||
| want to use; in particular the range cannot be used with EC2 ClassicLink.  You | ||||
| may also want to run kubernetes in an existing VPC where you have chosen a CIDR | ||||
| in the `10.0.0.0/8` range. | ||||
|  | ||||
| Setting this flag allows you to change this internal network CIDR.  Note that | ||||
| you must set other values consistently within the CIDR that you choose. | ||||
|  | ||||
| For example, you might choose `172.16.0.0/14`; and you could then choose to | ||||
| configure like this: | ||||
|  | ||||
| ``` | ||||
| export NON_MASQUERADE_CIDR="172.16.0.0/14" | ||||
| export SERVICE_CLUSTER_IP_RANGE="172.16.0.0/16" | ||||
| export DNS_SERVER_IP="172.16.0.10" | ||||
| export MASTER_IP_RANGE="172.17.0.0/24" | ||||
| export CLUSTER_IP_RANGE="172.18.0.0/16" | ||||
| ``` | ||||
|  | ||||
| When choosing a CIDR in the 172.20/12 reserved range you should be careful not | ||||
| to choose a CIDR that overlaps your VPC CIDR (the kube-up script sets the VPC | ||||
| CIDR to 172.20.0.0/16 by default, so you should not overlap that).  If you want | ||||
| to allow inter-VPC traffic you should be careful to avoid your other VPCs as | ||||
| well. | ||||
|  | ||||
| There is also a 100.64/10 address block which is reserved for "Carrier Grade | ||||
| NAT", and which some users have reported success using.  While we haven't seen | ||||
| any problems, or conflicts with any AWS networks, we can't guarantee it.  If you | ||||
| decide you are comfortable using 100.64, you might use: | ||||
|  | ||||
| ``` | ||||
| export NON_MASQUERADE_CIDR="100.64.0.0/10" | ||||
| export SERVICE_CLUSTER_IP_RANGE="100.64.0.0/16" | ||||
| export DNS_SERVER_IP="100.64.0.10" | ||||
| export MASTER_IP_RANGE="100.65.0.0/24" | ||||
| export CLUSTER_IP_RANGE="100.66.0.0/16" | ||||
| ``` | ||||
|  | ||||
| **KUBE_VPC_CIDR_BASE** | ||||
|  | ||||
| By default `kube-up.sh` will create a VPC with CIDR 172.20.0.0/16. `KUBE_VPC_CIDR_BASE` allows to configure | ||||
| this CIDR. For example you may choose to use `172.21.0.0/16`: | ||||
|  | ||||
| ``` | ||||
| export KUBE_VPC_CIDR_BASE=172.21 | ||||
| ``` | ||||
|  | ||||
| []() | ||||
| @@ -1,132 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2015 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
| # Note: these functions override functions in the GCE configure-vm script | ||||
| # We include the GCE script first, and this one second. | ||||
|  | ||||
# Override of the GCE configure-vm hook (see note above): AWS needs no
# networking warm-up here, so this is deliberately a no-op.
ensure-basic-networking() {
  return 0
}
|  | ||||
# Override of the GCE configure-vm hook: install the packages the AWS
# bootstrap needs.  Relies on apt-get-install and download-or-bust being
# defined by the GCE script sourced before this one.
ensure-packages() {
  apt-get-install curl
  # For reading kube_env.yaml
  apt-get-install python-yaml

  # TODO: Where to get safe_format_and_mount?
  # NOTE(review): this cd changes the working directory for the remainder of
  # the bootstrap -- confirm no later step depends on the previous cwd.
  mkdir -p /usr/share/google
  cd /usr/share/google
  download-or-bust "dc96f40fdc9a0815f099a51738587ef5a976f1da" https://raw.githubusercontent.com/GoogleCloudPlatform/compute-image-packages/82b75f314528b90485d5239ab5d5495cc22d775f/google-startup-scripts/usr/share/google/safe_format_and_mount
  chmod +x safe_format_and_mount
}
|  | ||||
# Override of the GCE configure-vm hook: read /etc/kubernetes/kube_env.yaml
# and turn every top-level key into a readonly, exported shell variable.
# Values are shell-quoted by pipes.quote before being eval'd.
set-kube-env() {
  local kube_env_yaml="/etc/kubernetes/kube_env.yaml"

  # kube-env has all the environment variables we care about, in a flat yaml format
  # NOTE(review): iteritems() is Python 2 only -- this assumes 'python' on the
  # image is python2; confirm before reusing elsewhere.
  eval "$(python -c '
import pipes,sys,yaml

for k,v in yaml.load(sys.stdin).iteritems():
  print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
  print("""export {var}""".format(var = k))
  ' < """${kube_env_yaml}""")"
}
|  | ||||
# Override of the GCE configure-vm hook: nothing to clean up on AWS images,
# so this is deliberately a no-op.
remove-docker-artifacts() {
  return 0
}
|  | ||||
# Finds the master persistent-disk device and stages it for mounting.
#
# Vars set:     MASTER_PD_DEVICE ("" when the PD is already mounted,
#               "/dev/xvdb" otherwise)
# Side effects: appends the PD mount entry to /etc/fstab
find-master-pd() {
  # grep -q replaces the original '( grep ... )' subshell, which leaked the
  # matched /proc/mounts line into the bootstrap log.
  if grep -q "/mnt/master-pd" /proc/mounts; then
    echo "Master PD already mounted; won't remount"
    MASTER_PD_DEVICE=""
    return
  fi
  echo "Waiting for master pd to be attached"
  # NOTE: this intentionally waits forever -- the EBS attach is expected to
  # complete, and the master is useless without its PD.
  attempt=0
  while true; do
    echo "Attempt $((attempt + 1)) to check for /dev/xvdb"
    if [[ -e /dev/xvdb ]]; then
      echo "Found /dev/xvdb"
      MASTER_PD_DEVICE="/dev/xvdb"
      break
    fi
    attempt=$((attempt + 1))
    sleep 1
  done

  # Mount the master PD as early as possible
  echo "/dev/xvdb /mnt/master-pd ext4 noatime 0 0" >> /etc/fstab
}
|  | ||||
# Override of the GCE configure-vm hook: the AWS images already ship with
# usable apt sources, so this is deliberately a no-op.
fix-apt-sources() {
  return 0
}
|  | ||||
# Writes the salt grains file for the master node.
#
# Vars read:    KUBELET_APISERVER, KUBELET_CERT, KUBELET_KEY (all three must
#               be set to enable the master kubelet path), MASTER_IP_RANGE
# Side effects: writes /etc/salt/minion.d/grains.conf; calls env-to-grains
#               (defined elsewhere) for runtime_config and kube_user
salt-master-role() {
  cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cloud: aws
EOF

  # If the kubelet on the master is enabled, give it the same CIDR range
  # as a generic node.
  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  kubelet_api_servers: '${KUBELET_APISERVER}'
EOF
  else
    # If the kubelet is running disconnected from a master, give it a fixed
    # CIDR range.
    cat <<EOF >>/etc/salt/minion.d/grains.conf
  cbr-cidr: ${MASTER_IP_RANGE}
EOF
  fi

  env-to-grains "runtime_config"
  env-to-grains "kube_user"
}
|  | ||||
# Writes the salt grains for a kubernetes worker node to
# /etc/salt/minion.d/grains.conf and records the node's hostname override.
salt-node-role() {
  cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-pool
  cloud: aws
  api_servers: '${API_SERVERS}'
EOF

  # We set the hostname_override to the full EC2 private dns name
  # we'd like to use EC2 instance-id, but currently the kubelet health-check assumes the name
  # is resolvable, although that check should be going away entirely (#7092)
  if [[ -z "${HOSTNAME_OVERRIDE:-}" ]]; then
    # Fix: the original ran `curl --silent curl http://...`, passing a literal
    # "curl" as an extra URL.  That URL never resolves, so curl exited
    # non-zero even when the metadata fetch succeeded.  Also use $() instead
    # of backticks.
    HOSTNAME_OVERRIDE=$(curl --silent http://169.254.169.254/2007-01-19/meta-data/local-hostname)
  fi

  env-to-grains "hostname_override"
}
|  | ||||
# Placeholder hook: user-supplied startup scripts are not implemented on AWS.
# TODO(justinsb): Support user scripts on AWS
# AWS doesn't have as rich a metadata service as GCE does
# Maybe specify an env var that is the path to a script?
function run-user-script() {
  return 0
}
|  | ||||
| @@ -1,226 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2015 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
# Discover all the ephemeral disks

# Discovers, formats and mounts the instance's ephemeral disks, then points
# docker and the kubelet at them.
#
# Globals read:  DOCKER_STORAGE (btrfs | aufs-nolvm | devicemapper | aufs;
#                defaults to aufs), DOCKER_OPTS
# Globals set:   DOCKER_OPTS, DOCKER_ROOT, KUBELET_ROOT
# Side effects:  formats block devices, edits /etc/fstab, mounts
#                /mnt/ephemeral, may stop docker and relocate
#                /var/lib/docker and /var/lib/kubelet.
#
# Fix vs. the original: both `for ... in ${block_devices}` loops below
# iterated over the FIRST array element only (unquoted scalar expansion of an
# array), so with more than one ephemeral disk the extra disks were never
# unmounted and never pvcreate'd even though vgcreate received all of them.
function ensure-local-disks() {

# Skip if already mounted (a reboot)
if grep "/mnt/ephemeral" /proc/mounts; then
  echo "Found /mnt/ephemeral in /proc/mounts; skipping local disk initialization"
  return
fi

block_devices=()

# The EC2 metadata service lists ephemeral disks under block-device-mapping/.
ephemeral_devices=$( (curl --silent http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/ | grep ephemeral) || true )
for ephemeral_device in $ephemeral_devices; do
  echo "Checking ephemeral device: ${ephemeral_device}"
  aws_device=$(curl --silent "http://169.254.169.254/2014-11-05/meta-data/block-device-mapping/${ephemeral_device}")

  device_path=""
  if [ -b "/dev/$aws_device" ]; then
    device_path="/dev/$aws_device"
  else
    # Check for the xvd-style name
    xvd_style=$(echo "$aws_device" | sed "s/sd/xvd/")
    if [ -b "/dev/$xvd_style" ]; then
      device_path="/dev/$xvd_style"
    fi
  fi

  if [[ -z ${device_path} ]]; then
    echo "  Could not find disk: ${ephemeral_device}@${aws_device}"
  else
    echo "  Detected ephemeral disk: ${ephemeral_device}@${device_path}"
    block_devices+=("${device_path}")
  fi
done

# These are set if we should move where docker/kubelet store data
# Note this gets set to the parent directory
move_docker=""
move_kubelet=""

docker_storage=${DOCKER_STORAGE:-aufs}

# Format the ephemeral disks
if [[ ${#block_devices[@]} == 0 ]]; then
  echo "No ephemeral block devices found; will use aufs on root"
  docker_storage="aufs"
else
  echo "Block devices: ${block_devices[*]}"

  # Remove any existing mounts (all disks, not just the first — see fix note
  # in the function header).
  for block_device in "${block_devices[@]}"; do
    echo "Unmounting ${block_device}"
    /bin/umount "${block_device}" || echo "Ignoring failure umounting ${block_device}"
    sed -i -e "\|^${block_device}|d" /etc/fstab
  done

  # Remove any existing /mnt/ephemeral entry in /etc/fstab
  sed -i -e "\|/mnt/ephemeral|d" /etc/fstab

  # Mount the storage
  if [[ ${docker_storage} == "btrfs" ]]; then
    apt-get-install btrfs-tools

    if [[ ${#block_devices[@]} == 1 ]]; then
      echo "One ephemeral block device found; formatting with btrfs"
      mkfs.btrfs -f "${block_devices[0]}"
    else
      echo "Found multiple ephemeral block devices, formatting with btrfs as RAID-0"
      mkfs.btrfs -f --data raid0 "${block_devices[@]}"
    fi
    echo "${block_devices[0]}  /mnt/ephemeral  btrfs  noatime,nofail  0 0" >> /etc/fstab
    mkdir -p /mnt/ephemeral
    mount /mnt/ephemeral

    mkdir -p /mnt/ephemeral/kubernetes

    move_docker="/mnt/ephemeral"
    move_kubelet="/mnt/ephemeral/kubernetes"
  elif [[ ${docker_storage} == "aufs-nolvm" ]]; then
    if [[ ${#block_devices[@]} != 1 ]]; then
      echo "aufs-nolvm selected, but multiple ephemeral devices were found; only the first will be available"
    fi

    mkfs -t ext4 "${block_devices[0]}"
    echo "${block_devices[0]}  /mnt/ephemeral  ext4     noatime,nofail  0 0" >> /etc/fstab
    mkdir -p /mnt/ephemeral
    mount /mnt/ephemeral

    mkdir -p /mnt/ephemeral/kubernetes

    move_docker="/mnt/ephemeral"
    move_kubelet="/mnt/ephemeral/kubernetes"
  elif [[ ${docker_storage} == "devicemapper" || ${docker_storage} == "aufs" ]]; then
    # We always use LVM, even with one device
    # In devicemapper mode, Docker can use LVM directly
    # Also, fewer code paths are good
    echo "Using LVM2 and ext4"
    apt-get-install lvm2

    # Don't output spurious "File descriptor X leaked on vgcreate invocation."
    # Known bug: e.g. Ubuntu #591823
    export LVM_SUPPRESS_FD_WARNINGS=1

    # pvcreate every disk (see fix note in the function header); vgcreate
    # then builds one volume group spanning all of them.
    for block_device in "${block_devices[@]}"; do
      pvcreate "${block_device}"
    done
    vgcreate vg-ephemeral "${block_devices[@]}"

    if [[ ${docker_storage} == "devicemapper" ]]; then
      # devicemapper thin provisioning, managed by docker
      # This is the best option, but it is sadly broken on most distros
      # Bug: https://github.com/docker/docker/issues/4036

      # 80% goes to the docker thin-pool; we want to leave some space for host-volumes
      lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral

      DOCKER_OPTS="${DOCKER_OPTS:-} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool"
      # Note that we don't move docker; docker goes direct to the thinpool

      # Remaining space (20%) is for kubernetes data
      # TODO: Should this be a thin pool?  e.g. would we ever want to snapshot this data?
      lvcreate -l 100%FREE -n kubernetes vg-ephemeral
      mkfs -t ext4 /dev/vg-ephemeral/kubernetes
      mkdir -p /mnt/ephemeral/kubernetes
      echo "/dev/vg-ephemeral/kubernetes  /mnt/ephemeral/kubernetes  ext4  noatime,nofail  0 0" >> /etc/fstab
      mount /mnt/ephemeral/kubernetes

      move_kubelet="/mnt/ephemeral/kubernetes"
    else
      # aufs
      # We used to split docker & kubernetes, but we no longer do that, because
      # host volumes go into the kubernetes area, and it is otherwise very easy
      # to fill up small volumes.
      #
      # No need for thin pool since we are not over-provisioning or doing snapshots
      # (probably shouldn't be doing snapshots on ephemeral disk? Should be stateless-ish.)
      # Tried to do it, but it cause problems (#16188)

      lvcreate -l 100%VG -n ephemeral vg-ephemeral
      mkfs -t ext4 /dev/vg-ephemeral/ephemeral
      mkdir -p /mnt/ephemeral
      echo "/dev/vg-ephemeral/ephemeral  /mnt/ephemeral  ext4  noatime,nofail 0 0" >> /etc/fstab
      mount /mnt/ephemeral

      mkdir -p /mnt/ephemeral/kubernetes

      move_docker="/mnt/ephemeral"
      move_kubelet="/mnt/ephemeral/kubernetes"
    fi
  else
    echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
  fi
fi


# Pick the docker storage driver flag to match what was provisioned above.
if [[ ${docker_storage} == "btrfs" ]]; then
  DOCKER_OPTS="${DOCKER_OPTS:-} -s btrfs"
elif [[ ${docker_storage} == "aufs-nolvm" || ${docker_storage} == "aufs" ]]; then
  # Install aufs kernel module
  # Fix issue #14162 with extra-virtual
  if [[ $(lsb_release -i -s) == 'Ubuntu' ]]; then
    apt-get-install "linux-image-extra-$(uname -r)" linux-image-extra-virtual
  fi

  # Install aufs tools
  apt-get-install aufs-tools

  DOCKER_OPTS="${DOCKER_OPTS:-} -s aufs"
elif [[ ${docker_storage} == "devicemapper" ]]; then
  DOCKER_OPTS="${DOCKER_OPTS:-} -s devicemapper"
else
  echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
fi

if [[ -n "${move_docker}" ]]; then
  # Stop docker if it is running, so we can move its files
  systemctl stop docker || true

  # Move docker to e.g. /mnt
  # but only if it is a directory, not a symlink left over from a previous run
  if [[ -d /var/lib/docker ]]; then
    mv /var/lib/docker "${move_docker}/"
  fi
  mkdir -p "${move_docker}/docker"
  # If /var/lib/docker doesn't exist (it will exist if it is already a symlink),
  # then symlink it to the ephemeral docker area
  if [[ ! -e /var/lib/docker ]]; then
    ln -s "${move_docker}/docker" /var/lib/docker
  fi
  DOCKER_ROOT="${move_docker}/docker"
  DOCKER_OPTS="${DOCKER_OPTS:-} -g ${DOCKER_ROOT}"
fi

if [[ -n "${move_kubelet}" ]]; then
  # Move /var/lib/kubelet to e.g. /mnt
  # (the backing for empty-dir volumes can use a lot of space!)
  # (As with /var/lib/docker, only if it is a directory; skip if symlink)
  if [[ -d /var/lib/kubelet ]]; then
    mv /var/lib/kubelet "${move_kubelet}/"
  fi
  mkdir -p "${move_kubelet}/kubelet"
  # Create symlink for /var/lib/kubelet, unless it is already a symlink
  if [[ ! -e /var/lib/kubelet ]]; then
    ln -s "${move_kubelet}/kubelet" /var/lib/kubelet
  fi
  KUBELET_ROOT="${move_kubelet}/kubelet"
fi

}
| @@ -1,27 +0,0 @@ | ||||
| { | ||||
|   "Version": "2012-10-17", | ||||
|   "Statement": [ | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": ["ec2:*"], | ||||
|       "Resource": ["*"] | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": ["elasticloadbalancing:*"], | ||||
|       "Resource": ["*"] | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": ["route53:*"], | ||||
|       "Resource": ["*"] | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": "s3:*", | ||||
|       "Resource": [ | ||||
|         "arn:aws:s3:::kubernetes-*" | ||||
|       ] | ||||
|     } | ||||
|   ] | ||||
| } | ||||
| @@ -1,10 +0,0 @@ | ||||
| { | ||||
|   "Version": "2012-10-17", | ||||
|   "Statement": [ | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Principal": { "Service": "ec2.amazonaws.com"}, | ||||
|       "Action": "sts:AssumeRole" | ||||
|     } | ||||
|   ] | ||||
| } | ||||
| @@ -1,45 +0,0 @@ | ||||
| { | ||||
|   "Version": "2012-10-17", | ||||
|   "Statement": [ | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": "s3:*", | ||||
|       "Resource": [ | ||||
|         "arn:aws:s3:::kubernetes-*" | ||||
|       ] | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": "ec2:Describe*", | ||||
|       "Resource": "*" | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": "ec2:AttachVolume", | ||||
|       "Resource": "*" | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": "ec2:DetachVolume", | ||||
|       "Resource": "*" | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": ["route53:*"], | ||||
|       "Resource": ["*"] | ||||
|     }, | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Action": [ | ||||
|         "ecr:GetAuthorizationToken", | ||||
|         "ecr:BatchCheckLayerAvailability", | ||||
|         "ecr:GetDownloadUrlForLayer", | ||||
|         "ecr:GetRepositoryPolicy", | ||||
|         "ecr:DescribeRepositories", | ||||
|         "ecr:ListImages", | ||||
|         "ecr:BatchGetImage" | ||||
|       ], | ||||
|       "Resource": "*" | ||||
|     } | ||||
|   ] | ||||
| } | ||||
| @@ -1,10 +0,0 @@ | ||||
| { | ||||
|   "Version": "2012-10-17", | ||||
|   "Statement": [ | ||||
|     { | ||||
|       "Effect": "Allow", | ||||
|       "Principal": { "Service": "ec2.amazonaws.com"}, | ||||
|       "Action": "sts:AssumeRole" | ||||
|     } | ||||
|   ] | ||||
| } | ||||
							
								
								
									
										1609
									
								
								cluster/aws/util.sh
									
									
									
									
									
								
							
							
						
						
									
										1609
									
								
								cluster/aws/util.sh
									
									
									
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -1,86 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright 2015 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
|  | ||||
| source "${KUBE_ROOT}/cluster/aws/common/common.sh" | ||||
|  | ||||
| SSH_USER=ubuntu | ||||
|  | ||||
# Detects the AMI to use for ubuntu (considering the region)
#
# This is the ubuntu 15.10 image for <region>, amd64, hvm:ebs-ssd.
# See here: http://cloud-images.ubuntu.com/locator/ec2/ for other images.
# This will need to be updated from time to time as amis are deprecated.
#
# Globals read: AWS_REGION, AWS_IMAGE (respected if already set)
# Vars set:     AWS_IMAGE
# Exits 1 if AWS_REGION is not a recognized region.
function detect-wily-image () {
  # Honor an explicit override.
  if [[ -n "${AWS_IMAGE-}" ]]; then
    return
  fi

  case "${AWS_REGION}" in
    ap-northeast-1)  AWS_IMAGE=ami-3355505d ;;
    ap-northeast-2)  AWS_IMAGE=ami-e427e98a ;;
    ap-southeast-1)  AWS_IMAGE=ami-60975903 ;;
    eu-central-1)    AWS_IMAGE=ami-6da2ba01 ;;
    eu-west-1)       AWS_IMAGE=ami-36a71645 ;;
    sa-east-1)       AWS_IMAGE=ami-fd36b691 ;;
    us-east-1)       AWS_IMAGE=ami-6610390c ;;
    us-west-1)       AWS_IMAGE=ami-6e64120e ;;
    cn-north-1)      AWS_IMAGE=ami-17a76f7a ;;
    us-gov-west-1)   AWS_IMAGE=ami-b0bad893 ;;
    ap-southeast-2)  AWS_IMAGE=ami-3895b15b ;;
    us-west-2)       AWS_IMAGE=ami-d95abcb9 ;;
    *)
      echo "Please specify AWS_IMAGE directly (region ${AWS_REGION} not recognized)"
      exit 1
      ;;
  esac
}
|  | ||||
		Reference in New Issue
	
	Block a user
	 Zach Loafman
					Zach Loafman