Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 19:58:17 +00:00)
	Merge pull request #41666 from mikedanese/cvm-master
Automatic merge from submit-queue (batch tested with PRs 41306, 42187, 41666, 42275, 42266)

remove support for debian masters in GCE

Asked about this on the mailing list and no one objected. @zmerlynn @roberthbailey

```release-note
Remove support for debian masters in GCE kube-up.
```
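After this change, kube-up can no longer bring up a GCE master on a debian image; the master distro has to be one the helper check in the last hunk below still accepts. As a rough sketch of the resulting workflow (the `KUBE_MASTER_OS_DISTRIBUTION` variable name is assumed from contemporaneous GCE config defaults, not shown in this diff):

```sh
# Assumed usage, not part of this diff: select a still-supported master
# distro before running kube-up; "debian" now fails the distro check.
export KUBE_MASTER_OS_DISTRIBUTION=gci   # or container-linux, trusty
cluster/kube-up.sh
```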
```diff
@@ -18,16 +18,9 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-# Note that this script is also used by AWS; we include it and then override
-# functions with AWS equivalents.  Note `#+AWS_OVERRIDES_HERE` below.
-# TODO(justinsb): Refactor into common script & GCE specific script?
-
 # If we have any arguments at all, this is a push and not just setup.
 is_push=$@
 
-readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
-readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
-
 function ensure-basic-networking() {
   # Deal with GCE networking bring-up race. (We rely on DNS for a lot,
   # and it's just not worth doing a whole lot of startup work if this
```
```diff
@@ -175,23 +168,6 @@ for k,v in yaml.load(sys.stdin).iteritems():
   ' < """${kube_env_yaml}""")"
 }
 
-function set-kube-master-certs() {
-  local kube_master_certs_yaml="${INSTALL_DIR}/kube_master_certs.yaml"
-
-  until curl-metadata kube-master-certs > "${kube_master_certs_yaml}"; do
-    echo 'Waiting for kube-master-certs...'
-    sleep 3
-  done
-
-  eval "$(python -c '
-import pipes,sys,yaml
-
-for k,v in yaml.load(sys.stdin).iteritems():
-  print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
-  print("""export {var}""".format(var = k))
-  ' < """${kube_master_certs_yaml}""")"
-}
 
 function remove-docker-artifacts() {
   echo "== Deleting docker0 =="
   apt-get-install bridge-utils
```
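The eval'd Python one-liner in the removed function (and in the surviving kube-env variant above it) flattens a YAML map into shell variable definitions. A hypothetical two-key payload illustrates the transformation (values invented):

```sh
# Hypothetical metadata payload, in flat YAML:
#   KUBE_USER: admin
#   KUBE_PASSWORD: secret pass
# The Python snippet prints, and the surrounding eval then executes:
readonly KUBE_USER=admin
export KUBE_USER
readonly KUBE_PASSWORD='secret pass'   # pipes.quote() adds quoting as needed
export KUBE_PASSWORD
```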
```diff
@@ -418,51 +394,6 @@ find-master-pd() {
   MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
 }
 
-# Mounts a persistent disk (formatting if needed) to store the persistent data
-# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
-#
-# This function can be reused to mount an existing PD because all of its
-# operations modifying the disk are idempotent -- safe_format_and_mount only
-# formats an unformatted disk, and mkdir -p will leave a directory be if it
-# already exists.
-mount-master-pd() {
-  find-master-pd
-  if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
-    return
-  fi
-
-  # Format and mount the disk, create directories on it for all of the master's
-  # persistent data, and link them to where they're used.
-  echo "Mounting master-pd"
-  mkdir -p /mnt/master-pd
-  /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${MASTER_PD_DEVICE}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
-    { echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
-  # Contains all the data stored in etcd
-  mkdir -m 700 -p /mnt/master-pd/var/etcd
-  # Contains the dynamically generated apiserver auth certs and keys
-  mkdir -p /mnt/master-pd/srv/kubernetes
-  # Contains the cluster's initial config parameters and auth tokens
-  mkdir -p /mnt/master-pd/srv/salt-overlay
-  # Directory for kube-apiserver to store SSH key (if necessary)
-  mkdir -p /mnt/master-pd/srv/sshproxy
-
-  ln -s -f /mnt/master-pd/var/etcd /var/etcd
-  ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
-  ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
-  ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
-
-  # This is a bit of a hack to get around the fact that salt has to run after the
-  # PD and mounted directory are already set up. We can't give ownership of the
-  # directory to etcd until the etcd user and group exist, but they don't exist
-  # until salt runs if we don't create them here. We could alternatively make the
-  # permissions on the directory more permissive, but this seems less bad.
-  if ! id etcd &>/dev/null; then
-    useradd -s /sbin/nologin -d /var/etcd etcd
-  fi
-  chown -R etcd /mnt/master-pd/var/etcd
-  chgrp -R etcd /mnt/master-pd/var/etcd
-}
 
 # Create the overlay files for the salt tree.  We create these in a separate
 # place so that we can blow away the rest of the salt configs on a kube-push and
 # re-apply these.
```
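The idempotency the removed comment describes hinges on `safe_format_and_mount`, a helper shipped on Google images, formatting only when the disk has no filesystem yet. A rough sketch of that behavior (not the actual tool; device path invented):

```sh
# Sketch only: approximate the helper's "format only when blank" behavior.
device=/dev/disk/by-id/google-master-pd   # hypothetical device path
mountpoint=/mnt/master-pd

# blkid exits non-zero when it finds no existing filesystem signature,
# so an already-formatted disk is left untouched.
if ! blkid "${device}" >/dev/null 2>&1; then
  mkfs.ext4 -F "${device}"
fi
mkdir -p "${mountpoint}"   # no-op if the directory already exists
mount "${device}" "${mountpoint}"
```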
```diff
@@ -687,70 +618,6 @@ function convert-bytes-gce-kube() {
   echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
 }
 
-# This should only happen on cluster initialization.
-#
-#  - Uses KUBE_PASSWORD and KUBE_USER to generate basic_auth.csv.
-#  - Uses KUBE_BEARER_TOKEN, KUBELET_TOKEN, and KUBE_PROXY_TOKEN to generate
-#    known_tokens.csv (KNOWN_TOKENS_FILE).
-#  - Uses CA_CERT, MASTER_CERT, and MASTER_KEY to populate the SSL credentials
-#    for the apiserver.
-#  - Optionally uses KUBECFG_CERT and KUBECFG_KEY to store a copy of the client
-#    cert credentials.
-#
-# After the first boot and on upgrade, these files exist on the master-pd
-# and should never be touched again (except perhaps an additional service
-# account, see NB below.)
-function create-salt-master-auth() {
-  if [[ ! -e /srv/kubernetes/ca.crt ]]; then
-    if  [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
-      mkdir -p /srv/kubernetes
-      (umask 077;
-        echo "${CA_CERT}" | base64 --decode > /srv/kubernetes/ca.crt;
-        echo "${MASTER_CERT}" | base64 --decode > /srv/kubernetes/server.cert;
-        echo "${MASTER_KEY}" | base64 --decode > /srv/kubernetes/server.key;
-        # Kubecfg cert/key are optional and included for backwards compatibility.
-        # TODO(roberthbailey): Remove these two lines once GKE no longer requires
-        # fetching clients certs from the master VM.
-        echo "${KUBECFG_CERT:-}" | base64 --decode > /srv/kubernetes/kubecfg.crt;
-        echo "${KUBECFG_KEY:-}" | base64 --decode > /srv/kubernetes/kubecfg.key)
-    fi
-  fi
-  if [ ! -e /srv/kubernetes/kubeapiserver.cert ]; then
-    if [[ ! -z "${KUBEAPISERVER_CERT:-}" ]] && [[ ! -z "${KUBEAPISERVER_KEY:-}" ]]; then
-      (umask 077;
-        echo "${KUBEAPISERVER_CERT}" | base64 --decode > /srv/kubernetes/kubeapiserver.cert;
-        echo "${KUBEAPISERVER_KEY}" | base64 --decode > /srv/kubernetes/kubeapiserver.key)
-    fi
-  fi
-  if [ ! -e "${BASIC_AUTH_FILE}" ]; then
-    mkdir -p /srv/salt-overlay/salt/kube-apiserver
-    (umask 077;
-      echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
-  fi
-  if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
-    mkdir -p /srv/salt-overlay/salt/kube-apiserver
-    (umask 077;
-      echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
-      echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
-      echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")
-  fi
-}
-
-# This should happen only on cluster initialization. After the first boot
-# and on upgrade, the kubeconfig file exists on the master-pd and should
-# never be touched again.
-#
-#  - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
-#    KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
-#    connect to the apiserver.
-function create-salt-master-kubelet-auth() {
-  # Only configure the kubelet on the master if the required variables are
-  # set in the environment.
-  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
-    create-salt-kubelet-auth
-  fi
-}
 
 # This should happen both on cluster initialization and node upgrades.
 #
 #  - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
```
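For orientation, the two CSV files the removed function wrote follow the apiserver's static auth formats; with invented credential values they would look like this:

```sh
# Hypothetical contents written by the removed create-salt-master-auth:
#
# /srv/salt-overlay/salt/kube-apiserver/basic_auth.csv  (password,user,group)
#   s3cretPassw0rd,admin,admin
#
# /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv  (token,user,group)
#   3xampleBearerT0ken,admin,admin
#   3xampleKubeletT0ken,kubelet,kubelet
#   3xampleProxyT0ken,kube_proxy,kube_proxy
```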
```diff
@@ -888,164 +755,6 @@ log_level_logfile: debug
 EOF
 }
 
-function salt-master-role() {
-  cat <<EOF >/etc/salt/minion.d/grains.conf
-grains:
-  roles:
-    - kubernetes-master
-  cloud: gce
-EOF
-
-  cat <<EOF >/etc/gce.conf
-[global]
-EOF
-  CLOUD_CONFIG='' # Set to non-empty path if we are using the gce.conf file
-
-  if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then
-    cat <<EOF >>/etc/gce.conf
-token-url = ${TOKEN_URL}
-token-body = ${TOKEN_BODY}
-project-id = ${PROJECT_ID}
-network-name = ${NODE_NETWORK}
-EOF
-    CLOUD_CONFIG=/etc/gce.conf
-    EXTERNAL_IP=$(curl --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  advertise_address: '${EXTERNAL_IP}'
-  proxy_ssh_user: '${PROXY_SSH_USER}'
-EOF
-  fi
-
-  if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
-    if [[ -n "${NODE_TAGS:-}" ]]; then
-      local -r node_tags="${NODE_TAGS}"
-    else
-      local -r node_tags="${NODE_INSTANCE_PREFIX}"
-    fi
-    cat <<EOF >>/etc/gce.conf
-node-tags = ${NODE_TAGS}
-node-instance-prefix = ${NODE_INSTANCE_PREFIX}
-EOF
-    CLOUD_CONFIG=/etc/gce.conf
-  fi
-
-  if [[ -n "${MULTIZONE:-}" ]]; then
-    cat <<EOF >>/etc/gce.conf
-multizone = ${MULTIZONE}
-EOF
-    CLOUD_CONFIG=/etc/gce.conf
-  fi
-
-  if [[ -n "${CLOUD_CONFIG:-}" ]]; then
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  cloud_config: ${CLOUD_CONFIG}
-EOF
-  else
-    rm -f /etc/gce.conf
-  fi
-
-  if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  webhook_authentication_config: /etc/gcp_authn.config
-EOF
-    cat <<EOF >/etc/gcp_authn.config
-clusters:
-  - name: gcp-authentication-server
-    cluster:
-      server: ${GCP_AUTHN_URL}
-users:
-  - name: kube-apiserver
-    user:
-      auth-provider:
-        name: gcp
-current-context: webhook
-contexts:
-- context:
-    cluster: gcp-authentication-server
-    user: kube-apiserver
-  name: webhook
-EOF
-  fi
-
-  if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  webhook_authorization_config: /etc/gcp_authz.config
-EOF
-    cat <<EOF >/etc/gcp_authz.config
-clusters:
-  - name: gcp-authorization-server
-    cluster:
-      server: ${GCP_AUTHZ_URL}
-users:
-  - name: kube-apiserver
-    user:
-      auth-provider:
-        name: gcp
-current-context: webhook
-contexts:
-- context:
-    cluster: gcp-authorization-server
-    user: kube-apiserver
-  name: webhook
-EOF
-  fi
-
-
-  if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
-    # This is the config file for the image review webhook.
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  image_review_config: /etc/gcp_image_review.config
-EOF
-    cat <<EOF >/etc/gcp_image_review.config
-clusters:
-  - name: gcp-image-review-server
-    cluster:
-      server: ${GCP_IMAGE_VERIFICATION_URL}
-users:
-  - name: kube-apiserver
-    user:
-      auth-provider:
-        name: gcp
-current-context: webhook
-contexts:
-- context:
-    cluster: gcp-image-review-server
-    user: kube-apiserver
-  name: webhook
-EOF
-    # This is the config for the image review admission controller.
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  image_review_webhook_config: /etc/admission_controller.config
-EOF
-    cat <<EOF >/etc/admission_controller.config
-imagePolicy:
-  kubeConfigFile: /etc/gcp_image_review.config
-  allowTTL: 30
-  denyTTL: 30
-  retryBackoff: 500
-  defaultAllow: true
-EOF
-  fi
-
-
-  # If the kubelet on the master is enabled, give it the same CIDR range
-  # as a generic node.
-  if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  kubelet_api_servers: '${KUBELET_APISERVER}'
-EOF
-  else
-    # If the kubelet is running disconnected from a master, give it a fixed
-    # CIDR range.
-    cat <<EOF >>/etc/salt/minion.d/grains.conf
-  cbr-cidr: ${MASTER_IP_RANGE}
-EOF
-  fi
-
-  env-to-grains "runtime_config"
-  env-to-grains "kube_user"
-}
 
 function salt-node-role() {
   cat <<EOF >/etc/salt/minion.d/grains.conf
 grains:
```
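The removed salt-master-role assembled /etc/gce.conf incrementally from whichever environment variables were set. With hypothetical values for the TOKEN_URL, NODE_INSTANCE_PREFIX, and MULTIZONE branches above, the resulting file would have read roughly:

```sh
# Illustrative /etc/gce.conf produced by the heredocs above
# (all values invented for the example):
cat /etc/gce.conf
# [global]
# token-url = https://gke.example.com/token
# token-body = {"projectNumber":123456}
# project-id = my-gce-project
# network-name = default
# node-tags = e2e-test-minion
# node-instance-prefix = e2e-test-minion
# multizone = true
```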
```diff
@@ -1091,15 +800,8 @@ function salt-grains() {
 function configure-salt() {
   mkdir -p /etc/salt/minion.d
   salt-run-local
-  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
-    salt-master-role
-    if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}"  ]; then
-        salt-apiserver-timeout-grain $KUBE_APISERVER_REQUEST_TIMEOUT
-    fi
-  else
-    salt-node-role
-    node-docker-opts
-  fi
+  salt-node-role
+  node-docker-opts
   salt-grains
   install-salt
   stop-salt-minion
```
```diff
@@ -1129,20 +831,10 @@ function run-user-script() {
   fi
 }
 
-function create-salt-master-etcd-auth {
-  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
-    local -r auth_dir="/srv/kubernetes"
-    echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
-    echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
-    echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
-  fi
-}
-
-# This script is re-used on AWS.  Some of the above functions will be replaced.
-# The AWS kube-up script looks for this marker:
-#+AWS_OVERRIDES_HERE
-
 ####################################################################################
+if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
+  echo "Support for debian master has been removed"
+  exit 1
+fi
 
 if [[ -z "${is_push}" ]]; then
   echo "== kube-up node config starting =="
```
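The decode pipeline in the removed create-salt-master-etcd-auth implies how those values were packed: the certificates arrived gzipped then base64-encoded, while the peer key was base64 only. A sketch of the encoding side (file names invented):

```sh
# Illustrative packing, inferred from the decode pipeline above;
# the input file names are hypothetical.
ETCD_CA_CERT="$(gzip -c etcd-ca.crt | base64 | tr -d '\n')"     # gzip, then base64
ETCD_PEER_KEY="$(base64 etcd-peer.key | tr -d '\n')"            # base64 only
ETCD_PEER_CERT="$(gzip -c etcd-peer.crt | base64 | tr -d '\n')" # gzip, then base64
```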
```diff
@@ -1156,17 +848,9 @@ if [[ -z "${is_push}" ]]; then
   auto-upgrade
   ensure-local-disks
   create-node-pki
-  [[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd
   create-salt-pillar
-  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
-    set-kube-master-certs
-    create-salt-master-auth
-    create-salt-master-etcd-auth
-    create-salt-master-kubelet-auth
-  else
-    create-salt-kubelet-auth
-    create-salt-kubeproxy-auth
-  fi
+  create-salt-kubelet-auth
+  create-salt-kubeproxy-auth
   download-release
   configure-salt
   remove-docker-artifacts
```
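The debian-specific master helper library is deleted outright; the hunk header below (`+0,0`) shows the whole 110-line file going away.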
```diff
@@ -1,110 +0,0 @@
-#!/bin/bash
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# A library of helper functions and constant for debian os distro
-
-
-# create-master-instance creates the master instance. If called with
-# an argument, the argument is used as the name to a reserved IP
-# address for the master. (In the case of upgrade/repair, we re-use
-# the same IP.)
-#
-# It requires a whole slew of assumed variables, partially due to to
-# the call to write-master-env. Listing them would be rather
-# futile. Instead, we list the required calls to ensure any additional
-# variables are set:
-#   ensure-temp-dir
-#   detect-project
-#   get-bearer-token
-#
-function create-master-instance {
-  local address_opt=""
-  [[ -n ${1:-} ]] && address_opt="--address ${1}"
-  local preemptible_master=""
-  if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
-    preemptible_master="--preemptible --maintenance-policy TERMINATE"
-  fi
-
-  write-master-env
-  prepare-startup-script
-  create-master-instance-internal "${MASTER_NAME}" "${address_opt}" "${preemptible_master}"
-}
-
-function replicate-master-instance() {
-  local existing_master_zone="${1}"
-  local existing_master_name="${2}"
-  local existing_master_replicas="${3}"
-
-  local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
-  # Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
-  kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
-  kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
-
-  # Substitute INITIAL_ETCD_CLUSTER_STATE
-  kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER_STATE")"
-  kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER_STATE: 'existing'")"
-
-  ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" |  sed "s/^.*: '//" | sed "s/'$//")"
-  ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" |  sed "s/^.*: '//" | sed "s/'$//")"
-
-  create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
-
-  kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
-  kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
-  kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
-  kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
-
-  echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
-  get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > ${KUBE_TEMP}/cluster-name.txt
-  get-metadata "${existing_master_zone}" "${existing_master_name}" startup-script > ${KUBE_TEMP}/configure-vm.sh
-  get-metadata "${existing_master_zone}" "${existing_master_name}" kube-master-certs > "${KUBE_TEMP}/kube-master-certs.yaml"
-
-  create-master-instance-internal "${REPLICA_NAME}"
-}
-
-function create-master-instance-internal() {
-  local -r master_name="${1}"
-  local -r address_option="${2:-}"
-  local -r preemptible_master="${3:-}"
-
-  gcloud compute instances create "${master_name}" \
-    ${address_option} \
-    --project "${PROJECT}" \
-    --zone "${ZONE}" \
-    --machine-type "${MASTER_SIZE}" \
-    --image-project="${MASTER_IMAGE_PROJECT}" \
-    --image "${MASTER_IMAGE}" \
-    --tags "${MASTER_TAG}" \
-    --network "${NETWORK}" \
-    --scopes "storage-ro,compute-rw,monitoring,logging-write" \
-    --can-ip-forward \
-    --metadata-from-file \
-      "startup-script=${KUBE_TEMP}/configure-vm.sh,kube-env=${KUBE_TEMP}/master-kube-env.yaml,cluster-name=${KUBE_TEMP}/cluster-name.txt,kube-master-certs=${KUBE_TEMP}/kube-master-certs.yaml" \
-    --disk "name=${master_name}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
-    --boot-disk-size "${MASTER_ROOT_DISK_SIZE:-10}" \
-    ${preemptible_master}
-}
-
-# TODO: This is most likely not the best way to read metadata from the existing master.
-function get-metadata() {
-  local zone="${1}"
-  local name="${2}"
-  local key="${3}"
-  gcloud compute ssh "${name}" \
-    --project "${PROJECT}" \
-    --zone="${zone}" \
-    --command "curl \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}\" -H \"Metadata-Flavor: Google\"" 2>/dev/null
-}
```
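For reference, the deleted replicate-master-instance drove get-metadata as below to clone an existing master's configuration before rewriting the etcd clustering fields (zone and instance names invented for the example):

```sh
# Hypothetical invocation of the deleted helper: read an existing master
# replica's kube-env over SSH, then inspect the etcd clustering entries.
kube_env="$(get-metadata "us-central1-b" "kubernetes-master" "kube-env")"
echo "${kube_env}" | grep "INITIAL_ETCD_CLUSTER"
```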
```diff
@@ -30,7 +30,7 @@ else
   exit 1
 fi
 
-if [[ "${MASTER_OS_DISTRIBUTION}" == "debian" || "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
+if [[ "${MASTER_OS_DISTRIBUTION}" == "container-linux" || "${MASTER_OS_DISTRIBUTION}" == "trusty" || "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
   source "${KUBE_ROOT}/cluster/gce/${MASTER_OS_DISTRIBUTION}/master-helper.sh"
 else
   echo "Cannot operate on cluster using master os distro: ${MASTER_OS_DISTRIBUTION}" >&2
```