	Merge pull request #61119 from mtaufen/fix-cluster-autoscaler
Automatic merge from submit-queue (batch tested with PRs 61284, 61119, 61201). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add AUTOSCALER_ENV_VARS to kube-env to hotfix cluster autoscaler

This provides a temporary way for the cluster autoscaler to get at values that were removed from kube-env in #60020. Ideally this information will eventually be available via e.g. the Cluster API, because kube-env is an internal interface that carries no stability guarantees.

This is the first half of the fix; the other half is that the cluster autoscaler needs to be modified to read from AUTOSCALER_ENV_VARS, if it is available. Since the cluster autoscaler was also reading KUBELET_TEST_ARGS for the kube-reserved flag, and we don't want to resurrect KUBELET_TEST_ARGS in kube-env, we opted to create AUTOSCALER_ENV_VARS instead of just adding back the old env vars. This also makes it clear that we have an ugly dependency on kube-env.

```release-note
NONE
```
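The autoscaler-side change is not part of this diff. As a minimal sketch, assuming the `key=value;key=value` format emitted below, a consumer could split AUTOSCALER_ENV_VARS like this (the variable names and sample value are illustrative, not from this PR):

```sh
# Illustration only: parse an AUTOSCALER_ENV_VARS-style string by splitting
# on ';' and then on the first '='. The sample value is hypothetical.
autoscaler_env_vars='node_labels=env=test,foo=bar;node_taints=dedicated=gpu:NoSchedule'

IFS=';' read -ra entries <<< "${autoscaler_env_vars}"
for entry in "${entries[@]}"; do
  key="${entry%%=*}"   # text before the first '='
  value="${entry#*=}"  # text after the first '='
  echo "${key} -> ${value}"
done
# Prints:
#   node_labels -> env=test,foo=bar
#   node_taints -> dedicated=gpu:NoSchedule
```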
```diff
@@ -514,6 +514,23 @@ function write-node-env {
   build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
 }
 
+function build-node-labels {
+  local master=$1
+  local node_labels=""
+  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then
+    # Add kube-proxy daemonset label to node to avoid situation during cluster
+    # upgrade/downgrade when there are two instances of kube-proxy running on a node.
+    node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
+  fi
+  if [[ -n "${NODE_LABELS:-}" ]]; then
+    node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
+  fi
+  if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then
+    node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
+  fi
+  echo $node_labels
+}
+
 # $1: if 'true', we're rendering flags for a master, else a node
 function construct-kubelet-flags {
   local master=$1
```
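For illustration (not part of the change), here is how the new `build-node-labels` helper composes its output under some assumed env values; the `${node_labels:+${node_labels},}` expansion inserts a comma separator only when labels have already been accumulated:

```sh
# Assumed values, for illustration only.
KUBE_PROXY_DAEMONSET="true"
NODE_LABELS="cloud.google.com/gke-nodepool=pool-1"
NON_MASTER_NODE_LABELS="env=test"

build-node-labels false
# -> beta.kubernetes.io/kube-proxy-ds-ready=true,cloud.google.com/gke-nodepool=pool-1,env=test

build-node-labels true
# -> cloud.google.com/gke-nodepool=pool-1  (the kube-proxy and non-master labels are skipped)
```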
```diff
@@ -585,18 +602,7 @@ function construct-kubelet-flags {
   if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
     flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
   fi
-  local node_labels=""
-  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then
-    # Add kube-proxy daemonset label to node to avoid situation during cluster
-    # upgrade/downgrade when there are two instances of kube-proxy running on a node.
-    node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
-  fi
-  if [[ -n "${NODE_LABELS:-}" ]]; then
-    node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
-  fi
-  if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then
-    node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
-  fi
+  local node_labels=$(build-node-labels ${master})
   if [[ -n "${node_labels:-}" ]]; then
     flags+=" --node-labels=${node_labels}"
   fi
@@ -972,6 +978,17 @@ ENABLE_CLUSTER_AUTOSCALER: $(yaml-quote ${ENABLE_CLUSTER_AUTOSCALER})
 AUTOSCALER_MIG_CONFIG: $(yaml-quote ${AUTOSCALER_MIG_CONFIG})
 AUTOSCALER_EXPANDER_CONFIG: $(yaml-quote ${AUTOSCALER_EXPANDER_CONFIG})
 EOF
+      if [[ "${master}" == "false" ]]; then
+          # TODO(kubernetes/autoscaler#718): AUTOSCALER_ENV_VARS is a hotfix for cluster autoscaler,
+          # which reads the kube-env to determine the shape of a node and was broken by #60020.
+          # This should be removed as soon as a more reliable source of information is available!
+          local node_labels=$(build-node-labels false)
+          local node_taints="${NODE_TAINTS:-}"
+          local autoscaler_env_vars="node_labels=${node_labels};node_taints=${node_taints}"
+          cat >>$file <<EOF
+AUTOSCALER_ENV_VARS: $(yaml-quote ${autoscaler_env_vars})
+EOF
+      fi
   fi
   if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
     cat >>$file <<EOF
```
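Putting it together: with the assumed values above and, say, NODE_TAINTS="dedicated=gpu:NoSchedule", the generated node kube-env would gain an entry along these lines (exact quoting depends on yaml-quote):

```sh
# Illustration only: the shape of the emitted kube-env entry, given the
# assumed label values above and NODE_TAINTS="dedicated=gpu:NoSchedule".
node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true,cloud.google.com/gke-nodepool=pool-1,env=test"
node_taints="dedicated=gpu:NoSchedule"
echo "AUTOSCALER_ENV_VARS: \"node_labels=${node_labels};node_taints=${node_taints}\""
# AUTOSCALER_ENV_VARS: "node_labels=beta.kubernetes.io/kube-proxy-ds-ready=true,cloud.google.com/gke-nodepool=pool-1,env=test;node_taints=dedicated=gpu:NoSchedule"
```

The remaining three hunks are whitespace-only cleanups (trailing spaces removed):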
```diff
@@ -1408,7 +1425,7 @@ function get-template-name-from-version() {
   echo "${NODE_INSTANCE_PREFIX}-template-${1}" | cut -c 1-63 | sed 's/[\.\+]/-/g;s/-*$//g'
 }
 
-# validates the NODE_LOCAL_SSDS_EXT variable 
+# validates the NODE_LOCAL_SSDS_EXT variable
 function validate-node-local-ssds-ext(){
   ssdopts="${1}"
 
@@ -1484,7 +1501,7 @@ function create-node-template() {
       done
     done
   fi
-  
+
   if [[ ! -z ${NODE_LOCAL_SSDS+x} ]]; then
     # The NODE_LOCAL_SSDS check below fixes issue #49171
     # Some versions of seq will count down from 1 if "seq 0" is specified
@@ -1494,7 +1511,7 @@ function create-node-template() {
       done
     fi
   fi
-  
+
 
   local network=$(make-gcloud-network-argument \
     "${NETWORK_PROJECT}" \
```