Merge pull request #30845 from YuPengZTE/master

Automatic merge from submit-queue

The first letter should be capitalized
@@ -23,12 +23,12 @@
 # LIMITATIONS
-# 1. controllers are not updated unless their name is changed
+# 1. Controllers are not updated unless their name is changed
 # 3. Services will not be updated unless their name is changed,
 #    but for services we actually want updates without name change.
 # 4. Json files are not handled at all. Currently addons must be
 #    in yaml files
-# 5. exit code is probably not always correct (I haven't checked
+# 5. Exit code is probably not always correct (I haven't checked
 #    carefully if it works in 100% cases)
 # 6. There are no unittests
 # 8. Will not work if the total length of paths to addons is greater than
@@ -36,9 +36,9 @@
 # 9. Performance issue: yaml files are read many times in a single execution.
 
 # cosmetic improvements to be done
-# 1. improve the log function; add timestamp, file name, etc.
-# 2. logging doesn't work from files that print things out.
-# 3. kubectl prints the output to stderr (the output should be captured and then
+# 1. Improve the log function; add timestamp, file name, etc.
+# 2. Logging doesn't work from files that print things out.
+# 3. Kubectl prints the output to stderr (the output should be captured and then
 #    logged)
 
 # global config
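
TODO 1 in the hunk above hints at what an improved log function would look like. A minimal sketch, assuming Bash and a hypothetical `log` name (this is not code from the PR):

    log() {
      # Hypothetical improved logger: prefix each message with a timestamp
      # and the calling file's name, then write to stderr.
      echo "$(date '+%Y-%m-%d %H:%M:%S') ${BASH_SOURCE[1]##*/}: $*" >&2
    }
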
@@ -473,7 +473,7 @@ function stage-images() {
   done
 
   kube::util::wait-for-jobs || {
-    kube::log::error "unable to push images. see ${temp_dir}/*.log for more info."
+    kube::log::error "unable to push images. See ${temp_dir}/*.log for more info."
     return 1
   }
 
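
For context, `kube::util::wait-for-jobs` aggregates the exit status of previously backgrounded jobs (here, parallel image pushes). A minimal sketch of that pattern, using a hypothetical `wait_for_jobs` stand-in rather than the repo's actual helper:

    wait_for_jobs() {
      # Wait on every background job of this shell and count failures.
      local fail=0 job
      for job in $(jobs -p); do
        wait "${job}" || ((fail+=1))
      done
      # Succeed only if every job exited cleanly.
      ((fail == 0))
    }
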
@@ -107,7 +107,7 @@ EOF
   # files. Whenever logrotate is ran, this config will:
   # * rotate the log file if its size is > 100Mb OR if one day has elapsed
   # * save rotated logs into a gzipped timestamped backup
-  # * log file timestamp (controlled by 'dateformat') includes seconds too. this
+  # * log file timestamp (controlled by 'dateformat') includes seconds too. This
   #   ensures that logrotate can generate unique logfiles during each rotation
   #   (otherwise it skips rotation if 'maxsize' is reached multiple times in a
   #   day).
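
The comment block in this hunk describes a logrotate policy. A sketch of a stanza matching that description, written via a Bash heredoc; the log path, file name, and retention count are assumptions, not values from the actual file:

    cat > /etc/logrotate.d/kube-example <<EOF
    # Rotate when larger than 100Mb or once a day, keep gzipped backups,
    # and timestamp each backup down to the second (%s = epoch seconds),
    # so repeated 'maxsize' rotations in one day get unique names.
    /var/log/kube-example.log {
        rotate 5
        copytruncate
        missingok
        notifempty
        compress
        maxsize 100M
        daily
        dateext
        dateformat -%Y%m%d-%s
    }
    EOF
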
@@ -471,7 +471,7 @@ docker:
 # is managing Docker restart we should probably just delete this whole thing
 # but the kubernetes components use salt 'require' to set up a dag, and that
 # complicated and scary to unwind.
-# On AWS, we use a trick now... we don't start the docker service through Salt.
+# On AWS, we use a trick now... We don't start the docker service through Salt.
 # Kubelet or our health checker will start it.  But we use service.enabled,
 # so we still have a `service: docker` node for our DAG.
 {% if grains.cloud is defined and grains.cloud == 'aws' %}
@@ -58,7 +58,7 @@ function setClusterInfo() {
         NODE_IPS="$NODE_IPS,$nodeIP"
       fi
     else
-      echo "unsupported role for ${i}. please check"
+      echo "unsupported role for ${i}. Please check"
       exit 1
     fi
 
@@ -156,7 +156,7 @@ function verify-cluster() {
       verify-master
       verify-node "$i"
     else
-      echo "unsupported role for ${i}. please check"
+      echo "unsupported role for ${i}. Please check"
       exit 1
     fi
 
@@ -978,7 +978,7 @@ function kube-push() {
     elif [[ "${roles_array[${ii}]}" == "ai" ]]; then
       provision-masterandnode
     else
-      echo "unsupported role for ${i}. please check"
+      echo "unsupported role for ${i}. Please check"
       exit 1
     fi
     ((ii=ii+1))
@@ -220,7 +220,7 @@ function kube-run {
 
 #
 # run the command remotely and check if the specific kube artifact is running or not.
-# keep checking till the you hit the timeout. default timeout 300s
+# keep checking till the you hit the timeout. Default timeout 300s
 #
 # Usage:
 #   kube_check 10.0.0.1 cmd timeout
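
The comment documents `kube_check 10.0.0.1 cmd timeout`. A minimal sketch of the retry-until-timeout pattern it describes, assuming ssh transport and a 5-second poll interval (both assumptions; this is not the file's implementation):

    kube_check_sketch() {
      local node="$1" cmd="$2" timeout="${3:-300}" elapsed=0
      # Re-run the remote check until it succeeds or the timeout expires.
      while ((elapsed < timeout)); do
        ssh "${node}" "${cmd}" >/dev/null 2>&1 && return 0
        sleep 5
        ((elapsed+=5))
      done
      return 1
    }
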
@@ -263,7 +263,7 @@ function kube-check {
 }
 
 #
-# verify if salt master is up. check 30 times and then echo out bad output and return 0
+# verify if salt master is up. Check 30 times and then echo out bad output and return 0
 #
 # Usage:
 #   remote-pgrep 10.0.0.1 salt-master
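
remote-pgrep is the fixed-count variant: per the comment, it checks 30 times, then echoes the bad output and still returns 0. A sketch under the same assumptions (hypothetical helper name, ssh transport):

    remote_pgrep_sketch() {
      local node="$1" pattern="$2" i output
      for i in $(seq 1 30); do
        # pgrep exits non-zero when no matching process is found.
        output=$(ssh "${node}" "pgrep -f ${pattern}" 2>&1) && return 0
        sleep 2
      done
      # Per the comment above: report the bad output but do not fail.
      echo "${output}"
      return 0
    }
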