	[Federation] Unjoin only the joined clusters while bringing down the federation control plane.
A few other minor improvements.
@@ -416,4 +416,10 @@ function cleanup-federation-api-objects {
   $host_kubectl delete pods,svc,rc,deployment,secret -lapp=federated-cluster
   # Delete all resources in FEDERATION_NAMESPACE.
   $host_kubectl delete pvc,pv,pods,svc,rc,deployment,secret --namespace=${FEDERATION_NAMESPACE} --all
+  $host_kubectl delete ns ${FEDERATION_NAMESPACE}
+
+  # Poll until the namespace is completely gone.
+  while $host_kubectl get namespace ${FEDERATION_NAMESPACE} >/dev/null 2>&1; do
+    sleep 5
+  done
 }
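Namespace deletion is asynchronous, so the loop added above polls until the API server no longer reports the namespace. A minimal standalone sketch of the same wait-for-deletion pattern, with a bounded retry budget added as an assumption (the namespace name and limits below are illustrative, not part of this change):

    # Wait for a namespace to finish terminating, giving up after ~5 minutes.
    NAMESPACE="federation-system"   # illustrative name
    for _ in $(seq 1 60); do
      if ! kubectl get namespace "${NAMESPACE}" >/dev/null 2>&1; then
        break
      fi
      sleep 5
    done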
@@ -24,9 +24,27 @@ KUBE_ROOT=$(readlink -m $(dirname "${BASH_SOURCE}")/../../)
 # $KUBEDNS_CONFIGMAP_NAME and $KUBEDNS_CONFIGMAP_NAMESPACE.
 source "${KUBE_ROOT}/federation/cluster/common.sh"
 
+# federation_clusters returns a list of all the clusters in the
+# federation, provided the federation control plane exists and
+# there are any clusters registered.
+function federation_clusters() {
+  if clusters=$("${KUBE_ROOT}/cluster/kubectl.sh" \
+      --context="${FEDERATION_KUBE_CONTEXT}" \
+      -o jsonpath --template '{.items[*].metadata.name}' \
+      get clusters) ; then
+    echo ${clusters}
+    return
+  fi
+  echo ""
+}
+
 # unjoin_clusters unjoins all the clusters from federation.
 function unjoin_clusters() {
-  for context in $(federation_cluster_contexts); do
+  # Unjoin only those clusters that are registered with the
+  # given federation. This is slightly different from joining
+  # clusters, where we join all the clusters in the current
+  # kubeconfig with the "federation" prefix.
+  for context in $(federation_clusters); do
     kube::log::status "Unjoining cluster \"${context}\" from federation \"${FEDERATION_NAME}\""
 
     "${KUBE_ROOT}/federation/develop/kubefed.sh" unjoin \
@@ -48,6 +66,7 @@ if cleanup-federation-api-objects; then
   # cloud provider cleanups are implemented in the individual test
   # `AfterEach` blocks.
   # Also, we wait only if the cleanup succeeds.
+  kube::log::status "Waiting for 2 minutes to allow controllers to clean up federation components..."
   sleep 2m
 else
   echo "Couldn't cleanup federation api objects"
@@ -79,6 +79,9 @@ function init() {
   local -r kube_registry="${KUBE_REGISTRY:-gcr.io/${project}}"
   local -r kube_version="$(get_version)"
 
+  kube::log::status "DNS_ZONE_NAME: \"${DNS_ZONE_NAME}\", DNS_PROVIDER: \"${DNS_PROVIDER}\""
+  kube::log::status "Image: \"${kube_registry}/hyperkube-amd64:${kube_version}\""
+
   "${KUBE_ROOT}/federation/develop/kubefed.sh" init \
       "${FEDERATION_NAME}" \
       --host-cluster-context="${HOST_CLUSTER_CONTEXT}" \
@@ -22,13 +22,9 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
 
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
-#A little hack to get the last zone. we always deploy federated cluster to the last zone.
-#TODO(colhom): deploy federated control plane to multiple underlying clusters in robust way
-lastZone=""
-for zone in ${E2E_ZONES};do
-    lastZone="$zone"
-done
+: "${FEDERATION_HOST_CLUSTER_ZONE?Must set FEDERATION_HOST_CLUSTER_ZONE env var}"
+
 (
-    set-federation-zone-vars "$zone"
+    set-federation-zone-vars "${FEDERATION_HOST_CLUSTER_ZONE}"
     "${KUBE_ROOT}/hack/ginkgo-e2e.sh" $@
 )
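The replacement relies on bash's ${parameter?word} expansion: if FEDERATION_HOST_CLUSTER_ZONE is unset, the shell prints the message and aborts the script, instead of silently reusing the last zone in E2E_ZONES as before. A tiny illustration of the construct (the variable name below is only an example):

    #!/usr/bin/env bash
    # Aborts with "ZONE: Must set ZONE env var" when ZONE is not set;
    # an empty-but-set value would pass, since the form is "?" rather than ":?".
    : "${ZONE?Must set ZONE env var}"
    echo "Deploying to zone ${ZONE}"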