Merge pull request #43513 from MaciekPytel/ca_fix_e2e

Automatic merge from submit-queue (batch tested with PRs 43513, 43499)

Fix Cluster-Autoscaler e2e failing on some node configs

This fixes a cluster-autoscaler e2e failure on some node configs (namely if memory capacity == allocatable memory).

**Release note**:
```release-note
```
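The underlying issue: the test sized its "too large" pods at exactly a node's memory capacity, which is only guaranteed to be unschedulable when some memory is reserved (allocatable < capacity). On node configs where allocatable == capacity, such a pod can still fit, breaking the test's premise. A minimal sketch of that arithmetic, using a hypothetical capacity value (the real memCapacityMb is read from the cluster under test):

```go
package main

import "fmt"

// fits mirrors the scheduler's basic memory check: a pod fits on a node if
// its memory request does not exceed the node's allocatable memory.
func fits(requestMb, allocatableMb int) bool {
	return requestMb <= allocatableMb
}

func main() {
	memCapacityMb := 7500 // hypothetical node memory capacity (MB)

	// Node config with reserved memory (allocatable < capacity): a pod
	// requesting the full capacity is unschedulable, as the test expects.
	fmt.Println(fits(memCapacityMb, memCapacityMb-500)) // false

	// Node config where allocatable == capacity: the old request of exactly
	// memCapacityMb still fits, so the "too large" pod could be scheduled.
	fmt.Println(fits(memCapacityMb, memCapacityMb)) // true

	// The new request of int(1.1*float64(memCapacityMb)) cannot fit on any
	// node shape, restoring the test's assumption.
	fmt.Println(fits(int(1.1*float64(memCapacityMb)), memCapacityMb)) // false
}
```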
```diff
@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 
 	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		By("Creating unschedulable pod")
-		ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
+		ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memCapacityMb)), false)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		By("Waiting for scale up hoping it won't happen")
```
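With a single replica, the reserved pod now asks for more memory than any node's capacity, so it stays pending on every node config; the test then waits and checks that the autoscaler does not add nodes for a pod it could never place, as in the sketch above.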
```diff
@@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 
 		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-		ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
+		ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memCapacityMb)), false)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		// Apparently GKE master is restarted couple minutes after the node pool is added
```
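For the second test, a short sketch of the sizing, assuming ReserveMemory spreads the total request evenly across replicas and using hypothetical node sizes for the default and extra pools (neither value appears in the diff): with the new total of int(2.1*float64(memCapacityMb)), each of the 2 pods requests roughly 1.05x a default-pool node's capacity, so it cannot fit there even when allocatable == capacity, yet still fits the larger extra-pool nodes.

```go
package main

import "fmt"

func main() {
	// Hypothetical node sizes; the real values depend on the cluster under test.
	defaultPoolMb := 7500
	extraPoolMb := 2 * defaultPoolMb // the extra pool uses larger machines

	// Assumed ReserveMemory behaviour: the total is split evenly over replicas.
	replicas := 2
	total := int(2.1 * float64(defaultPoolMb))
	perPod := total / replicas // ~1.05 * defaultPoolMb

	fmt.Println(perPod > defaultPoolMb) // true: too big for default-pool, even if allocatable == capacity
	fmt.Println(perPod <= extraPoolMb)  // true: fits an extra-pool node, so only the extra-pool scales up
}
```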