Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-03 19:58:17 +00:00)
	Merge pull request #12846 from piosz/autoscaling-e2e
Increased waiting for cluster size timeout for Autoscaling e2e
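This change threads an explicit `timeout time.Duration` parameter through `waitForClusterSize` in place of the hardcoded 10 minutes, so the autoscaling e2e tests can wait up to 20 minutes for the cluster to resize while the node-resize tests keep the original 10-minute wait.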
@@ -54,40 +54,40 @@ var _ = Describe("Autoscaling", func() {
 		setUpAutoscaler("cpu/node_utilization", 0.7, nodeCount, nodeCount+1)

 		ConsumeCpu(f, "cpu-utilization", nodeCount*coresPerNode)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1))
+		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))

 		StopConsuming(f, "cpu-utilization")
-		expectNoError(waitForClusterSize(f.Client, nodeCount))
+		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on cpu reservation", func() {
 		setUpAutoscaler("cpu/node_reservation", 0.7, 1, 10)

 		ReserveCpu(f, "cpu-reservation", 800)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))

 		StopConsuming(f, "cpu-reservation")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on memory utilization", func() {
 		setUpAutoscaler("memory/node_utilization", 0.5, 1, 10)

 		ConsumeMemory(f, "memory-utilization", 2)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))

 		StopConsuming(f, "memory-utilization")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on memory reservation", func() {
 		setUpAutoscaler("memory/node_reservation", 0.5, 1, 10)

 		ReserveMemory(f, "memory-reservation", 2)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))

 		StopConsuming(f, "memory-reservation")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})
})

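All eight autoscaling call sites above now pass 20*time.Minute, double the old hardcoded default; presumably the extra headroom is there because resizing the node group and rescheduling pods in these tests can take longer than ten minutes.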
@@ -110,8 +110,7 @@ func waitForGroupSize(size int) error {
 	return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
 }

-func waitForClusterSize(c *client.Client, size int) error {
-	timeout := 10 * time.Minute
+func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
 		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
 		if err != nil {
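The hunk above ends mid-function. For readers following along, here is a minimal sketch of the full polling loop under the new signature. Everything past `if err != nil {` is not shown in the diff, so the retry and success handling below (including the Logf calls) are assumptions, modeled on the waitForGroupSize error message visible in the same hunk; the sketch also assumes the client, labels, and fields packages and the e2e Logf helper already imported in this file.

func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
	// Poll every 20 seconds until the node count matches or the caller's
	// timeout elapses.
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
		if err != nil {
			// Assumed: log the error and retry rather than failing immediately.
			Logf("Failed to list nodes: %v", err)
			continue
		}
		if len(nodes.Items) == size {
			Logf("Cluster has reached the desired size %d", size)
			return nil
		}
		Logf("Waiting for cluster size %d, current size %d", size, len(nodes.Items))
	}
	return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}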
@@ -441,7 +440,7 @@ var _ = Describe("Nodes", func() {
 			if err := waitForGroupSize(testContext.CloudConfig.NumNodes); err != nil {
 				Failf("Couldn't restore the original node instance group size: %v", err)
 			}
-			if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes); err != nil {
+			if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
 				Failf("Couldn't restore the original cluster size: %v", err)
 			}
 		})
@@ -460,7 +459,7 @@ var _ = Describe("Nodes", func() {
 			Expect(err).NotTo(HaveOccurred())
 			err = waitForGroupSize(replicas - 1)
 			Expect(err).NotTo(HaveOccurred())
-			err = waitForClusterSize(c, replicas-1)
+			err = waitForClusterSize(c, replicas-1, 10*time.Minute)
 			Expect(err).NotTo(HaveOccurred())

 			By("verifying whether the pods from the removed node are recreated")
@@ -484,7 +483,7 @@ var _ = Describe("Nodes", func() {
 			Expect(err).NotTo(HaveOccurred())
 			err = waitForGroupSize(replicas + 1)
 			Expect(err).NotTo(HaveOccurred())
-			err = waitForClusterSize(c, replicas+1)
+			err = waitForClusterSize(c, replicas+1, 10*time.Minute)
 			Expect(err).NotTo(HaveOccurred())

 			By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))