	Fix flaky and failing HPA E2E Behavior tests
@@ -27,7 +27,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 )
 
-var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod autoscaling (non-default behavior)", func() {
+var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior)", func() {
 	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
@@ -209,12 +209,16 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 	})
 
 	ginkgo.Describe("with scale limited by number of Pods rate", func() {
+		podCPURequest := 200
+		targetCPUUtilizationPercent := 25
+		usageForSingleReplica := 45
+
 		ginkgo.It("should scale up no more than given number of Pods per minute", func() {
 			ginkgo.By("setting up resource consumer and HPA")
 			initPods := 1
 			initCPUUsageTotal := initPods * usageForSingleReplica
 			limitWindowLength := 1 * time.Minute
-			podsLimitPerMinute := 2
+			podsLimitPerMinute := 1
 
 			rc := e2eautoscaling.NewDynamicResourceConsumer(
 				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
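A note on the constants introduced above: with podCPURequest = 200 (milli-CPU) and targetCPUUtilizationPercent = 25, each replica's utilization target is 200m x 25% = 50m, while one replica's worth of load is usageForSingleReplica = 45m, deliberately just under that target. Consuming N x 45m therefore drives the HPA's desired count to ceil(45N / 50); for the scale-up test below, 3 x 45m = 135m yields ceil(2.7) = 3 replicas. A minimal sketch of that arithmetic, assuming the standard HPA utilization formula (the helper name is ours, not the framework's):

package main

import (
	"fmt"
	"math"
)

// desiredReplicas mirrors the HPA utilization formula:
// ceil(totalUsage / (podRequest * targetUtilization)).
// Illustrative helper only; the real computation lives in the HPA controller.
func desiredReplicas(totalUsageMilli, podRequestMilli, targetUtilizationPercent int) int {
	perReplicaTarget := float64(podRequestMilli) * float64(targetUtilizationPercent) / 100.0
	return int(math.Ceil(float64(totalUsageMilli) / perReplicaTarget))
}

func main() {
	// Values from the test: podCPURequest=200, targetCPUUtilizationPercent=25,
	// usageForSingleReplica=45, and consumption of 3*45=135 milli-CPU.
	fmt.Println(desiredReplicas(3*45, 200, 25)) // 3
}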
@@ -230,33 +234,33 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
 
 			ginkgo.By("triggering scale up by increasing consumption")
-			rc.ConsumeCPU(5 * usageForSingleReplica)
+			rc.ConsumeCPU(3 * usageForSingleReplica)
 
 			waitStart := time.Now()
+			rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
+			timeWaitedFor2 := time.Now().Sub(waitStart)
+
+			waitStart = time.Now()
 			rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
 			timeWaitedFor3 := time.Now().Sub(waitStart)
 
-			waitStart = time.Now()
-			rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
-			timeWaitedFor5 := time.Now().Sub(waitStart)
-
-			ginkgo.By("verifying time waited for a scale up to 3 replicas")
+			ginkgo.By("verifying time waited for a scale up to 2 replicas")
 			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
 			// First scale event can happen right away, as there were no scale events in the past.
-			framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
+			framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
 
-			ginkgo.By("verifying time waited for a scale up to 5 replicas")
+			ginkgo.By("verifying time waited for a scale up to 3 replicas")
 			// Second scale event needs to respect limit window.
-			framework.ExpectEqual(timeWaitedFor5 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
-			framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
+			framework.ExpectEqual(timeWaitedFor3 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor3, limitWindowLength)
+			framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
 		})
 
 		ginkgo.It("should scale down no more than given number of Pods per minute", func() {
 			ginkgo.By("setting up resource consumer and HPA")
-			initPods := 6
+			initPods := 3
 			initCPUUsageTotal := initPods * usageForSingleReplica
 			limitWindowLength := 1 * time.Minute
-			podsLimitPerMinute := 2
+			podsLimitPerMinute := 1
 
 			rc := e2eautoscaling.NewDynamicResourceConsumer(
 				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
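The rewritten assertions all derive from a single timing invariant: every observed scale step must complete within deadline = limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay, and every step after the first must additionally take longer than limitWindowLength, since the Pods policy now allows only one new pod per window. A hedged sketch of that invariant; the concrete durations below are stand-ins, not the framework's actual maxHPAReactionTime and maxResourceConsumerDelay values:

package main

import (
	"fmt"
	"time"
)

// checkStep encodes the test's timing invariant for one scale event.
func checkStep(waited, limitWindow, reaction, consumerDelay time.Duration, first bool) error {
	deadline := limitWindow + reaction + consumerDelay
	if waited >= deadline {
		return fmt.Errorf("waited %s, wanted less than %s", waited, deadline)
	}
	// Only the first event may fire before a full limit window has passed.
	if !first && waited <= limitWindow {
		return fmt.Errorf("waited %s, wanted more than %s", waited, limitWindow)
	}
	return nil
}

func main() {
	window := 1 * time.Minute
	reaction := 15 * time.Second      // assumption, not the framework value
	consumerDelay := 15 * time.Second // assumption, not the framework value
	fmt.Println(checkStep(20*time.Second, window, reaction, consumerDelay, true))  // <nil>: first step may be fast
	fmt.Println(checkStep(70*time.Second, window, reaction, consumerDelay, false)) // <nil>: second step waited out the window
}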
@@ -275,29 +279,33 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 			rc.ConsumeCPU(1 * usageForSingleReplica)
 
 			waitStart := time.Now()
-			rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
-			timeWaitedFor4 := time.Now().Sub(waitStart)
-
-			waitStart = time.Now()
 			rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
 			timeWaitedFor2 := time.Now().Sub(waitStart)
 
-			ginkgo.By("verifying time waited for a scale down to 4 replicas")
-			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
-			// First scale event can happen right away, as there were no scale events in the past.
-			framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)
+			waitStart = time.Now()
+			rc.WaitForReplicas(1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
+			timeWaitedFor1 := time.Now().Sub(waitStart)
 
 			ginkgo.By("verifying time waited for a scale down to 2 replicas")
-			// Second scale event needs to respect limit window.
-			framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
+			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
+			// First scale event can happen right away, as there were no scale events in the past.
 			framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
+
+			ginkgo.By("verifying time waited for a scale down to 1 replicas")
+			// Second scale event needs to respect limit window.
+			framework.ExpectEqual(timeWaitedFor1 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor1, limitWindowLength)
+			framework.ExpectEqual(timeWaitedFor1 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor1, deadline)
 		})
 	})
 
 	ginkgo.Describe("with scale limited by percentage", func() {
+		podCPURequest := 200
+		targetCPUUtilizationPercent := 25
+		usageForSingleReplica := 45
+
 		ginkgo.It("should scale up no more than given percentage of current Pods per minute", func() {
 			ginkgo.By("setting up resource consumer and HPA")
-			initPods := 4
+			initPods := 2
 			initCPUUsageTotal := initPods * usageForSingleReplica
 			limitWindowLength := 1 * time.Minute
 			percentageLimitPerMinute := 50
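For orientation, the per-window pod limit these tests configure through HPAScalingRuleWithScalingPolicy corresponds to an autoscaling/v2 behavior stanza along the lines of the sketch below. This is a hand-written illustration of the API shape, not the helper's verbatim output:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func main() {
	// At most one scale-down event of 1 pod per 60s window, matching
	// podsLimitPerMinute := 1 and limitWindowLength := 1 * time.Minute.
	behavior := autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2.HPAScalingRules{
			Policies: []autoscalingv2.HPAScalingPolicy{{
				Type:          autoscalingv2.PodsScalingPolicy,
				Value:         1,
				PeriodSeconds: 60,
			}},
		},
	}
	fmt.Printf("%+v\n", behavior.ScaleDown.Policies[0])
}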
@@ -316,33 +324,34 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
 
 			ginkgo.By("triggering scale up by increasing consumption")
-			rc.ConsumeCPU(10 * usageForSingleReplica)
+			rc.ConsumeCPU(8 * usageForSingleReplica)
 
 			waitStart := time.Now()
-			rc.WaitForReplicas(6, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
-			timeWaitedFor6 := time.Now().Sub(waitStart)
+			rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
+			timeWaitedFor3 := time.Now().Sub(waitStart)
 
 			waitStart = time.Now()
-			rc.WaitForReplicas(9, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
-			timeWaitedFor9 := time.Now().Sub(waitStart)
+			// Scale up limited by percentage takes ceiling, so new replicas number is ceil(3 * 1.5) = ceil(4.5) = 5
+			rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
+			timeWaitedFor5 := time.Now().Sub(waitStart)
 
-			ginkgo.By("verifying time waited for a scale up to 6 replicas")
+			ginkgo.By("verifying time waited for a scale up to 3 replicas")
 			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
 			// First scale event can happen right away, as there were no scale events in the past.
-			framework.ExpectEqual(timeWaitedFor6 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor6, deadline)
+			framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
 
-			ginkgo.By("verifying time waited for a scale up to 9 replicas")
+			ginkgo.By("verifying time waited for a scale up to 5 replicas")
 			// Second scale event needs to respect limit window.
-			framework.ExpectEqual(timeWaitedFor9 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor9, limitWindowLength)
-			framework.ExpectEqual(timeWaitedFor9 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor9, deadline)
+			framework.ExpectEqual(timeWaitedFor5 > limitWindowLength, true, "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
+			framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
 		})
 
 		ginkgo.It("should scale down no more than given percentage of current Pods per minute", func() {
 			ginkgo.By("setting up resource consumer and HPA")
-			initPods := 8
+			initPods := 7
 			initCPUUsageTotal := initPods * usageForSingleReplica
 			limitWindowLength := 1 * time.Minute
-			percentageLimitPerMinute := 50
+			percentageLimitPerMinute := 25
 
 			rc := e2eautoscaling.NewDynamicResourceConsumer(
 				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
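The new ceiling comment deserves spelling out. With percentageLimitPerMinute = 50, each window may grow the replica count to at most 150% of its current value, rounded up. Starting from initPods = 2, the admissible steps are ceil(2 x 1.5) = 3 and then ceil(3 x 1.5) = ceil(4.5) = 5, exactly the 3-then-5 sequence the test waits for. A tiny sketch of the rounding (the helper name is illustrative):

package main

import (
	"fmt"
	"math"
)

// maxScaleUp applies a percentage scaling policy for one window,
// rounding up as the HPA controller does for scale up.
func maxScaleUp(current, percentPerWindow int) int {
	return int(math.Ceil(float64(current) * (1 + float64(percentPerWindow)/100)))
}

func main() {
	fmt.Println(maxScaleUp(2, 50)) // 3
	fmt.Println(maxScaleUp(3, 50)) // ceil(4.5) = 5
}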
@@ -361,26 +370,29 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 			rc.ConsumeCPU(1 * usageForSingleReplica)
 
 			waitStart := time.Now()
-			rc.WaitForReplicas(4, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
-			timeWaitedFor4 := time.Now().Sub(waitStart)
+			rc.WaitForReplicas(5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
+			timeWaitedFor5 := time.Now().Sub(waitStart)
 
 			waitStart = time.Now()
-			rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
-			timeWaitedFor2 := time.Now().Sub(waitStart)
+			// Scale down limited by percentage takes floor, so new replicas number is floor(5 * 0.75) = floor(3.75) = 3
+			rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
+			timeWaitedFor3 := time.Now().Sub(waitStart)
 
-			ginkgo.By("verifying time waited for a scale down to 4 replicas")
+			ginkgo.By("verifying time waited for a scale down to 5 replicas")
 			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
 			// First scale event can happen right away, as there were no scale events in the past.
-			framework.ExpectEqual(timeWaitedFor4 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor4, deadline)
+			framework.ExpectEqual(timeWaitedFor5 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor5, deadline)
 
-			ginkgo.By("verifying time waited for a scale down to 2 replicas")
+			ginkgo.By("verifying time waited for a scale down to 3 replicas")
 			// Second scale event needs to respect limit window.
-			framework.ExpectEqual(timeWaitedFor2 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor2, limitWindowLength)
-			framework.ExpectEqual(timeWaitedFor2 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor2, deadline)
+			framework.ExpectEqual(timeWaitedFor3 > limitWindowLength, true, "waited %s, wanted more than %s", timeWaitedFor3, limitWindowLength)
+			framework.ExpectEqual(timeWaitedFor3 < deadline, true, "waited %s, wanted less than %s", timeWaitedFor3, deadline)
 		})
 	})
 
 	ginkgo.Describe("with both scale up and down controls configured", func() {
+		waitBuffer := 2 * time.Minute
+
 		ginkgo.It("should keep recommendation within the range over two stabilization windows", func() {
 			ginkgo.By("setting up resource consumer and HPA")
 			initPods := 2
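Scale down rounds the other way. With percentageLimitPerMinute = 25, each window may shrink the count to no less than 75% of its current value, rounded down, so initPods = 7 steps to floor(7 x 0.75) = floor(5.25) = 5 and then floor(5 x 0.75) = floor(3.75) = 3, matching the waits for 5 and then 3 replicas above. Sketch under the same assumptions as before:

package main

import (
	"fmt"
	"math"
)

// maxScaleDown applies a percentage scaling policy for one window,
// rounding down as the HPA controller does for scale down.
func maxScaleDown(current, percentPerWindow int) int {
	return int(math.Floor(float64(current) * (1 - float64(percentPerWindow)/100)))
}

func main() {
	fmt.Println(maxScaleDown(7, 25)) // floor(5.25) = 5
	fmt.Println(maxScaleDown(5, 25)) // floor(3.75) = 3
}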
@@ -396,13 +408,13 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 			defer rc.CleanUp()
 
 			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-				rc, int32(targetCPUUtilizationPercent), 2, 10,
+				rc, int32(targetCPUUtilizationPercent), 2, 5,
 				e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
 			)
 			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
 
 			ginkgo.By("triggering scale up by increasing consumption")
-			rc.ConsumeCPU(5 * usageForSingleReplica)
+			rc.ConsumeCPU(4 * usageForSingleReplica)
 			waitDeadline := upScaleStabilization
 
 			ginkgo.By("verifying number of replicas stay in desired range within stabilisation window")
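Stabilization windows, exercised from here on, are a different mechanism from the rate limits above: rather than capping the step size, they make the controller act on the most conservative recommendation seen during the window (the smallest for scale up, the largest for scale down). Below is a hand-written sketch of the behavior stanza a helper like HPABehaviorWithStabilizationWindows plausibly assembles; the window lengths are assumptions, since the actual upScaleStabilization and downScaleStabilization values lie outside this diff:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

func int32Ptr(v int32) *int32 { return &v }

func main() {
	// Separate stabilization windows for scale up and scale down.
	behavior := autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			StabilizationWindowSeconds: int32Ptr(180), // assumed 3-minute upScaleStabilization
		},
		ScaleDown: &autoscalingv2.HPAScalingRules{
			StabilizationWindowSeconds: int32Ptr(180), // assumed 3-minute downScaleStabilization
		},
	}
	fmt.Println(*behavior.ScaleUp.StabilizationWindowSeconds, *behavior.ScaleDown.StabilizationWindowSeconds)
}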
@@ -450,7 +462,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod
 			scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
 			scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds()))
 			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-				rc, int32(targetCPUUtilizationPercent), 2, 10,
+				rc, int32(targetCPUUtilizationPercent), 2, 5,
 				e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule),
 			)
 			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)