	Merge pull request #48997 from johanneswuerbach/hpa-min
Automatic merge from submit-queue (batch tested with PRs 48997, 48595, 48898, 48711, 48972)

hpa: Prevent scaling below MinReplicas if desiredReplicas is zero

**What this PR does / why we need it**: Prevents an HPA from scaling below `minReplicas` when `desiredReplicas` is calculated as `0`.

Example events of an HPA continuously scaling between `1` and `MinReplicas`:

```
2h  59s  22  horizontal-pod-autoscaler  Normal  SuccessfulRescale  New size: 1; reason: All metrics below target
2h  29s  22  horizontal-pod-autoscaler  Normal  SuccessfulRescale  New size: 15; reason: Current number of replicas below Spec.MinReplicas
```

**Which issue this PR fixes**: fixes #49028

**Release note**:

```release-note
hpa: Prevent scaling below MinReplicas if desiredReplicas is zero
```
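For context, here is a minimal, self-contained sketch of the underlying problem. This is not the controller's actual code (the real switch also handles a nil `hpa.Spec.MinReplicas`, the scale-up rate limit, and `MaxReplicas`), and the function names `normalizeOld`/`normalizeNew` are made up for illustration. A Go `switch` runs only the first matching `case`, so with the pre-fix ordering a computed `desiredReplicas` of `0` was clamped to `1` before the `MinReplicas` check was ever reached:

```go
package main

import "fmt"

// normalizeOld mirrors the pre-fix case order: the zero check runs first,
// so 0 becomes 1 and the switch exits without enforcing the minimum.
func normalizeOld(desired, min int32) int32 {
	switch {
	case desired == 0:
		desired = 1 // first match wins; the MinReplicas case below never runs
	case desired < min:
		desired = min
	}
	return desired
}

// normalizeNew mirrors the post-fix order: the minimum is enforced first,
// and the zero check only fires when the minimum does not apply.
func normalizeNew(desired, min int32) int32 {
	switch {
	case desired < min:
		desired = min
	case desired == 0:
		desired = 1
	}
	return desired
}

func main() {
	fmt.Println(normalizeOld(0, 15)) // 1  -> next sync scales back to 15, hence the flapping
	fmt.Println(normalizeNew(0, 15)) // 15 -> stays at MinReplicas
}
```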
```diff
@@ -450,14 +450,14 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
 	case desiredReplicas > scaleUpLimit:
 		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "ScaleUpLimit", "the desired replica count is increasing faster than the maximum scale rate")
 		desiredReplicas = scaleUpLimit
-	case desiredReplicas == 0:
-		//  never scale down to 0, reserved for disabling autoscaling
-		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero")
-		desiredReplicas = 1
 	case hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas:
 		// make sure we aren't below our minimum
 		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was less than the minimum replica count")
 		desiredReplicas = *hpa.Spec.MinReplicas
+	case desiredReplicas == 0:
+		//  never scale down to 0, reserved for disabling autoscaling
+		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero")
+		desiredReplicas = 1
 	case desiredReplicas > hpa.Spec.MaxReplicas:
 		// make sure we aren't above our maximum
 		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooManyReplicas", "the desired replica count was more than the maximum replica count")
```
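The fix relies on the fact that a Go `switch` without a tag expression evaluates its cases top to bottom and executes only the first one whose condition is true. Moving the `MinReplicas` case ahead of the zero case therefore means a computed `desiredReplicas` of `0` is raised to `*hpa.Spec.MinReplicas` whenever a minimum is set, while the zero case still catches the situation where `MinReplicas` is nil, since a scale of `0` remains reserved for disabling autoscaling.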
```diff
@@ -994,6 +994,25 @@ func TestMinReplicas(t *testing.T) {
 	tc.runTest(t)
 }
 
+func TestMinReplicasDesiredZero(t *testing.T) {
+	tc := testCase{
+		minReplicas:         2,
+		maxReplicas:         5,
+		initialReplicas:     3,
+		desiredReplicas:     2,
+		CPUTarget:           90,
+		reportedLevels:      []uint64{0, 0, 0},
+		reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+		useMetricsApi:       true,
+		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
+			Type:   autoscalingv2.ScalingLimited,
+			Status: v1.ConditionTrue,
+			Reason: "TooFewReplicas",
+		}),
+	}
+	tc.runTest(t)
+}
+
 func TestZeroReplicas(t *testing.T) {
 	tc := testCase{
 		minReplicas:         3,
```
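Assuming a standard tree layout at the time of this PR (the HPA controller and its tests live under `pkg/controller/podautoscaler`), the new regression test can be run on its own with Go's test filter:

```sh
go test ./pkg/controller/podautoscaler/ -run TestMinReplicasDesiredZero -v
```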