fix(HPA): ignore container resource metrics in the HPA controller when the feature gate is disabled
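For context, the feature gate in question guards the ContainerResource metric source, which lets an HPA scale on the usage of a single container rather than the pod as a whole. The sketch below uses only the public autoscaling/v2 API types that also appear in the test hunks further down; it is an illustration of what such a metric spec looks like when built in Go, not part of this commit, and the container name "app" is hypothetical.

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func main() {
	// An HPA metric spec that targets the CPU utilization of one container
	// ("app") instead of the whole pod. This is the metric type that the
	// HPAContainerMetrics feature gate controls.
	spec := autoscalingv2.MetricSpec{
		Type: autoscalingv2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
			Name:      v1.ResourceCPU,
			Container: "app", // hypothetical container name
			Target: autoscalingv2.MetricTarget{
				Type:               autoscalingv2.UtilizationMetricType,
				AverageUtilization: pointer.Int32(60),
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}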
@@ -21,13 +21,16 @@ package app
import (
	"context"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/scale"
	"k8s.io/controller-manager/controller"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller/podautoscaler"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
	"k8s.io/kubernetes/pkg/features"

	resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
	"k8s.io/metrics/pkg/client/custom_metrics"
@@ -92,6 +95,7 @@ func startHPAControllerWithMetricsClient(ctx context.Context, controllerContext
		controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
		controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerCPUInitializationPeriod.Duration,
		controllerContext.ComponentConfig.HPAController.HorizontalPodAutoscalerInitialReadinessDelay.Duration,
		feature.DefaultFeatureGate.Enabled(features.HPAContainerMetrics),
	).Run(ctx, int(controllerContext.ComponentConfig.HPAController.ConcurrentHorizontalPodAutoscalerSyncs))
	return nil, true, nil
}
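Note the wiring pattern here: the gate is consulted exactly once, at the point where the controller-manager starts the HPA controller, and the result of feature.DefaultFeatureGate.Enabled(features.HPAContainerMetrics) is handed to NewHorizontalController as a plain bool. The controller itself never reads the global gate, as the next hunks show.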
@@ -108,6 +108,9 @@ type HorizontalController struct {
	// Storage of HPAs and their selectors.
	hpaSelectors    *selectors.BiMultimap
	hpaSelectorsMux sync.Mutex

	// feature gates
	containerResourceMetricsEnabled bool
}

// NewHorizontalController creates a new HorizontalController.
@@ -124,7 +127,7 @@ func NewHorizontalController(
	tolerance float64,
	cpuInitializationPeriod,
	delayOfInitialReadinessStatus time.Duration,
	containerResourceMetricsEnabled bool,
) *HorizontalController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartStructuredLogging(0)
@@ -145,6 +148,7 @@ func NewHorizontalController(
		scaleDownEvents:                 map[string][]timestampedScaleEvent{},
		scaleDownEventsLock:             sync.RWMutex{},
		hpaSelectors:                    selectors.NewBiMultimap(),
		containerResourceMetricsEnabled: containerResourceMetricsEnabled,
	}

	hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -425,6 +429,13 @@ func (a *HorizontalController) computeReplicasForMetric(ctx context.Context, hpa
			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s resource metric value: %v", spec.Resource.Name, err)
		}
	case autoscalingv2.ContainerResourceMetricSourceType:
		if !a.containerResourceMetricsEnabled {
			// If the container resource metrics feature is disabled but the object still carries such a metric,
			// the user must have enabled the feature at some point, created HPAs with container resource metrics, and later disabled it.
			// We cannot return an error in this case because that would make every HPA with a container resource metric source fail, including scaling down.
			// Instead, we silently ignore the metric and return the current replica count so it does not affect the autoscaling decision.
			return specReplicas, "", time.Time{}, condition, nil
		}
		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForContainerResourceMetric(ctx, specReplicas, spec, hpa, selector, status)
		if err != nil {
			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s container metric value: %v", spec.ContainerResource.Container, err)
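Reduced to its essentials, the new case behaves as follows. The sketch below is a self-contained simplification with made-up types (metricSpec, controller), not the real computeReplicasForMetric signature; it only illustrates the control flow the hunk above adds: when the gate is off, the current replica count is returned with a nil error, so the metric contributes nothing to the final recommendation.

package main

import "fmt"

// Simplified stand-ins for the real types; names here are illustrative only.
type metricSpec struct {
	isContainerResource bool
	container           string
}

type controller struct {
	containerResourceMetricsEnabled bool
}

// computeReplicas mimics the gate check added above: container resource
// metrics are skipped, not rejected, when the feature is disabled.
func (c *controller) computeReplicas(spec metricSpec, currentReplicas int32) (int32, error) {
	if spec.isContainerResource && !c.containerResourceMetricsEnabled {
		// Returning the current replica count with no error keeps this metric
		// from influencing (or breaking) the autoscaling decision.
		return currentReplicas, nil
	}
	// ...otherwise the metric would be evaluated normally (omitted in this sketch).
	return currentReplicas, fmt.Errorf("metric evaluation not implemented in this sketch")
}

func main() {
	c := &controller{containerResourceMetricsEnabled: false}
	replicas, err := c.computeReplicas(metricSpec{isContainerResource: true, container: "app"}, 3)
	fmt.Println(replicas, err) // 3 <nil>: the disabled metric is ignored
}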
@@ -148,6 +148,8 @@ type testCase struct {
	recommendations []timestampedRecommendation
	hpaSelectors    *selectors.BiMultimap

	containerResourceMetricsEnabled bool
}

// Needs to be called under a lock.
@@ -738,6 +740,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
		defaultTestingTolerance,
		defaultTestingCPUInitializationPeriod,
		defaultTestingDelayOfInitialReadinessStatus,
		tc.containerResourceMetricsEnabled,
	)
	hpaController.hpaListerSynced = alwaysReady
	if tc.recommendations != nil {
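Because the flag reaches the controller as an ordinary constructor argument, setupController can pass tc.containerResourceMetricsEnabled straight through; individual test cases can therefore exercise both the enabled and the disabled path without mutating the process-wide DefaultFeatureGate.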
@@ -829,6 +832,38 @@ func TestScaleUpContainer(t *testing.T) {
		reportedLevels:                  []uint64{300, 500, 700},
		reportedCPURequests:             []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:                   true,
		containerResourceMetricsEnabled: true,
	}
	tc.runTest(t)
}

func TestIgnoreContainerMetric(t *testing.T) {
	// When the container resource metric feature gate is disabled, the metric is ignored and the HPA keeps the current replica count.
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		specReplicas:            3,
		statusReplicas:          3,
		expectedDesiredReplicas: 3,
		metricsTarget: []autoscalingv2.MetricSpec{{
			Type: autoscalingv2.ContainerResourceMetricSourceType,
			ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
				Name: v1.ResourceCPU,
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: pointer.Int32(30),
				},
				Container: "container1",
			},
		}},
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
			Type:   autoscalingv2.AbleToScale,
			Status: v1.ConditionTrue,
			Reason: "ReadyForNewScale",
		}),
	}
	tc.runTest(t)
}
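Note that expectedDesiredReplicas stays at 3, matching specReplicas, and the expected conditions are the usual healthy set with AbleToScale reported as True/ReadyForNewScale: with the gate off, the container resource metric neither changes the scale nor surfaces an error condition.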
@@ -1361,6 +1396,7 @@ func TestScaleDownContainerResource(t *testing.T) {
		}},
		useMetricsAPI:                   true,
		recommendations:                 []timestampedRecommendation{},
		containerResourceMetricsEnabled: true,
	}
	tc.runTest(t)
}
@@ -4536,6 +4572,7 @@ func TestMultipleHPAs(t *testing.T) {
		defaultTestingTolerance,
		defaultTestingCPUInitializationPeriod,
		defaultTestingDelayOfInitialReadinessStatus,
		false,
	)
	hpaController.scaleUpEvents = scaleUpEventsMap
	hpaController.scaleDownEvents = scaleDownEventsMap