HPA support for pod-level resource specifications (#132430)

* HPA support for pod-level resource specifications

* Add e2e tests for HPA support for pod-level resource specifications
Author: Luiz Oliveira
Date: 2025-07-29 12:02:26 -04:00 (committed via GitHub)
Commit: 7fbf63a23f (parent: e2ab840708)
8 changed files with 339 additions and 66 deletions
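
For context: this change teaches the HPA replica calculator to use pod-level resource requests (KEP-2837) when they are set and the PodLevelResources feature gate is enabled, instead of summing per-container requests. A minimal sketch follows (not part of this commit, and assuming the pod-level-aware behavior of k8s.io/component-helpers/resource that the patch relies on) of what such a pod looks like and the request value the new code path reads:

```go
// Sketch only: a pod with pod-level requests next to per-container requests,
// and the request value resourcehelpers.PodRequests reports for it.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelpers "k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			// Pod-level requests (KEP-2837); expected to win over the container sum.
			Resources: &v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("800m")},
			},
			Containers: []v1.Container{
				{Name: "app", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				}},
				{Name: "sidecar", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
				}},
			},
		},
	}

	cpu := resourcehelpers.PodRequests(pod, resourcehelpers.PodResourcesOptions{})[v1.ResourceCPU]
	fmt.Println(cpu.MilliValue()) // expected: 800 (pod-level), not 150 (container sum)
}
```

If the pod set no Spec.Resources, the same call would fall back to the 150m container sum, which is the pre-existing behavior the HPA keeps when the gate is off or a specific container is targeted.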

View File

@@ -27,9 +27,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/util/feature"
corelisters "k8s.io/client-go/listers/core/v1"
resourcehelpers "k8s.io/component-helpers/resource"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/features"
)
const (
@@ -94,7 +97,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(ctx context.Context, currentRepl
return 0, 0, 0, time.Time{}, fmt.Errorf("did not receive metrics for targeted pods (pods might be unready)")
}
-requests, err := calculatePodRequests(podList, container, resource)
+requests, err := calculateRequests(podList, container, resource)
if err != nil {
return 0, 0, 0, time.Time{}, err
}
@@ -449,31 +452,76 @@ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1
return
}
-func calculatePodRequests(pods []*v1.Pod, container string, resource v1.ResourceName) (map[string]int64, error) {
+// calculateRequests computes the request value for each pod for the specified
+// resource.
+// If container is non-empty, it uses the request of that specific container.
+// If container is empty, it uses pod-level requests if pod-level requests are
+// set on the pod. Otherwise, it sums the requests of all containers in the pod
+// (including restartable init containers).
+// It returns a map of pod names to their calculated request values.
+func calculateRequests(pods []*v1.Pod, container string, resource v1.ResourceName) (map[string]int64, error) {
+podLevelResourcesEnabled := feature.DefaultFeatureGate.Enabled(features.PodLevelResources)
requests := make(map[string]int64, len(pods))
for _, pod := range pods {
-podSum := int64(0)
-// Calculate all regular containers and restartable init containers requests.
-containers := append([]v1.Container{}, pod.Spec.Containers...)
-for _, c := range pod.Spec.InitContainers {
-if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
-containers = append(containers, c)
-}
-}
+var request int64
+var err error
+// Determine if we should use pod-level requests: see KEP-2837
+// https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/2837-pod-level-resource-spec/README.md
+usePodLevelRequests := podLevelResourcesEnabled &&
+resourcehelpers.IsPodLevelRequestsSet(pod) &&
+// If a container name is specified in the HPA, it takes precedence over
+// the pod-level requests.
+container == ""
+if usePodLevelRequests {
+request, err = calculatePodLevelRequests(pod, resource)
+} else {
+request, err = calculatePodRequestsFromContainers(pod, container, resource)
+}
-for _, c := range containers {
-if container == "" || container == c.Name {
-if containerRequest, ok := c.Resources.Requests[resource]; ok {
-podSum += containerRequest.MilliValue()
-} else {
-return nil, fmt.Errorf("missing request for %s in container %s of Pod %s", resource, c.Name, pod.ObjectMeta.Name)
-}
-}
-}
+if err != nil {
+return nil, err
+}
-requests[pod.Name] = podSum
+requests[pod.Name] = request
}
return requests, nil
}
// calculatePodLevelRequests computes the requests for the specific resource at
// the pod level.
func calculatePodLevelRequests(pod *v1.Pod, resource v1.ResourceName) (int64, error) {
podLevelRequests := resourcehelpers.PodRequests(pod, resourcehelpers.PodResourcesOptions{})
podRequest, ok := podLevelRequests[resource]
if !ok {
return 0, fmt.Errorf("missing pod-level request for %s in Pod %s", resource, pod.Name)
}
return podRequest.MilliValue(), nil
}
// calculatePodRequestsFromContainers computes the requests for the specified
// resource by summing requests from all containers in the pod.
// If a container name is specified, it uses only that container.
func calculatePodRequestsFromContainers(pod *v1.Pod, container string, resource v1.ResourceName) (int64, error) {
// Calculate all regular containers and restartable init containers requests.
containers := append([]v1.Container{}, pod.Spec.Containers...)
for _, c := range pod.Spec.InitContainers {
if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
containers = append(containers, c)
}
}
request := int64(0)
for _, c := range containers {
if container == "" || container == c.Name {
containerRequest, ok := c.Resources.Requests[resource]
if !ok {
return 0, fmt.Errorf("missing request for %s in container %s of Pod %s", resource, c.Name, pod.Name)
}
request += containerRequest.MilliValue()
}
}
return request, nil
}
func removeMetricsForPods(metrics metricsclient.PodMetricsInfo, pods sets.Set[string]) {
for _, pod := range pods.UnsortedList() {
delete(metrics, pod)
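
Why the chosen request value matters (illustration only, not from the patch): the replica calculator divides observed usage by the per-pod request returned by calculateRequests to obtain a utilization figure, so for the same usage the pod-level request and the container sum can produce very different scaling signals.

```go
// Hedged illustration of the utilization math, assuming the usual
// usage/request ratio the HPA compares against its target.
package main

import "fmt"

func utilizationPercent(usageMilli, requestMilli int64) int64 {
	return usageMilli * 100 / requestMilli
}

func main() {
	usage := int64(300) // 300m of observed CPU usage for one pod

	// Old behavior for the pod sketched earlier: container sum 100m + 50m = 150m.
	fmt.Println(utilizationPercent(usage, 150)) // 200 (% of request)

	// New behavior when pod-level requests are set and the gate is on: 800m.
	fmt.Println(utilizationPercent(usage, 800)) // 37 (% of request)
}
```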

View File

@@ -31,13 +31,16 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/features"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
@@ -2225,17 +2228,18 @@ func TestGroupPods(t *testing.T) {
}
}
-func TestCalculatePodRequests(t *testing.T) {
+func TestCalculateRequests(t *testing.T) {
containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
testPod := "test-pod"
tests := []struct {
-name string
-pods []*v1.Pod
-container string
-resource v1.ResourceName
-expectedRequests map[string]int64
-expectedError error
+name string
+pods []*v1.Pod
+container string
+resource v1.ResourceName
+enablePodLevelResources bool
+expectedRequests map[string]int64
+expectedError error
}{
{
name: "void",
@@ -2246,7 +2250,7 @@ func TestCalculatePodRequests(t *testing.T) {
expectedError: nil,
},
{
-name: "pod with regular containers",
+name: "Sum container requests if pod-level feature is disabled",
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
@@ -2265,7 +2269,93 @@ func TestCalculatePodRequests(t *testing.T) {
expectedError: nil,
},
{
-name: "calculate requests with special container",
+name: "Pod-level resources are enabled, but not set: fallback to sum container requests",
enablePodLevelResources: true,
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
Namespace: testNamespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}},
{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
},
},
}},
container: "",
resource: v1.ResourceCPU,
expectedRequests: map[string]int64{testPod: 150},
expectedError: nil,
},
{
name: "Pod-level resources override container requests when feature enabled and pod resources specified",
enablePodLevelResources: true,
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
Namespace: testNamespace,
},
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(800, resource.DecimalSI)},
},
Containers: []v1.Container{
{Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}},
{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
},
},
}},
container: "",
resource: v1.ResourceCPU,
expectedRequests: map[string]int64{testPod: 800},
expectedError: nil,
},
{
name: "Fail if at least one of the containers is missing requests and pod-level feature/requests are not set",
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
Namespace: testNamespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "container1"},
{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
},
},
}},
container: "",
resource: v1.ResourceCPU,
expectedRequests: nil,
expectedError: fmt.Errorf("missing request for %s in container %s of Pod %s", v1.ResourceCPU, "container1", testPod),
},
{
name: "Pod-level resources override missing container requests when feature enabled and pod resources specified",
enablePodLevelResources: true,
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
Namespace: testNamespace,
},
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(800, resource.DecimalSI)},
},
Containers: []v1.Container{
{Name: "container1"},
{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
},
},
}},
container: "",
resource: v1.ResourceCPU,
expectedRequests: map[string]int64{testPod: 800},
expectedError: nil,
},
{
name: "Container: if a container name is specified, calculate requests only for that container",
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
@@ -2284,22 +2374,27 @@ func TestCalculatePodRequests(t *testing.T) {
expectedError: nil,
},
{
-name: "container missing requests",
+name: "Container: if a container name is specified, calculate requests only for that container and ignore pod-level requests",
+enablePodLevelResources: true,
pods: []*v1.Pod{{
ObjectMeta: metav1.ObjectMeta{
Name: testPod,
Namespace: testNamespace,
},
Spec: v1.PodSpec{
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(800, resource.DecimalSI)},
},
Containers: []v1.Container{
-{Name: "container1"},
+{Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}},
{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
},
},
}},
-container: "",
+container: "container1",
resource: v1.ResourceCPU,
-expectedRequests: nil,
-expectedError: fmt.Errorf("missing request for %s in container %s of Pod %s", v1.ResourceCPU, "container1", testPod),
+expectedRequests: map[string]int64{testPod: 100},
+expectedError: nil,
},
{
name: "pod with restartable init containers",
@@ -2327,7 +2422,9 @@ func TestCalculatePodRequests(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
-requests, err := calculatePodRequests(tc.pods, tc.container, tc.resource)
+featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, tc.enablePodLevelResources)
+requests, err := calculateRequests(tc.pods, tc.container, tc.resource)
assert.Equal(t, tc.expectedRequests, requests, "requests should be as expected")
assert.Equal(t, tc.expectedError, err, "error should be as expected")
})

View File

@@ -86,7 +86,7 @@ var _ = SIGDescribe(feature.ClusterSizeAutoscalingScaleUp, framework.WithSlow(),
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1
-resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer(ctx, "resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
+resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer(ctx, "resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, nil)
ginkgo.DeferCleanup(resourceConsumer.CleanUp)
resourceConsumer.WaitForReplicas(ctx, replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.

View File

@@ -25,7 +25,9 @@ import (
autoscalingv2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
@@ -67,6 +69,18 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (scale resource: CP
})
})
f.Describe("Deployment (Pod-level Resources Resource Metric)", framework.WithFeatureGate(features.PodLevelResources), func() {
f.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUpPodLevelResources(ctx, "test-deployment-pod-level", e2eautoscaling.KindDeployment, autoscalingv2.ResourceMetricSourceType, f)
})
})
f.Describe("Deployment (Pod-level Resources ContainerResource Metric)", framework.WithFeatureGate(features.PodLevelResources), func() {
f.It(titleUp+titleAverageUtilization, func(ctx context.Context) {
scaleUpPodLevelResources(ctx, "test-deployment-pod-level", e2eautoscaling.KindDeployment, autoscalingv2.ContainerResourceMetricSourceType, f)
})
})
f.Describe("ReplicaSet", func() {
ginkgo.It(titleUp, func(ctx context.Context) {
scaleUp(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
@@ -202,7 +216,7 @@ func (st *HPAScaleTest) run(ctx context.Context, name string, kind schema.GroupV
} else if st.resourceType == memResource {
initMemTotal = st.initMemTotal
}
-rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
+rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle, nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(ctx, rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name)
@@ -310,7 +324,7 @@ func (st *HPAContainerResourceScaleTest) run(ctx context.Context, name string, k
} else if st.resourceType == memResource {
initMemTotal = st.initMemTotal
}
-rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType)
+rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType, nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(ctx, rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
ginkgo.DeferCleanup(e2eautoscaling.DeleteContainerResourceHPA, rc, hpa.Name)
@@ -411,9 +425,105 @@ func doNotScaleOnBusySidecar(ctx context.Context, name string, kind schema.Group
st.run(ctx, name, kind, f)
}
// HPAPodResourceScaleTest is a struct that defines the parameters for
// a pod-level resource scaling test.
type HPAPodResourceScaleTest struct {
initPods int
initCPUTotal int
metricSourceType autoscalingv2.MetricSourceType
perPodRequests *v1.ResourceRequirements
perContainerCPURequest int64
perContainerMemRequest int64
targetValue int32
minPods int32
maxPods int32
firstScale int
cpuBurst int
secondScale int32
}
// run executes the HPA pod-level resource scaling test.
// It creates a resource consumer and an HPA, then verifies that the number of
// replicas scales up to the expected number of pods based on the initial CPU
// consumption.
// It also optionally verifies a second scaling event based on a CPU burst.
func (st *HPAPodResourceScaleTest) run(ctx context.Context, name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute
resourceType := cpuResource
rc := e2eautoscaling.NewDynamicResourceConsumer(ctx, name, f.Namespace.Name,
kind, st.initPods, st.initCPUTotal, 0, 0, st.perContainerCPURequest,
st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable,
e2eautoscaling.Idle, st.perPodRequests)
ginkgo.DeferCleanup(rc.CleanUp)
createHPAFn := e2eautoscaling.CreateResourceHorizontalPodAutoscaler
if st.metricSourceType == autoscalingv2.ContainerResourceMetricSourceType {
createHPAFn = e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler
}
hpa := createHPAFn(ctx, rc, resourceType, utilizationMetricType,
st.targetValue, st.minPods, st.maxPods)
ginkgo.DeferCleanup(e2eautoscaling.DeleteHorizontalPodAutoscaler, rc, hpa.Name)
rc.WaitForReplicas(ctx, st.firstScale, timeToWait)
if st.cpuBurst > 0 && st.secondScale > 0 {
rc.ConsumeCPU(st.cpuBurst)
rc.WaitForReplicas(ctx, int(st.secondScale), timeToWait)
}
}
// scaleUpPodLevelResources configures and runs a test that scales up a workload
// that has pod-level resources set based on a Utilization metric type HPA.
// It also handles the case where the metric source is ContainerResource,
// adjusting the parameters accordingly.
func scaleUpPodLevelResources(ctx context.Context, name string, kind schema.GroupVersionKind, metricSourceType autoscalingv2.MetricSourceType, f *framework.Framework) {
st := &HPAPodResourceScaleTest{
metricSourceType: metricSourceType,
perPodRequests: resourceRequirements(500, 500),
perContainerCPURequest: 0,
perContainerMemRequest: 0,
initCPUTotal: 250,
cpuBurst: 700,
targetValue: 20,
minPods: 1,
maxPods: 5,
initPods: 1,
firstScale: 3,
secondScale: 5,
}
if metricSourceType == autoscalingv2.ContainerResourceMetricSourceType {
// When pod-level resources are set and the HPA is configured on
// ContainerResource metric type, HPA considers the target container
// resource requests, instead of the pod-level resources during
// calculations.
// The values below make sure that HPA is autoscaling based on
// perContainerCPURequest instead of perPodRequests (HPA would not scale
// up if it was considering perPodRequests).
st.perContainerCPURequest = 250
st.perContainerMemRequest = 250
st.initCPUTotal = 125
st.cpuBurst = 350
}
st.run(ctx, name, kind, f)
}
func getTargetValueByType(averageValueTarget, averageUtilizationTarget int, targetType autoscalingv2.MetricTargetType) int32 {
if targetType == utilizationMetricType {
return int32(averageUtilizationTarget)
}
return int32(averageValueTarget)
}
func resourceRequirements(cpuMillis, memMb int64) *v1.ResourceRequirements {
return &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cpuMillis, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memMb*1024*1024, resource.BinarySI), // ResourceMemory is in bytes
},
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(cpuMillis, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(memMb*1024*1024, resource.BinarySI), // ResourceMemory is in bytes
},
}
}
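
A rough sanity check of the constants in scaleUpPodLevelResources above (assumption: the standard HPA rule desiredReplicas = ceil(currentReplicas * currentUtilization / targetUtilization), rewritten as ceil(totalUsage / (perPodRequest * targetPercent/100))):

```go
// Back-of-the-envelope check, not part of the test code.
package main

import (
	"fmt"
	"math"
)

// desired returns ceil(totalUsage / (request * targetPercent/100)).
func desired(usageMilli, requestMilli, targetPercent float64) int {
	return int(math.Ceil(usageMilli / (requestMilli * targetPercent / 100)))
}

func main() {
	// Resource metric: pod-level request 500m, target 20% utilization.
	fmt.Println(desired(250, 500, 20)) // 3 -> firstScale
	fmt.Println(desired(700, 500, 20)) // 7, capped at maxPods=5 -> secondScale

	// ContainerResource metric: per-container request 250m is used instead.
	fmt.Println(desired(125, 250, 20)) // 3 -> firstScale
	fmt.Println(desired(350, 250, 20)) // 7, capped at maxPods=5 -> secondScale
}
```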

View File

@@ -69,7 +69,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -110,7 +110,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -149,7 +149,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -185,7 +185,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -227,7 +227,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -269,7 +269,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -313,7 +313,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -356,7 +356,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -403,7 +403,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
@@ -455,7 +455,7 @@ var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (non-default behavi
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
@@ -516,7 +516,7 @@ var _ = SIGDescribe(feature.HPAConfigurableTolerance, framework.WithFeatureGate(
hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-)
+nil)
ginkgo.DeferCleanup(rc.CleanUp)
scaleRule := e2eautoscaling.HPAScalingRuleWithToleranceMilli(10000)

View File

@@ -131,9 +131,9 @@ type ResourceConsumer struct {
}
// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer
-func NewDynamicResourceConsumer(ctx context.Context, name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, enableSidecar SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer {
+func NewDynamicResourceConsumer(ctx context.Context, name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, enableSidecar SidecarStatusType, sidecarType SidecarWorkloadType, podResources *v1.ResourceRequirements) *ResourceConsumer {
return newResourceConsumer(ctx, name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil, enableSidecar, sidecarType)
+dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil, enableSidecar, sidecarType, podResources)
}
// getSidecarContainer returns sidecar container
@@ -171,7 +171,7 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
*/
func newResourceConsumer(ctx context.Context, name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string, sidecarStatus SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer {
+requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string, sidecarStatus SidecarStatusType, sidecarType SidecarWorkloadType, podResources *v1.ResourceRequirements) *ResourceConsumer {
if podAnnotations == nil {
podAnnotations = make(map[string]string)
}
@@ -194,7 +194,7 @@ func newResourceConsumer(ctx context.Context, name, nsName string, kind schema.G
framework.ExpectNoError(err)
resourceClient := dynamicClient.Resource(schema.GroupVersionResource{Group: crdGroup, Version: crdVersion, Resource: crdNamePlural}).Namespace(nsName)
-runServiceAndWorkloadForResourceConsumer(ctx, clientset, resourceClient, apiExtensionClient, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations, additionalContainers)
+runServiceAndWorkloadForResourceConsumer(ctx, clientset, resourceClient, apiExtensionClient, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations, additionalContainers, podResources)
controllerName := name + "-ctrl"
// If sidecar is enabled and busy, run service and consumer for sidecar
if sidecarStatus == Enable && sidecarType == Busy {
@@ -617,7 +617,7 @@ func runServiceAndSidecarForResourceConsumer(ctx context.Context, c clientset.In
ctx, c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
-func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.Interface, resourceClient dynamic.ResourceInterface, apiExtensionClient crdclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string, additionalContainers []v1.Container) {
+func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.Interface, resourceClient dynamic.ResourceInterface, apiExtensionClient crdclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string, additionalContainers []v1.Container, podResources *v1.ResourceRequirements) {
ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := createService(ctx, c, name, ns, serviceAnnotations, map[string]string{"name": name}, port, targetPort)
framework.ExpectNoError(err)
@@ -629,13 +629,16 @@ func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.I
Namespace: ns,
Timeout: timeoutRC,
Replicas: replicas,
-CpuRequest: cpuLimitMillis,
-CpuLimit: cpuLimitMillis,
+CPURequest: cpuLimitMillis,
+CPULimit: cpuLimitMillis,
MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
MemLimit: memLimitMb * 1024 * 1024,
Annotations: podAnnotations,
AdditionalContainers: additionalContainers,
}
if podResources != nil {
rcConfig.PodResources = podResources.DeepCopy()
}
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,

View File

@@ -53,7 +53,9 @@ func (t *HPAUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
f.ClientSet,
f.ScalesGetter,
e2eautoscaling.Disable,
-e2eautoscaling.Idle)
+e2eautoscaling.Idle,
+nil,
+)
t.hpa = e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(ctx,
t.rc,
20, /* targetCPUUtilizationPercent */

View File

@@ -119,11 +119,12 @@ type RCConfig struct {
Timeout time.Duration
PodStatusFile *os.File
Replicas int
-CpuRequest int64 // millicores
-CpuLimit int64 // millicores
-MemRequest int64 // bytes
-MemLimit int64 // bytes
-GpuLimit int64 // count
+CPURequest int64 // millicores
+CPULimit int64 // millicores
+MemRequest int64 // bytes
+MemLimit int64 // bytes
+GpuLimit int64 // count
+PodResources *v1.ResourceRequirements // Pod-level resources
ReadinessProbe *v1.Probe
DNSPolicy *v1.DNSPolicy
PriorityClassName string
@@ -331,6 +332,10 @@ func (config *DeploymentConfig) create() error {
},
}
if config.PodResources != nil {
deployment.Spec.Template.Spec.Resources = config.PodResources.DeepCopy()
}
if len(config.AdditionalContainers) > 0 {
deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, config.AdditionalContainers...)
}
@@ -402,6 +407,10 @@ func (config *ReplicaSetConfig) create() error {
},
}
if config.PodResources != nil {
rs.Spec.Template.Spec.Resources = config.PodResources.DeepCopy()
}
if len(config.AdditionalContainers) > 0 {
rs.Spec.Template.Spec.Containers = append(rs.Spec.Template.Spec.Containers, config.AdditionalContainers...)
}
@@ -478,6 +487,10 @@ func (config *RCConfig) create() error {
},
}
if config.PodResources != nil {
rc.Spec.Template.Spec.Resources = config.PodResources.DeepCopy()
}
if len(config.AdditionalContainers) > 0 {
rc.Spec.Template.Spec.Containers = append(rc.Spec.Template.Spec.Containers, config.AdditionalContainers...)
}
@@ -521,20 +534,20 @@ func (config *RCConfig) applyTo(template *v1.PodTemplateSpec) {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, v1.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
-if config.CpuLimit > 0 || config.MemLimit > 0 || config.GpuLimit > 0 {
+if config.CPULimit > 0 || config.MemLimit > 0 || config.GpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}
}
-if config.CpuLimit > 0 {
-template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
+if config.CPULimit > 0 {
+template.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CPULimit, resource.DecimalSI)
}
if config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
-if config.CpuRequest > 0 || config.MemRequest > 0 {
+if config.CPURequest > 0 || config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests = v1.ResourceList{}
}
-if config.CpuRequest > 0 {
-template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
+if config.CPURequest > 0 {
+template.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(config.CPURequest, resource.DecimalSI)
}
if config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)