Merge pull request #108441 from pacoxu/pod-overload-ga
mark PodOverhead to GA in v1.24; remove in v1.26
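Note: the whole diff follows from one property of gate graduation: a gate declared GA with LockToDefault can never report false again, so every `Enabled(features.PodOverhead)` check below is dead code and is deleted. A minimal, self-contained sketch of that behavior against k8s.io/component-base/featuregate (the gate name is re-declared locally for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const PodOverhead featuregate.Feature = "PodOverhead"

func main() {
	gate := featuregate.NewFeatureGate()
	// The state this PR moves PodOverhead into (see the kube_features.go hunk below).
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		PodOverhead: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled(PodOverhead)) // true, unconditionally

	// A LockToDefault gate rejects overrides, so the removed Enabled()
	// checks could never have evaluated to false again anyway.
	err := gate.Set("PodOverhead=false")
	fmt.Println(err != nil) // true: the override is refused with an error
}
```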
@@ -571,11 +571,6 @@ func dropDisabledFields(
 		})
 	}
 
-	if !utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) && !overheadInUse(oldPodSpec) {
-		// Set Overhead to nil only if the feature is disabled and it is not used
-		podSpec.Overhead = nil
-	}
-
 	dropDisabledProcMountField(podSpec, oldPodSpec)
 
 	dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec)
@@ -659,17 +654,6 @@ func ephemeralContainersInUse(podSpec *api.PodSpec) bool {
 	return len(podSpec.EphemeralContainers) > 0
 }
 
-// overheadInUse returns true if the pod spec is non-nil and has Overhead set
-func overheadInUse(podSpec *api.PodSpec) bool {
-	if podSpec == nil {
-		return false
-	}
-	if podSpec.Overhead != nil {
-		return true
-	}
-	return false
-}
-
 // procMountInUse returns true if the pod spec is non-nil and has a SecurityContext's ProcMount field set to a non-default value
 func procMountInUse(podSpec *api.PodSpec) bool {
 	if podSpec == nil {
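Note: the deleted helper reduces to a single boolean expression; an equivalent one-liner (a sketch against the same internal api package, not code added by the PR):

```go
// overheadInUse reports whether the pod spec is non-nil and has Overhead set.
func overheadInUse(podSpec *api.PodSpec) bool {
	return podSpec != nil && podSpec.Overhead != nil
}
```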
@@ -71,7 +71,7 @@ func PodRequestsAndLimitsReuse(pod *v1.Pod, reuseReqs, reuseLimits v1.ResourceLi
 
 	// if PodOverhead feature is supported, add overhead for running a pod
 	// to the sum of requests and to non-zero limits:
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil {
 		addResourceList(reqs, pod.Spec.Overhead)
 
 		for name, quantity := range pod.Spec.Overhead {
@@ -152,7 +152,7 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
 
 	// if PodOverhead feature is supported, add overhead for running a pod
 	// to the total requests if the resource total is non-zero
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil {
 		if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
 			requestQuantity.Add(podOverhead)
 		}
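Note: with the gate checks gone, these helpers unconditionally fold Overhead into the pod's request totals whenever it is set. A standalone sketch of that accumulation (addOverhead is an illustrative stand-in for the unexported addResourceList used above):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// addOverhead mirrors the post-GA call sites: only the nil check remains.
func addOverhead(reqs v1.ResourceList, pod *v1.Pod) {
	if pod.Spec.Overhead == nil {
		return
	}
	for name, q := range pod.Spec.Overhead {
		if cur, ok := reqs[name]; ok {
			cur.Add(q) // Quantity addition, not replacement
			reqs[name] = cur
		} else {
			reqs[name] = q.DeepCopy()
		}
	}
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")},
	}}
	reqs := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}
	addOverhead(reqs, pod)
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 1250m
}
```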
@@ -2944,7 +2944,6 @@ type PodSpec struct {
 	// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
 	// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
 	// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead
-	// This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
 	// +optional
 	Overhead ResourceList
 	// EnableServiceLinks indicates whether information about services should be injected into pod's
@@ -50,8 +50,6 @@ type RuntimeClass struct {
 	// Overhead represents the resource overhead associated with running a pod for a
 	// given RuntimeClass. For more details, see
 	// https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
-	// This field is beta-level as of Kubernetes v1.18, and is only honored by servers
-	// that enable the PodOverhead feature.
 	// +optional
 	Overhead *Overhead
 
@@ -391,6 +391,7 @@ const (
 	// owner: @egernst
 	// alpha: v1.16
 	// beta: v1.18
+	// ga: v1.24
 	//
 	// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
 	PodOverhead featuregate.Feature = "PodOverhead"
@@ -893,7 +894,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
 	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
 	NonPreemptingPriority: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
-	PodOverhead: {Default: true, PreRelease: featuregate.Beta},
+	PodOverhead: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
 	IPv6DualStack: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
 	EndpointSlice: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
 	EndpointSliceProxying: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
pkg/generated/openapi/zz_generated.openapi.go (generated)
@@ -22081,7 +22081,7 @@ func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenA
 			},
 			"overhead": {
 				SchemaProps: spec.SchemaProps{
-					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.",
+					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
 					Type:        []string{"object"},
 					AdditionalProperties: &spec.SchemaOrBool{
 						Allows: true,
@@ -34564,7 +34564,7 @@ func schema_k8sio_api_node_v1_RuntimeClass(ref common.ReferenceCallback) common.
 			},
 			"overhead": {
 				SchemaProps: spec.SchemaProps{
-					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/\nThis field is in beta starting v1.18 and is only honored by servers that enable the PodOverhead feature.",
+					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/",
 					Ref:         ref("k8s.io/api/node/v1.Overhead"),
 				},
 			},
@@ -34831,7 +34831,7 @@ func schema_k8sio_api_node_v1alpha1_RuntimeClassSpec(ref common.ReferenceCallbac
 			},
 			"overhead": {
 				SchemaProps: spec.SchemaProps{
-					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.",
+					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
 					Ref:         ref("k8s.io/api/node/v1alpha1.Overhead"),
 				},
 			},
@@ -34973,7 +34973,7 @@ func schema_k8sio_api_node_v1beta1_RuntimeClass(ref common.ReferenceCallback) co
 			},
 			"overhead": {
 				SchemaProps: spec.SchemaProps{
-					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.",
+					Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
 					Ref:         ref("k8s.io/api/node/v1beta1.Overhead"),
 				},
 			},
@@ -21,15 +21,13 @@ package kuberuntime
 
 import (
 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
 	resources := &runtimeapi.LinuxContainerResources{}
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil {
 		cpu := pod.Spec.Overhead.Cpu()
 		memory := pod.Spec.Overhead.Memory()
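Note: the Cpu()/Memory() accessors used above return a zero-valued *resource.Quantity when the key is absent, so the single remaining nil check is all the conversion needs. A small sketch of extracting the numeric values (the example quantities are made up):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	overhead := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("250m"),
		v1.ResourceMemory: resource.MustParse("120Mi"),
	}
	fmt.Println(overhead.Cpu().MilliValue()) // 250 (millicores)
	fmt.Println(overhead.Memory().Value())   // 125829120 (bytes)
}
```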
@@ -30,12 +30,10 @@ import (
 	"k8s.io/apiserver/pkg/admission"
 	quota "k8s.io/apiserver/pkg/quota/v1"
 	"k8s.io/apiserver/pkg/quota/v1/generic"
-	"k8s.io/apiserver/pkg/util/feature"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/utils/clock"
 )
@@ -369,10 +367,9 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
 		limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
 	}
 
-	if feature.DefaultFeatureGate.Enabled(features.PodOverhead) {
-		requests = quota.Add(requests, pod.Spec.Overhead)
-		limits = quota.Add(limits, pod.Spec.Overhead)
-	}
+	requests = quota.Add(requests, pod.Spec.Overhead)
+	limits = quota.Add(limits, pod.Spec.Overhead)
 
 	result = quota.Add(result, podComputeUsageHelper(requests, limits))
 	return result, nil
 }
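Note: the now-unconditional quota.Add calls are safe for pods without a RuntimeClass because ResourceList addition treats a nil list as empty. A sketch of that semantics (quota.Add is the real helper from k8s.io/apiserver/pkg/quota/v1; the quantities are made up):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	quota "k8s.io/apiserver/pkg/quota/v1"
)

func main() {
	requests := corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1")}

	var overhead corev1.ResourceList // nil: pod carries no overhead
	sum := quota.Add(requests, overhead)
	cpu := sum[corev1.ResourceCPU]
	fmt.Println(cpu.String()) // 1 (a nil list adds nothing)

	overhead = corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("250m")}
	sum = quota.Add(requests, overhead)
	cpu = sum[corev1.ResourceCPU]
	fmt.Println(cpu.String()) // 1250m
}
```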
@@ -27,10 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	quota "k8s.io/apiserver/pkg/quota/v1"
 	"k8s.io/apiserver/pkg/quota/v1/generic"
-	"k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/utils/clock"
 	testingclock "k8s.io/utils/clock/testing"
@@ -154,9 +151,8 @@ func TestPodEvaluatorUsage(t *testing.T) {
 	deletionTimestampNotPastGracePeriod := metav1.NewTime(fakeClock.Now())
 
 	testCases := map[string]struct {
-		pod                *api.Pod
-		usage              corev1.ResourceList
-		podOverheadEnabled bool
+		pod   *api.Pod
+		usage corev1.ResourceList
 	}{
 		"init container CPU": {
 			pod: &api.Pod{
@@ -525,41 +521,11 @@ func TestPodEvaluatorUsage(t *testing.T) {
 				corev1.ResourceCPU: resource.MustParse("2"),
 				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
 			},
-			podOverheadEnabled: true,
 		},
-		"do not count pod overhead as usage with pod overhead disabled": {
-			pod: &api.Pod{
-				Spec: api.PodSpec{
-					Overhead: api.ResourceList{
-						api.ResourceCPU: resource.MustParse("1"),
-					},
-					Containers: []api.Container{
-						{
-							Resources: api.ResourceRequirements{
-								Requests: api.ResourceList{
-									api.ResourceCPU: resource.MustParse("1"),
-								},
-								Limits: api.ResourceList{
-									api.ResourceCPU: resource.MustParse("2"),
-								},
-							},
-						},
-					},
-				},
-			},
-			usage: corev1.ResourceList{
-				corev1.ResourceRequestsCPU: resource.MustParse("1"),
-				corev1.ResourceLimitsCPU:   resource.MustParse("2"),
-				corev1.ResourcePods:        resource.MustParse("1"),
-				corev1.ResourceCPU:         resource.MustParse("1"),
-				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
-			},
-			podOverheadEnabled: false,
-		},
 	}
 	t.Parallel()
 	for testName, testCase := range testCases {
 		t.Run(testName, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodOverhead, testCase.podOverheadEnabled)()
 			actual, err := evaluator.Usage(testCase.pod)
 			if err != nil {
 				t.Error(err)
@@ -23,11 +23,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/storage/names"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/node"
 	"k8s.io/kubernetes/pkg/apis/node/validation"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 // strategy implements verification logic for RuntimeClass.
@@ -58,12 +56,6 @@ func (strategy) AllowCreateOnUpdate() bool {
 // PrepareForCreate clears fields that are not allowed to be set by end users
 // on creation.
 func (strategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
-	rc := obj.(*node.RuntimeClass)
-
-	if !utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) && rc != nil {
-		// Set Overhead to nil only if the feature is disabled and it is not used
-		rc.Overhead = nil
-	}
 }
 
 // PrepareForUpdate clears fields that are not allowed to be set by end users on update.
@@ -25,14 +25,12 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/dynamic/dynamicinformer"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/tools/cache"
 	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	corev1nodeaffinity "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
@@ -491,7 +489,7 @@ func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
 // returns all failures.
 func AdmissionCheck(pod *v1.Pod, nodeInfo *framework.NodeInfo, includeAllFailures bool) []AdmissionResult {
 	var admissionResults []AdmissionResult
-	insufficientResources := noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead))
+	insufficientResources := noderesources.Fits(pod, nodeInfo)
 	if len(insufficientResources) != 0 {
 		for i := range insufficientResources {
 			admissionResults = append(admissionResults, AdmissionResult{InsufficientResource: &insufficientResources[i]})
@@ -37,9 +37,6 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 
-	"k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
@@ -506,7 +503,6 @@ func TestAdmissionCheck(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodOverhead, true)()
 			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
 			nodeInfo.SetNode(tt.node)
@@ -22,7 +22,6 @@ package feature
 type Features struct {
 	EnablePodAffinityNamespaceSelector bool
 	EnablePodDisruptionBudget          bool
-	EnablePodOverhead                  bool
 	EnableReadWriteOncePod             bool
 	EnableVolumeCapacityPriority       bool
 	EnableCSIStorageCapacity           bool
@@ -91,7 +91,6 @@ func NewBalancedAllocation(baArgs runtime.Object, h framework.Handle, fts featur
 			scorer:              balancedResourceScorer,
 			useRequested:        true,
 			resourceToWeightMap: resToWeightMap,
-			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }
@@ -376,7 +376,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-			p, _ := NewBalancedAllocation(&test.args, fh, feature.Features{EnablePodOverhead: true})
+			p, _ := NewBalancedAllocation(&test.args, fh, feature.Features{})
 			for i := range test.nodes {
 				hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
 				if err != nil {
@@ -78,7 +78,6 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
 type Fit struct {
 	ignoredResources      sets.String
 	ignoredResourceGroups sets.String
-	enablePodOverhead     bool
 	handle                framework.Handle
 	resourceAllocationScorer
 }
@@ -126,7 +125,6 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr
 	return &Fit{
 		ignoredResources:      sets.NewString(args.IgnoredResources...),
 		ignoredResourceGroups: sets.NewString(args.IgnoredResourceGroups...),
-		enablePodOverhead:     fts.EnablePodOverhead,
 		handle:                h,
 		resourceAllocationScorer: *scorePlugin(args),
 	}, nil
@@ -137,8 +135,7 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr
 // the max in each dimension iteratively. In contrast, we sum the resource vectors for
 // regular containers since they run simultaneously.
 //
-// If Pod Overhead is specified and the feature gate is set, the resources defined for Overhead
-// are added to the calculated Resource request sum
+// The resources defined for Overhead should be added to the calculated Resource request sum
 //
 // Example:
 //
@@ -159,7 +156,7 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr
 // Memory: 1G
 //
 // Result: CPU: 3, Memory: 3G
-func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterState {
+func computePodResourceRequest(pod *v1.Pod) *preFilterState {
 	result := &preFilterState{}
 	for _, container := range pod.Spec.Containers {
 		result.Add(container.Resources.Requests)
@@ -171,7 +168,7 @@ func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterSt
 	}
 
 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && enablePodOverhead {
+	if pod.Spec.Overhead != nil {
 		result.Add(pod.Spec.Overhead)
 	}
 	return result
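Note: a worked example of the request computation documented above (max over init containers, sum over regular containers, then overhead), reduced to plain arithmetic rather than the plugin's preFilterState (the values are made up):

```go
package main

import "fmt"

func main() {
	// CPU in millicores.
	initContainers := []int64{500, 2000} // run sequentially: take the max
	containers := []int64{1000, 1500}    // run simultaneously: take the sum
	overhead := int64(250)               // pod.Spec.Overhead, when set

	var request int64
	for _, c := range containers {
		request += c // sum of regular containers: 2500
	}
	for _, ic := range initContainers {
		if ic > request {
			request = ic // init containers can only raise the max
		}
	}
	request += overhead

	fmt.Println(request) // 2750 = max(2000, 1000+1500) + 250
}
```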
@@ -179,7 +176,7 @@ func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterSt
 
 // PreFilter invoked at the prefilter extension point.
 func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
-	cycleState.Write(preFilterStateKey, computePodResourceRequest(pod, f.enablePodOverhead))
+	cycleState.Write(preFilterStateKey, computePodResourceRequest(pod))
 	return nil, nil
 }
@@ -248,8 +245,8 @@ type InsufficientResource struct {
 }
 
 // Fits checks if node have enough resources to host the pod.
-func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, enablePodOverhead bool) []InsufficientResource {
-	return fitsRequest(computePodResourceRequest(pod, enablePodOverhead), nodeInfo, nil, nil)
+func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) []InsufficientResource {
+	return fitsRequest(computePodResourceRequest(pod), nodeInfo, nil, nil)
 }
 
 func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.String) []InsufficientResource {
@@ -472,7 +472,7 @@ func TestEnoughRequests(t *testing.T) {
 				test.args.ScoringStrategy = defaultScoringStrategy
 			}
 
-			p, err := NewFit(&test.args, nil, plfeature.Features{EnablePodOverhead: true})
+			p, err := NewFit(&test.args, nil, plfeature.Features{})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -487,7 +487,7 @@ func TestEnoughRequests(t *testing.T) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
 			}
 
-			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod, true), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
+			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
 			if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
 				t.Errorf("insufficient resources do not match: %+v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
 			}
@@ -500,7 +500,7 @@ func TestPreFilterDisabled(t *testing.T) {
 	nodeInfo := framework.NewNodeInfo()
 	node := v1.Node{}
 	nodeInfo.SetNode(&node)
-	p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{EnablePodOverhead: true})
+	p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -550,7 +550,7 @@ func TestNotEnoughRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{EnablePodOverhead: true})
+			p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -622,7 +622,7 @@ func TestStorageRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{EnablePodOverhead: true})
+			p, err := NewFit(&config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -759,7 +759,7 @@ func TestFitScore(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
 			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
 			args := test.nodeResourcesFitArgs
-			p, err := NewFit(&args, fh, plfeature.Features{EnablePodOverhead: true})
+			p, err := NewFit(&args, fh, plfeature.Features{})
 			if err != nil {
 				t.Fatalf("unexpected error: %v", err)
 			}
@@ -384,7 +384,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
 					Type:      config.LeastAllocated,
 					Resources: test.resources,
 				},
-			}, fh, plfeature.Features{EnablePodOverhead: true})
+			}, fh, plfeature.Features{})
 
 			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
 				t.Fatalf("got err (-want,+got):\n%s", diff)
@@ -340,7 +340,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
 					Type:      config.MostAllocated,
 					Resources: test.resources,
 				},
-			}, fh, plfeature.Features{EnablePodOverhead: true})
+			}, fh, plfeature.Features{})
 
 			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
 				t.Fatalf("got err (-want,+got):\n%s", diff)
@@ -121,7 +121,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
 						Shape: shape,
 					},
 				},
-			}, fh, plfeature.Features{EnablePodOverhead: true})
+			}, fh, plfeature.Features{})
 
 			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
 				t.Fatalf("got err (-want,+got):\n%s", diff)
@@ -340,7 +340,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 				},
 			},
 		}
-		p, err := NewFit(&args, fh, feature.Features{EnablePodOverhead: true})
+		p, err := NewFit(&args, fh, feature.Features{})
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -588,7 +588,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 			},
 		}
 
-		p, err := NewFit(&args, fh, feature.Features{EnablePodOverhead: true})
+		p, err := NewFit(&args, fh, feature.Features{})
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -38,8 +38,6 @@ type resourceAllocationScorer struct {
 	useRequested        bool
 	scorer              func(requested, allocable resourceToValueMap) int64
 	resourceToWeightMap resourceToWeightMap
-
-	enablePodOverhead bool
 }
 
 // resourceToValueMap is keyed with resource name and valued with quantity.
@@ -129,7 +127,7 @@ func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, reso
 	}
 
 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && r.enablePodOverhead {
+	if pod.Spec.Overhead != nil {
 		if quantity, found := pod.Spec.Overhead[resource]; found {
 			podRequest += quantity.Value()
 		}
@@ -46,7 +46,6 @@ import (
 func NewInTreeRegistry() runtime.Registry {
 	fts := plfeature.Features{
 		EnablePodDisruptionBudget:    feature.DefaultFeatureGate.Enabled(features.PodDisruptionBudget),
-		EnablePodOverhead:            feature.DefaultFeatureGate.Enabled(features.PodOverhead),
 		EnableReadWriteOncePod:       feature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
 		EnableVolumeCapacityPriority: feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
 		EnableCSIStorageCapacity:     feature.DefaultFeatureGate.Enabled(features.CSIStorageCapacity),
@@ -736,7 +736,7 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64)
 	}
 
 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil {
 		resPtr.Add(pod.Spec.Overhead)
 		if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
 			non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
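Note: the non-zero CPU tracking above folds overhead in via MilliValue, guarded only by the key's presence. A numeric sketch of that pattern (the quantities are made up):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	overhead := v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}

	non0CPU := int64(2000) // millicores already requested by containers
	// Mirrors calculateResource: add only when the CPU key is present.
	if _, found := overhead[v1.ResourceCPU]; found {
		non0CPU += overhead.Cpu().MilliValue()
	}
	fmt.Println(non0CPU) // 2100
}
```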