Resolved latest review comments
@@ -1092,7 +1092,8 @@ func inPlacePodVerticalScalingInUse(podSpec *api.PodSpec) bool {
 		return false
 	}
 	var inUse bool
-	VisitContainers(podSpec, Containers, func(c *api.Container, containerType ContainerType) bool {
+	containersMask := Containers | InitContainers
+	VisitContainers(podSpec, containersMask, func(c *api.Container, containerType ContainerType) bool {
 		if len(c.ResizePolicy) > 0 {
 			inUse = true
 			return false
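The widened mask makes the in-use check consider init containers as well as regular containers. A minimal sketch of the bitmask visitor contract this relies on; the types and helper below are simplified stand-ins for the real ContainerType and VisitContainers in pkg/api/pod, not the actual definitions:

package main

import "fmt"

// ContainerType mirrors the bitmask used by the pod utilities; the names
// here are assumptions for illustration only.
type ContainerType int

const (
	Containers ContainerType = 1 << iota
	InitContainers
	EphemeralContainers
)

type Container struct{ Name string }

type PodSpec struct {
	Containers     []Container
	InitContainers []Container
}

// visitContainers calls visit for every container selected by mask and
// stops early when visit returns false, the same contract the hunk uses.
func visitContainers(spec *PodSpec, mask ContainerType, visit func(c *Container, t ContainerType) bool) bool {
	if mask&InitContainers != 0 {
		for i := range spec.InitContainers {
			if !visit(&spec.InitContainers[i], InitContainers) {
				return false
			}
		}
	}
	if mask&Containers != 0 {
		for i := range spec.Containers {
			if !visit(&spec.Containers[i], Containers) {
				return false
			}
		}
	}
	return true
}

func main() {
	spec := &PodSpec{
		Containers:     []Container{{Name: "app"}},
		InitContainers: []Container{{Name: "sidecar"}},
	}
	mask := Containers | InitContainers
	visitContainers(spec, mask, func(c *Container, t ContainerType) bool {
		fmt.Println("visited:", c.Name)
		return true
	})
}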
@@ -1297,7 +1298,7 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
 		if c.Name != oldPod.Spec.Containers[i].Name {
 			return // Update is invalid (container mismatch): let validation handle it.
 		}
-		if c.Resources.Requests == nil || cmp.Equal(oldPod.Spec.Containers[i].Resources, c.Resources) {
+		if apiequality.Semantic.DeepEqual(oldPod.Spec.Containers[i].Resources, c.Resources) {
 			continue
 		}
 		newPod.Status.Resize = api.PodResizeStatusProposed
@@ -1310,7 +1311,7 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
 		if c.Name != oldPod.Spec.InitContainers[i].Name {
 			return // Update is invalid (container mismatch): let validation handle it.
 		}
-		if c.Resources.Requests == nil || cmp.Equal(oldPod.Spec.InitContainers[i].Resources, c.Resources) {
+		if apiequality.Semantic.DeepEqual(oldPod.Spec.InitContainers[i].Resources, c.Resources) {
 			continue
 		}
 		newPod.Status.Resize = api.PodResizeStatusProposed
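Both hunks replace the nil-requests guard plus cmp.Equal with apiequality.Semantic.DeepEqual, which compares resource quantities by value rather than by representation. A small self-contained check, assuming the k8s.io/apimachinery module is available:

package main

import (
	"fmt"
	"reflect"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	a := resource.MustParse("1")     // one CPU
	b := resource.MustParse("1000m") // the same amount, different notation

	// Semantic comparison understands Quantity values...
	fmt.Println(apiequality.Semantic.DeepEqual(a, b)) // true
	// ...while structural comparison sees different internal representations.
	fmt.Println(reflect.DeepEqual(a, b)) // false
}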
@@ -2950,13 +2950,13 @@ func TestDropSidecarContainers(t *testing.T) {
 }
 
 func TestMarkPodProposedForResize(t *testing.T) {
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	containerRestartPolicyAlways := api.ContainerRestartPolicyAlways
 	testCases := []struct {
 		desc                 string
 		newPodSpec           api.PodSpec
 		oldPodSpec           api.PodSpec
 		expectProposedResize bool
-		hasSidecarContainer  bool
 	}{
 		{
 			desc: "nil requests",
@@ -3211,8 +3211,7 @@ func TestMarkPodProposedForResize(t *testing.T) {
 			expectProposedResize: false,
 		},
 		{
 			desc: "resources unchanged with sidecar containers",
-			hasSidecarContainer: true,
 			newPodSpec: api.PodSpec{
 				Containers: []api.Container{
 					{
@@ -3262,8 +3261,7 @@ func TestMarkPodProposedForResize(t *testing.T) {
 			expectProposedResize: false,
 		},
 		{
 			desc: "requests resized with sidecar containers",
-			hasSidecarContainer: true,
 			newPodSpec: api.PodSpec{
 				Containers: []api.Container{
 					{
@@ -3274,14 +3272,6 @@ func TestMarkPodProposedForResize(t *testing.T) {
 						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
 					},
 				},
-				{
-					Name:  "c2",
-					Image: "image",
-					Resources: api.ResourceRequirements{
-						Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-					},
-				},
 			},
 			InitContainers: []api.Container{
 				{
@@ -3305,14 +3295,6 @@ func TestMarkPodProposedForResize(t *testing.T) {
 						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
 					},
 				},
-				{
-					Name:  "c2",
-					Image: "image",
-					Resources: api.ResourceRequirements{
-						Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
-						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-					},
-				},
 			},
 			InitContainers: []api.Container{
 				{
@@ -3329,8 +3311,7 @@ func TestMarkPodProposedForResize(t *testing.T) {
 			expectProposedResize: true,
 		},
 		{
 			desc: "limits resized with sidecar containers",
-			hasSidecarContainer: true,
 			newPodSpec: api.PodSpec{
 				Containers: []api.Container{
 					{
@@ -3341,14 +3322,6 @@ func TestMarkPodProposedForResize(t *testing.T) {
 						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
 					},
 				},
-				{
-					Name:  "c2",
-					Image: "image",
-					Resources: api.ResourceRequirements{
-						Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
-					},
-				},
 			},
 			InitContainers: []api.Container{
 				{
@@ -3372,14 +3345,6 @@ func TestMarkPodProposedForResize(t *testing.T) {
 						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
 					},
 				},
-				{
-					Name:  "c2",
-					Image: "image",
-					Resources: api.ResourceRequirements{
-						Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
-						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
-					},
-				},
 			},
 			InitContainers: []api.Container{
 				{
@@ -3395,6 +3360,102 @@ func TestMarkPodProposedForResize(t *testing.T) {
 			},
 			expectProposedResize: true,
 		},
+		{
+			desc: "requests resized should fail with non-sidecar init container",
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+				InitContainers: []api.Container{
+					{
+						Name:  "i1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+				InitContainers: []api.Container{
+					{
+						Name:  "i1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: false,
+		},
+		{
+			desc: "limits resized should fail with non-sidecar init containers",
+			newPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+				InitContainers: []api.Container{
+					{
+						Name:  "i1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
+						},
+					},
+				},
+			},
+			oldPodSpec: api.PodSpec{
+				Containers: []api.Container{
+					{
+						Name:  "c1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+						},
+					},
+				},
+				InitContainers: []api.Container{
+					{
+						Name:  "i1",
+						Image: "image",
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
+							Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
+						},
+					},
+				},
+			},
+			expectProposedResize: false,
+		},
 		{
 			desc: "the number of sidecar containers in the pod has increased; no action should be taken.",
 			newPodSpec: api.PodSpec{
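The two added cases pin down that a resource change on a plain init container must not mark the pod as proposed for resize; only init containers with restartPolicy: Always (sidecars) keep running and can be resized in place. A sketch of that predicate with hypothetical local types, mirroring what podutil.IsRestartableInitContainer checks:

package main

import "fmt"

// ContainerRestartPolicy and the Always constant stand in for the corev1
// types; they are illustrative, not the real API definitions.
type ContainerRestartPolicy string

const ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"

type Container struct {
	Name          string
	RestartPolicy *ContainerRestartPolicy
}

// isRestartableInitContainer mirrors the podutil helper the patch relies on.
func isRestartableInitContainer(c *Container) bool {
	return c.RestartPolicy != nil && *c.RestartPolicy == ContainerRestartPolicyAlways
}

func main() {
	always := ContainerRestartPolicyAlways
	sidecar := &Container{Name: "i1", RestartPolicy: &always}
	plainInit := &Container{Name: "i2"}
	fmt.Println(isRestartableInitContainer(sidecar))   // true  -> resize may be proposed
	fmt.Println(isRestartableInitContainer(plainInit)) // false -> resize is ignored
}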
@@ -3524,7 +3585,6 @@ func TestMarkPodProposedForResize(t *testing.T) {
 	}
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, tc.hasSidecarContainer)
 			newPod := &api.Pod{Spec: tc.newPodSpec}
 			newPodUnchanged := newPod.DeepCopy()
 			oldPod := &api.Pod{Spec: tc.oldPodSpec}
@@ -183,8 +183,7 @@ func SetDefaults_Pod(obj *v1.Pod) {
 				}
 			}
 		}
-		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
-			(obj.Spec.Containers[i].Resources.Requests != nil || obj.Spec.Containers[i].Resources.Limits != nil) {
+		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
 			// For normal containers, set resize restart policy to default value (NotRequired), if not specified..
 			setDefaultResizePolicy(&obj.Spec.Containers[i])
 		}
@@ -201,12 +200,7 @@ func SetDefaults_Pod(obj *v1.Pod) {
 			}
 		}
 		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
-			isRestartableInitContainer := false
-			c := obj.Spec.InitContainers[i]
-			if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
-				isRestartableInitContainer = true
-			}
-			if isRestartableInitContainer && (c.Resources.Requests != nil || c.Resources.Limits != nil) {
+			if obj.Spec.InitContainers[i].RestartPolicy != nil && *obj.Spec.InitContainers[i].RestartPolicy == v1.ContainerRestartPolicyAlways {
 				// For restartable init containers, set resize restart policy to default value (NotRequired), if not specified.
 				setDefaultResizePolicy(&obj.Spec.InitContainers[i])
 			}
@@ -232,6 +226,9 @@ func SetDefaults_Pod(obj *v1.Pod) {
 
 // setDefaultResizePolicy set resize restart policy to default value (NotRequired), if not specified.
 func setDefaultResizePolicy(obj *v1.Container) {
+	if obj.Resources.Requests == nil && obj.Resources.Limits == nil {
+		return
+	}
 	resizePolicySpecified := make(map[v1.ResourceName]bool)
 	for _, p := range obj.ResizePolicy {
 		resizePolicySpecified[p.ResourceName] = true
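With the requests/limits guard moved into setDefaultResizePolicy itself, both call sites in SetDefaults_Pod shrink to a single condition. A self-contained sketch of the resulting defaulting rule, using hypothetical local types in place of the corev1 API:

package main

import "fmt"

type ResourceList map[string]string

type ResourceRequirements struct {
	Requests ResourceList
	Limits   ResourceList
}

type ResizePolicy struct{ ResourceName, RestartPolicy string }

type Container struct {
	Resources    ResourceRequirements
	ResizePolicy []ResizePolicy
}

// setDefaultResizePolicy mirrors the defaulted behavior: skip containers
// with no resources, then fill in NotRequired for any resource that has no
// explicit resize policy.
func setDefaultResizePolicy(c *Container) {
	if c.Resources.Requests == nil && c.Resources.Limits == nil {
		return // nothing to default for a resource-less container
	}
	specified := make(map[string]bool)
	for _, p := range c.ResizePolicy {
		specified[p.ResourceName] = true
	}
	for _, r := range []string{"cpu", "memory"} {
		if !specified[r] {
			c.ResizePolicy = append(c.ResizePolicy, ResizePolicy{r, "NotRequired"})
		}
	}
}

func main() {
	c := &Container{Resources: ResourceRequirements{Requests: ResourceList{"cpu": "100m"}}}
	setDefaultResizePolicy(c)
	fmt.Println(c.ResizePolicy) // [{cpu NotRequired} {memory NotRequired}]
}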
@@ -2985,6 +2985,7 @@ func TestSetDefaultServiceInternalTrafficPolicy(t *testing.T) {
 func TestSetDefaultResizePolicy(t *testing.T) {
 	// verify we default to NotRequired restart policy for resize when resources are specified
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	restartAlways := v1.ContainerRestartPolicyAlways
 	for desc, tc := range map[string]struct {
 		testContainer v1.Container
@@ -3184,14 +3185,12 @@ func TestSetDefaultResizePolicy(t *testing.T) {
 	} {
 		for _, isSidecarContainer := range []bool{true, false} {
 			t.Run(desc, func(t *testing.T) {
-				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, isSidecarContainer)
+				testPod := v1.Pod{}
 				if isSidecarContainer {
 					tc.testContainer.RestartPolicy = &restartAlways
-				}
-				testPod := v1.Pod{}
-				testPod.Spec.Containers = append(testPod.Spec.Containers, tc.testContainer)
-				if isSidecarContainer {
 					testPod.Spec.InitContainers = append(testPod.Spec.InitContainers, tc.testContainer)
+				} else {
+					testPod.Spec.Containers = append(testPod.Spec.Containers, tc.testContainer)
 				}
 				output := roundTrip(t, runtime.Object(&testPod))
 				pod2 := output.(*v1.Pod)
@@ -2812,47 +2812,6 @@ func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) {
 	}
 }
 
-func isPodResizeInProgress(pod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
-	for i := range pod.Spec.Containers {
-		if containerResourcesChanged(&pod.Spec.Containers[i], podStatus) {
-			return true
-		}
-	}
-
-	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
-		for i, c := range pod.Spec.InitContainers {
-			if podutil.IsRestartableInitContainer(&c) {
-				if containerResourcesChanged(&pod.Spec.InitContainers[i], podStatus) {
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
-
-func containerResourcesChanged(c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
-	if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil {
-		if cs.State != kubecontainer.ContainerStateRunning || cs.Resources == nil {
-			return false
-		}
-		if c.Resources.Requests != nil {
-			if cs.Resources.CPURequest != nil && !cs.Resources.CPURequest.Equal(*c.Resources.Requests.Cpu()) {
-				return true
-			}
-		}
-		if c.Resources.Limits != nil {
-			if cs.Resources.CPULimit != nil && !cs.Resources.CPULimit.Equal(*c.Resources.Limits.Cpu()) {
-				return true
-			}
-			if cs.Resources.MemoryLimit != nil && !cs.Resources.MemoryLimit.Equal(*c.Resources.Limits.Memory()) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
 // canResizePod determines if the requested resize is currently feasible.
 // pod should hold the desired (pre-allocated) spec.
 // Returns true if the resize can proceed.
@@ -2947,6 +2906,14 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
 			kl.backOff.Reset(key)
 		}
 	}
+	for i, container := range pod.Spec.InitContainers {
+		if podutil.IsRestartableInitContainer(&container) {
+			if !apiequality.Semantic.DeepEqual(container.Resources, allocatedPod.Spec.InitContainers[i].Resources) {
+				key := kuberuntime.GetStableKey(pod, &container)
+				kl.backOff.Reset(key)
+			}
+		}
+	}
 	allocatedPod = pod
 
 	// Special case when the updated allocation matches the actual resources. This can occur
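The added loop gives restartable init containers the same treatment regular containers already get a few lines above: when the desired resources differ from the allocated ones, the container's crash-loop backoff key is reset so the resize is not delayed. A simplified model of that symmetry; the real code derives the key via kuberuntime.GetStableKey, which is stubbed here by the container name:

package main

import "fmt"

type resources struct{ cpu, memory string }

type container struct {
	name        string
	restartable bool // stands in for podutil.IsRestartableInitContainer
	resources   resources
}

type backOff struct{ resetKeys []string }

func (b *backOff) Reset(key string) { b.resetKeys = append(b.resetKeys, key) }

// resetBackoffForResizedInitContainers mirrors the added loop: only
// restartable init containers whose resources changed get their key reset.
func resetBackoffForResizedInitContainers(desired, allocated []container, b *backOff) {
	for i, c := range desired {
		if c.restartable && c.resources != allocated[i].resources {
			b.Reset(c.name) // the real code uses kuberuntime.GetStableKey(pod, &container)
		}
	}
}

func main() {
	b := &backOff{}
	desired := []container{{name: "i1", restartable: true, resources: resources{cpu: "200m"}}}
	allocated := []container{{name: "i1", restartable: true, resources: resources{cpu: "100m"}}}
	resetBackoffForResizedInitContainers(desired, allocated, b)
	fmt.Println(b.resetKeys) // [i1]
}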
@@ -3825,6 +3825,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
 
 func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	testContainerName := "ctr0"
 	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
 
@@ -3851,10 +3852,9 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 	}
 
 	tests := []struct {
 		name      string
 		pod       *v1.Pod
-		hasSidecarContainer bool
-		oldStatus           *v1.PodStatus
+		oldStatus *v1.PodStatus
 	}{
 		{
 			name: "custom resource in ResourcesAllocated, resize should be null",
@@ -3919,8 +3919,7 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 			},
 		},
 		{
 			name: "custom resource in ResourcesAllocated in case of restartable init containers, resize should be null",
-			hasSidecarContainer: true,
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					UID: "1234560",
@@ -3952,8 +3951,7 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 			},
 		},
 		{
 			name: "cpu/memory resource in ResourcesAllocated in case of restartable init containers, resize should be null",
-			hasSidecarContainer: true,
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					UID: "1234560",
@@ -3987,7 +3985,6 @@ func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, test.hasSidecarContainer)
 			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 			defer testKubelet.Cleanup()
 			kl := testKubelet.kubelet
@@ -3462,218 +3462,6 @@ func TestSyncPodSpans(t *testing.T) {
 	}
 }
 
-func TestIsPodResizeInProgress(t *testing.T) {
-	pod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			UID:       "12345",
-			Name:      "test",
-			Namespace: "default",
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name: "c1",
-					Resources: v1.ResourceRequirements{
-						Requests: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI),
-						},
-						Limits: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(300, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(400, resource.DecimalSI),
-						},
-					},
-				},
-				{
-					Name: "c2",
-					Resources: v1.ResourceRequirements{
-						Requests: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(600, resource.DecimalSI),
-						},
-						Limits: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(700, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(800, resource.DecimalSI),
-						},
-					},
-				},
-			},
-			InitContainers: []v1.Container{
-				{
-					Name: "c3-restartable-init",
-					Resources: v1.ResourceRequirements{
-						Requests: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI),
-						},
-						Limits: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(400, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
-						},
-					},
-					RestartPolicy: &containerRestartPolicyAlways,
-				},
-				{
-					Name: "c4-init",
-					Resources: v1.ResourceRequirements{
-						Requests: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(300, resource.DecimalSI),
-						},
-						Limits: v1.ResourceList{
-							v1.ResourceCPU:    *resource.NewMilliQuantity(400, resource.DecimalSI),
-							v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
-						},
-					},
-				},
-			},
-		},
-	}
-	steadyStateC1Status := &kubecontainer.Status{
-		Name:  "c1",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(400, resource.DecimalSI),
-		},
-	}
-	resizeMemC1Status := &kubecontainer.Status{
-		Name:  "c1",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
-		},
-	}
-	resizeCPUReqC1Status := &kubecontainer.Status{
-		Name:  "c1",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(200, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(300, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(400, resource.DecimalSI),
-		},
-	}
-	resizeCPULimitC1Status := &kubecontainer.Status{
-		Name:  "c1",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(100, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(600, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(400, resource.DecimalSI),
-		},
-	}
-	steadyStateC2Status := &kubecontainer.Status{
-		Name:  "c2",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(500, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(700, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
-		},
-	}
-	steadyStateC3Status := &kubecontainer.Status{
-		Name:  "c3-restartable-init",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(200, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(400, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(500, resource.DecimalSI),
-		},
-	}
-	resizeCPUReqC3Status := &kubecontainer.Status{
-		Name:  "c3-restartable-init",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(300, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(400, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(500, resource.DecimalSI),
-		},
-	}
-	resizeMemLimitC3Status := &kubecontainer.Status{
-		Name:  "c3-restartable-init",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(200, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(400, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(800, resource.DecimalSI),
-		},
-	}
-	resizeCPUReqC4Status := &kubecontainer.Status{
-		Name:  "c4-init",
-		State: kubecontainer.ContainerStateRunning,
-		Resources: &kubecontainer.ContainerResources{
-			CPURequest:  resource.NewMilliQuantity(300, resource.DecimalSI),
-			CPULimit:    resource.NewMilliQuantity(400, resource.DecimalSI),
-			MemoryLimit: resource.NewQuantity(500, resource.DecimalSI),
-		},
-	}
-
-	mkPodStatus := func(containerStatuses ...*kubecontainer.Status) *kubecontainer.PodStatus {
-		return &kubecontainer.PodStatus{
-			ID:                pod.UID,
-			Name:              pod.Name,
-			Namespace:         pod.Namespace,
-			ContainerStatuses: containerStatuses,
-		}
-	}
-	tests := []struct {
-		name         string
-		status       *kubecontainer.PodStatus
-		expectResize bool
-	}{{
-		name:         "steady state",
-		status:       mkPodStatus(steadyStateC1Status, steadyStateC2Status, steadyStateC3Status),
-		expectResize: false,
-	}, {
-		name: "terminated container",
-		status: mkPodStatus(&kubecontainer.Status{
-			Name:      "c1",
-			State:     kubecontainer.ContainerStateExited,
-			Resources: resizeMemC1Status.Resources,
-		}, steadyStateC2Status),
-		expectResize: false,
-	}, {
-		name:         "missing container",
-		status:       mkPodStatus(steadyStateC2Status),
-		expectResize: false,
-	}, {
-		name:         "resizing memory limit",
-		status:       mkPodStatus(resizeMemC1Status, steadyStateC2Status),
-		expectResize: true,
-	}, {
-		name:         "resizing cpu request",
-		status:       mkPodStatus(resizeCPUReqC1Status, steadyStateC2Status),
-		expectResize: true,
-	}, {
-		name:         "resizing cpu limit",
-		status:       mkPodStatus(resizeCPULimitC1Status, steadyStateC2Status),
-		expectResize: true,
-	}, {
-		name:         "resizing cpu request for restartable init container",
-		status:       mkPodStatus(steadyStateC1Status, steadyStateC2Status, resizeCPUReqC3Status),
-		expectResize: true,
-	}, {
-		name:         "resizing memory limit for restartable init container",
-		status:       mkPodStatus(steadyStateC1Status, steadyStateC2Status, resizeMemLimitC3Status),
-		expectResize: true,
-	}, {
-		name:         "non-restartable init container should be ignored",
-		status:       mkPodStatus(steadyStateC1Status, steadyStateC2Status, steadyStateC3Status, resizeCPUReqC4Status),
-		expectResize: false,
-	},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
-			assert.Equal(t, test.expectResize, isPodResizeInProgress(pod, test.status))
-		})
-	}
-}
-
 func TestRecordAdmissionRejection(t *testing.T) {
 	metrics.Register()
 
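TestIsPodResizeInProgress is deleted together with the kubelet-side helpers it covered (removed in the earlier hunk). For reference, the comparison shape those helpers used, desired spec values checked against runtime-reported values with resource.Quantity.Equal, can be reproduced standalone; this sketch assumes only k8s.io/apimachinery:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// actual stands in for the runtime-reported container resources; the
// parameters stand in for the desired pod spec values.
type actual struct{ cpuRequest, cpuLimit, memLimit *resource.Quantity }

func resizeInProgress(specCPUReq, specCPULim, specMemLim resource.Quantity, a actual) bool {
	if a.cpuRequest != nil && !a.cpuRequest.Equal(specCPUReq) {
		return true
	}
	if a.cpuLimit != nil && !a.cpuLimit.Equal(specCPULim) {
		return true
	}
	if a.memLimit != nil && !a.memLimit.Equal(specMemLim) {
		return true
	}
	return false
}

func main() {
	running := actual{
		cpuRequest: resource.NewMilliQuantity(100, resource.DecimalSI),
		cpuLimit:   resource.NewMilliQuantity(300, resource.DecimalSI),
		memLimit:   resource.NewQuantity(400, resource.DecimalSI),
	}
	// The desired memory limit differs from what the runtime reports,
	// so a resize is still pending.
	fmt.Println(resizeInProgress(
		*resource.NewMilliQuantity(100, resource.DecimalSI),
		*resource.NewMilliQuantity(300, resource.DecimalSI),
		*resource.NewQuantity(800, resource.DecimalSI),
		running)) // true
}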
@@ -1097,72 +1097,78 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 				break
 			}
 
-			if container.StartupProbe != nil {
-				startup, found := m.startupManager.Get(status.ID)
-				if !found {
-					// If the startup probe has not been run, wait for it.
-					break
-				}
-				if startup != proberesults.Success {
-					if startup == proberesults.Failure {
-						// If the restartable init container failed the startup probe,
-						// restart it.
-						changes.ContainersToKill[status.ID] = containerToKillInfo{
-							name:      container.Name,
-							container: container,
-							message:   fmt.Sprintf("Init container %s failed startup probe", container.Name),
-							reason:    reasonStartupProbe,
-						}
-						changes.InitContainersToStart = append(changes.InitContainersToStart, i)
-					}
-					break
-				}
-			}
-
-			klog.V(4).InfoS("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
-			if i == (len(pod.Spec.InitContainers) - 1) {
-				podHasInitialized = true
-			} else if !isPreviouslyInitialized {
-				// this init container is initialized for the first time, start the next one
-				changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
-			}
-
-			// Restart running sidecar containers which have had their definition changed.
-			if _, _, changed := containerChanged(container, status); changed {
-				changes.ContainersToKill[status.ID] = containerToKillInfo{
-					name:      container.Name,
-					container: container,
-					message:   fmt.Sprintf("Init container %s definition changed", container.Name),
-					reason:    "",
-				}
-				changes.InitContainersToStart = append(changes.InitContainersToStart, i)
-				break
-			}
-
-			// A restartable init container does not have to take into account its
-			// liveness probe when it determines to start the next init container.
-			if container.LivenessProbe != nil {
-				liveness, found := m.livenessManager.Get(status.ID)
-				if !found {
-					// If the liveness probe has not been run, wait for it.
-					break
-				}
-				if liveness == proberesults.Failure {
-					// If the restartable init container failed the liveness probe,
-					// restart it.
-					changes.ContainersToKill[status.ID] = containerToKillInfo{
-						name:      container.Name,
-						container: container,
-						message:   fmt.Sprintf("Init container %s failed liveness probe", container.Name),
-						reason:    reasonLivenessProbe,
-					}
-					changes.InitContainersToStart = append(changes.InitContainersToStart, i)
-				}
-			}
-
-			if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, i, true, status, changes) {
-				// computePodResizeAction updates 'changes' if resize policy requires restarting this container
+			if podutil.IsRestartableInitContainer(container) {
+				if container.StartupProbe != nil {
+					startup, found := m.startupManager.Get(status.ID)
+					if !found {
+						// If the startup probe has not been run, wait for it.
+						break
+					}
+					if startup != proberesults.Success {
+						if startup == proberesults.Failure {
+							// If the restartable init container failed the startup probe,
+							// restart it.
+							changes.ContainersToKill[status.ID] = containerToKillInfo{
+								name:      container.Name,
+								container: container,
+								message:   fmt.Sprintf("Init container %s failed startup probe", container.Name),
+								reason:    reasonStartupProbe,
+							}
+							changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+						}
+						break
+					}
+				}
+
+				klog.V(4).InfoS("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
+				if i == (len(pod.Spec.InitContainers) - 1) {
+					podHasInitialized = true
+				} else if !isPreviouslyInitialized {
+					// this init container is initialized for the first time, start the next one
+					changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
+				}
+
+				// Restart running sidecar containers which have had their definition changed.
+				if _, _, changed := containerChanged(container, status); changed {
+					changes.ContainersToKill[status.ID] = containerToKillInfo{
+						name:      container.Name,
+						container: container,
+						message:   fmt.Sprintf("Init container %s definition changed", container.Name),
+						reason:    "",
+					}
+					changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+					break
+				}
+
+				// A restartable init container does not have to take into account its
+				// liveness probe when it determines to start the next init container.
+				if container.LivenessProbe != nil {
+					liveness, found := m.livenessManager.Get(status.ID)
+					if !found {
+						// If the liveness probe has not been run, wait for it.
+						break
+					}
+					if liveness == proberesults.Failure {
+						// If the restartable init container failed the liveness probe,
+						// restart it.
+						changes.ContainersToKill[status.ID] = containerToKillInfo{
+							name:      container.Name,
+							container: container,
+							message:   fmt.Sprintf("Init container %s failed liveness probe", container.Name),
+							reason:    reasonLivenessProbe,
+						}
+						changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+						// The container is restarting, so no other actions need to be taken.
+						break
+					}
+				}
+
+				if IsInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, i, true, status, changes) {
+					// computePodResizeAction updates 'changes' if resize policy requires restarting this container
+					break
+				}
+			} else { // init container
+				// nothing do to but wait for it to finish
 				break
 			}
 
@@ -473,8 +473,8 @@ type containerResources struct {
 
 // containerToUpdateInfo contains necessary information to update a container's resources.
 type containerToUpdateInfo struct {
-	// Index of the container in pod.Spec.Containers or pod.Spec.InitContainers that needs resource update
-	apiContainerIdx int
+	// The spec of the container.
+	container *v1.Container
 	// ID of the runtime container that needs resource update
 	kubeContainerID kubecontainer.ContainerID
 	// Desired resources for the running container
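Carrying a *v1.Container in the update record removes the index-plus-flag lookup that callers previously needed to distinguish init containers from regular ones. The contrast, under simplified local types:

package main

import "fmt"

type Container struct{ Name string }

type PodSpec struct {
	Containers     []Container
	InitContainers []Container
}

// Before: an index plus a flag picked which slice to read from.
func byIndex(spec *PodSpec, idx int, isRestartableInit bool) *Container {
	if isRestartableInit {
		return &spec.InitContainers[idx]
	}
	return &spec.Containers[idx]
}

// After: the update record carries the pointer, so the lookup disappears.
type containerToUpdateInfo struct{ container *Container }

func main() {
	spec := &PodSpec{
		Containers:     []Container{{Name: "app"}},
		InitContainers: []Container{{Name: "sidecar"}},
	}
	fmt.Println(byIndex(spec, 0, true).Name) // sidecar

	info := containerToUpdateInfo{container: &spec.InitContainers[0]}
	fmt.Println(info.container.Name) // sidecar
}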
@@ -514,9 +514,6 @@ type podActions struct {
 	// EphemeralContainersToStart is a list of indexes for the ephemeral containers to start,
 	// where the index is the index of the specific container in pod.Spec.EphemeralContainers.
 	EphemeralContainersToStart []int
-	// InitContainersToUpdate keeps a list of restartable init containers (sidecar containers) needing resource update.
-	// Init Containers resource update is applicable only for CPU and memory.
-	InitContainersToUpdate map[v1.ResourceName][]containerToUpdateInfo
 	// ContainersToUpdate keeps a list of containers needing resource update.
 	// Container resource update is applicable only for CPU and memory.
 	ContainersToUpdate map[v1.ResourceName][]containerToUpdateInfo
@@ -525,8 +522,8 @@ type podActions struct {
 }
 
 func (p podActions) String() string {
-	return fmt.Sprintf("KillPod: %t, CreateSandbox: %t, UpdatePodResources: %t, Attempt: %d, InitContainersToStart: %v, ContainersToStart: %v, EphemeralContainersToStart: %v,InitContainersToUpdate: %v, ContainersToUpdate: %v, ContainersToKill: %v",
-		p.KillPod, p.CreateSandbox, p.UpdatePodResources, p.Attempt, p.InitContainersToStart, p.ContainersToStart, p.EphemeralContainersToStart, p.InitContainersToUpdate, p.ContainersToUpdate, p.ContainersToKill)
+	return fmt.Sprintf("KillPod: %t, CreateSandbox: %t, UpdatePodResources: %t, Attempt: %d, InitContainersToStart: %v, ContainersToStart: %v, EphemeralContainersToStart: %v,ContainersToUpdate: %v, ContainersToKill: %v",
+		p.KillPod, p.CreateSandbox, p.UpdatePodResources, p.Attempt, p.InitContainersToStart, p.ContainersToStart, p.EphemeralContainersToStart, p.ContainersToUpdate, p.ContainersToKill)
 }
 
 // containerChanged will determine whether the container has changed based on the fields that will affect the running of the container.
@@ -641,23 +638,19 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
 	}
 	markContainerForUpdate := func(rName v1.ResourceName, specValue, statusValue int64) {
 		cUpdateInfo := containerToUpdateInfo{
-			apiContainerIdx:           containerIdx,
+			container:                 &container,
 			kubeContainerID:           kubeContainerStatus.ID,
 			desiredContainerResources: desiredResources,
 			currentContainerResources: &currentResources,
 		}
 		// Order the container updates such that resource decreases are applied before increases
-		containersToUpdate := changes.ContainersToUpdate
-		if isRestartableInitContainer {
-			containersToUpdate = changes.InitContainersToUpdate
-		}
 		switch {
 		case specValue > statusValue: // append
-			containersToUpdate[rName] = append(containersToUpdate[rName], cUpdateInfo)
+			changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)
 		case specValue < statusValue: // prepend
-			containersToUpdate[rName] = append(containersToUpdate[rName], containerToUpdateInfo{})
-			copy(containersToUpdate[rName][1:], containersToUpdate[rName])
-			containersToUpdate[rName][0] = cUpdateInfo
+			changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})
+			copy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])
+			changes.ContainersToUpdate[rName][0] = cUpdateInfo
 		}
 	}
 	resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
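markContainerForUpdate keeps its ordering rule, now against changes.ContainersToUpdate directly: decreases are prepended and increases appended, so shrinking happens first and requests stay at or below limits during the transition. A standalone sketch of that ordering:

package main

import "fmt"

type update struct {
	name               string
	specValue, current int64
}

// orderUpdates mirrors the append/prepend rule: resource decreases are
// placed at the front so they are applied before increases.
func orderUpdates(updates []update) []update {
	var ordered []update
	for _, u := range updates {
		switch {
		case u.specValue > u.current: // increase -> append
			ordered = append(ordered, u)
		case u.specValue < u.current: // decrease -> prepend
			ordered = append([]update{u}, ordered...)
		}
	}
	return ordered
}

func main() {
	got := orderUpdates([]update{
		{name: "grow-c1", specValue: 400, current: 200},
		{name: "shrink-c2", specValue: 100, current: 300},
	})
	fmt.Println(got[0].name, got[1].name) // shrink-c2 grow-c1
}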
@@ -749,16 +742,6 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
 				return err
 			}
 		}
-		if len(podContainerChanges.InitContainersToUpdate[rName]) > 0 {
-			updateContainerResults, errUpdate := m.updatePodContainerResources(ctx, pod, rName, podContainerChanges.InitContainersToUpdate[rName], true)
-			for _, containerResult := range updateContainerResults {
-				result.AddSyncResult(containerResult)
-			}
-			if errUpdate != nil {
-				return errUpdate
-			}
-		}
-
 		// At downsizing, requests should shrink prior to limits in order to keep "requests <= limits".
 		if newPodCgReqValue < currPodCgReqValue {
 			if err = setPodCgroupConfig(rName, false); err != nil {
@@ -829,17 +812,11 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
 	}
 }
 
-func (m *kubeGenericRuntimeManager) updatePodContainerResources(ctx context.Context, pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo, isRestartableInitContainer bool) (updateResults []*kubecontainer.SyncResult, err error) {
+func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo) error {
 	klog.V(5).InfoS("Updating container resources", "pod", klog.KObj(pod))
-	var container *v1.Container
 
 	for _, cInfo := range containersToUpdate {
-		var updateContainerResult *kubecontainer.SyncResult
-		if isRestartableInitContainer {
-			container = pod.Spec.InitContainers[cInfo.apiContainerIdx].DeepCopy()
-		} else {
-			container = pod.Spec.Containers[cInfo.apiContainerIdx].DeepCopy()
-		}
+		container := cInfo.container.DeepCopy()
 		// If updating memory limit, use most recently configured CPU request and limit values.
 		// If updating CPU request and limit, use most recently configured memory request and limit values.
 		switch resourceName {
@@ -1035,9 +1012,6 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 			return changes
 		}
 	} else {
-		if IsInPlacePodVerticalScalingAllowed(pod) {
-			changes.InitContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)
-		}
 		hasInitialized := m.computeInitContainerActions(pod, podStatus, &changes)
 		if changes.KillPod || !hasInitialized {
 			// Initialization failed or still in progress. Skip inspecting non-init
|
|||||||
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
|
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
|
||||||
v1.ResourceMemory: {
|
v1.ResourceMemory: {
|
||||||
{
|
{
|
||||||
apiContainerIdx: 1,
|
container: &pod.Spec.Containers[1],
|
||||||
kubeContainerID: kcs.ID,
|
kubeContainerID: kcs.ID,
|
||||||
desiredContainerResources: containerResources{
|
desiredContainerResources: containerResources{
|
||||||
memoryLimit: mem100M.Value(),
|
memoryLimit: mem100M.Value(),
|
||||||
@@ -2260,7 +2260,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 					},
 					v1.ResourceCPU: {
 						{
-							apiContainerIdx:           1,
+							container:                 &pod.Spec.Containers[1],
 							kubeContainerID:           kcs.ID,
 							desiredContainerResources: containerResources{
 								memoryLimit: mem100M.Value(),
@@ -2299,7 +2299,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 				ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
 					v1.ResourceCPU: {
 						{
-							apiContainerIdx:           1,
+							container:                 &pod.Spec.Containers[1],
 							kubeContainerID:           kcs.ID,
 							desiredContainerResources: containerResources{
 								memoryLimit: mem100M.Value(),
@@ -2338,7 +2338,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 				ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
 					v1.ResourceMemory: {
 						{
-							apiContainerIdx:           2,
+							container:                 &pod.Spec.Containers[2],
 							kubeContainerID:           kcs.ID,
 							desiredContainerResources: containerResources{
 								memoryLimit: mem200M.Value(),
@@ -2506,7 +2506,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 				ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
 					v1.ResourceMemory: {
 						{
-							apiContainerIdx:           1,
+							container:                 &pod.Spec.Containers[1],
 							kubeContainerID:           kcs.ID,
 							desiredContainerResources: containerResources{
 								memoryLimit: mem200M.Value(),
@@ -2546,7 +2546,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 				ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
 					v1.ResourceCPU: {
 						{
-							apiContainerIdx:           2,
+							container:                 &pod.Spec.Containers[2],
 							kubeContainerID:           kcs.ID,
 							desiredContainerResources: containerResources{
 								memoryLimit: mem100M.Value(),
@@ -2665,80 +2665,15 @@ func TestUpdatePodContainerResources(t *testing.T) {
 			invokeUpdateResources:   true,
 			expectedCurrentLimits:   []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
 			expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
-			expectedResults: []*kubecontainer.SyncResult{
-				{
-					Action: kubecontainer.UpdateContainerMemory,
-					Target: pod.Spec.Containers[0].Name,
-				},
-				{
-					Action: kubecontainer.UpdateContainerMemory,
-					Target: pod.Spec.Containers[1].Name,
-				},
-				{
-					Action: kubecontainer.UpdateContainerMemory,
-					Target: pod.Spec.Containers[2].Name,
-				},
-			},
-		},
-		"Guaranteed QoS Pod - CPU & memory resize requested, update CPU, error occurs": {
-			resourceName: v1.ResourceCPU,
-			apiSpecResources: []v1.ResourceRequirements{
-				{Limits: res150m150Mi, Requests: res150m150Mi},
-				{Limits: res250m250Mi, Requests: res250m250Mi},
-				{Limits: res350m350Mi, Requests: res350m350Mi},
-			},
-			apiStatusResources: []v1.ResourceRequirements{
-				{Limits: res100m100Mi, Requests: res100m100Mi},
-				{Limits: res200m200Mi, Requests: res200m200Mi},
-				{Limits: res300m300Mi, Requests: res300m300Mi},
-			},
-			requiresRestart:         []bool{false, false, false},
-			invokeUpdateResources:   true,
-			injectedError:           fakeError,
-			expectedCurrentLimits:   []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
-			expectedCurrentRequests: []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
-			expectedResults: []*kubecontainer.SyncResult{
-				{
-					Action:  kubecontainer.UpdateContainerCPU,
-					Target:  pod.Spec.Containers[0].Name,
-					Error:   kubecontainer.ErrUpdateContainerCPU,
-					Message: fakeError.Error(),
-				},
-			},
-		},
-		"Guaranteed QoS Pod - CPU & memory resize requested, update memory, error occurs": {
-			resourceName: v1.ResourceMemory,
-			apiSpecResources: []v1.ResourceRequirements{
-				{Limits: res150m150Mi, Requests: res150m150Mi},
-				{Limits: res250m250Mi, Requests: res250m250Mi},
-				{Limits: res350m350Mi, Requests: res350m350Mi},
-			},
-			apiStatusResources: []v1.ResourceRequirements{
-				{Limits: res100m100Mi, Requests: res100m100Mi},
-				{Limits: res200m200Mi, Requests: res200m200Mi},
-				{Limits: res300m300Mi, Requests: res300m300Mi},
-			},
-			requiresRestart:         []bool{false, false, false},
-			invokeUpdateResources:   true,
-			injectedError:           fakeError,
-			expectedCurrentLimits:   []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
-			expectedCurrentRequests: []v1.ResourceList{res100m100Mi, res200m200Mi, res300m300Mi},
-			expectedResults: []*kubecontainer.SyncResult{
-				{
-					Action:  kubecontainer.UpdateContainerMemory,
-					Target:  pod.Spec.Containers[0].Name,
-					Error:   kubecontainer.ErrUpdateContainerMemory,
-					Message: fakeError.Error(),
-				},
-			},
 		},
 	} {
 		var containersToUpdate []containerToUpdateInfo
 		for idx := range pod.Spec.Containers {
 			// default resize policy when pod resize feature is enabled
-			pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
+			pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx]
+			pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
 			cInfo := containerToUpdateInfo{
-				apiContainerIdx: idx,
+				container:       &pod.Spec.Containers[idx],
 				kubeContainerID: kubecontainer.ContainerID{},
 				desiredContainerResources: containerResources{
 					memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
@@ -2753,28 +2688,20 @@ func TestUpdatePodContainerResources(t *testing.T) {
|
|||||||
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
|
cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
initContainersToUpdate = append(initContainersToUpdate, cInfo)
|
containersToUpdate = append(containersToUpdate, cInfo)
|
||||||
}
|
}
|
||||||
fakeRuntime.Called = []string{}
|
fakeRuntime.Called = []string{}
|
||||||
if tc.injectedError != nil {
|
err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate)
|
||||||
fakeRuntime.InjectError("UpdateContainerResources", tc.injectedError)
|
require.NoError(t, err, dsc)
|
||||||
}
|
|
||||||
updateContainerResults, err := m.updatePodContainerResources(context.TODO(), pod, tc.resourceName, containersToUpdate, false)
|
|
||||||
assert.ElementsMatch(t, tc.expectedResults, updateContainerResults)
|
|
||||||
if tc.injectedError == nil {
|
|
||||||
require.NoError(t, err, dsc)
|
|
||||||
} else {
|
|
||||||
require.EqualError(t, err, tc.injectedError.Error(), dsc)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.invokeUpdateResources {
|
if tc.invokeUpdateResources {
|
||||||
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
|
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
|
||||||
}
|
}
|
||||||
for idx := range pod.Spec.InitContainers {
|
for idx := range pod.Spec.Containers {
|
||||||
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), initContainersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
|
assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
|
||||||
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), initContainersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
|
assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
|
||||||
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), initContainersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
|
assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
|
||||||
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), initContainersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
|
assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
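The error cases removed above worked by arming the fake runtime so that the next UpdateContainerResources call fails. In isolation the pattern looks roughly like the sketch below; FakeRuntime here is a hypothetical stand-in for illustration, not the actual kuberuntime fake:

    // Minimal stand-in for the fake-runtime error-injection pattern
    // exercised by the removed test cases.
    package main

    import (
        "errors"
        "fmt"
    )

    type FakeRuntime struct {
        Called      []string
        injectedErr error
    }

    // InjectError arms the fake so the next UpdateContainerResources call fails.
    func (f *FakeRuntime) InjectError(method string, err error) { f.injectedErr = err }

    // UpdateContainerResources records the call and returns any armed error.
    func (f *FakeRuntime) UpdateContainerResources(name string) error {
        f.Called = append(f.Called, "UpdateContainerResources")
        return f.injectedErr
    }

    func main() {
        rt := &FakeRuntime{}
        rt.InjectError("UpdateContainerResources", errors.New("fake error"))
        err := rt.UpdateContainerResources("c1")
        fmt.Println(rt.Called, err) // [UpdateContainerResources] fake error
    }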
@@ -2821,11 +2748,9 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
        apiSpecResources        []v1.ResourceRequirements
        apiStatusResources      []v1.ResourceRequirements
        requiresRestart         []bool
-        injectedError           error
        invokeUpdateResources   bool
        expectedCurrentLimits   []v1.ResourceList
        expectedCurrentRequests []v1.ResourceList
-        expectedResults         []*kubecontainer.SyncResult
    }{
        "Guaranteed QoS Pod - CPU & memory resize requested, update CPU": {
            resourceName: v1.ResourceCPU,
@@ -2843,20 +2768,6 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
            invokeUpdateResources:   true,
            expectedCurrentLimits:   []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
            expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
-            expectedResults: []*kubecontainer.SyncResult{
-                {
-                    Action: kubecontainer.UpdateContainerCPU,
-                    Target: pod.Spec.InitContainers[0].Name,
-                },
-                {
-                    Action: kubecontainer.UpdateContainerCPU,
-                    Target: pod.Spec.InitContainers[1].Name,
-                },
-                {
-                    Action: kubecontainer.UpdateContainerCPU,
-                    Target: pod.Spec.InitContainers[2].Name,
-                },
-            },
        },
        "Guaranteed QoS Pod - CPU & memory resize requested, update memory": {
            resourceName: v1.ResourceMemory,
@@ -2874,20 +2785,6 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
            invokeUpdateResources:   true,
            expectedCurrentLimits:   []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
            expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
-            expectedResults: []*kubecontainer.SyncResult{
-                {
-                    Action: kubecontainer.UpdateContainerMemory,
-                    Target: pod.Spec.InitContainers[0].Name,
-                },
-                {
-                    Action: kubecontainer.UpdateContainerMemory,
-                    Target: pod.Spec.InitContainers[1].Name,
-                },
-                {
-                    Action: kubecontainer.UpdateContainerMemory,
-                    Target: pod.Spec.InitContainers[2].Name,
-                },
-            },
        },
    } {
        var initContainersToUpdate []containerToUpdateInfo
@@ -2896,7 +2793,7 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
            pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx]
            pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
            cInfo := containerToUpdateInfo{
-                apiContainerIdx: idx,
+                container:       &pod.Spec.InitContainers[idx],
                kubeContainerID: kubecontainer.ContainerID{},
                desiredContainerResources: containerResources{
                    memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
@@ -2914,14 +2811,8 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) {
            initContainersToUpdate = append(initContainersToUpdate, cInfo)
        }
        fakeRuntime.Called = []string{}
-        updateContainerResults, err := m.updatePodContainerResources(context.TODO(), pod, tc.resourceName, initContainersToUpdate, true)
-        assert.ElementsMatch(t, tc.expectedResults, updateContainerResults)
-        if tc.injectedError == nil {
-            require.NoError(t, err, dsc)
-        } else {
-            require.EqualError(t, err, tc.injectedError.Error(), dsc)
-        }
+        err := m.updatePodContainerResources(pod, tc.resourceName, initContainersToUpdate)
+        require.NoError(t, err, dsc)
        if tc.invokeUpdateResources {
            assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
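The test above drives resize for restartable init containers, that is, init containers whose restartPolicy is Always (sidecars). A minimal sketch of how such a container is declared with the real k8s.io/api types (the image name is illustrative):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        always := v1.ContainerRestartPolicyAlways
        sidecar := v1.Container{
            Name:          "c1-init",
            Image:         "registry.k8s.io/pause:3.9", // illustrative
            RestartPolicy: &always,                     // marks the init container as restartable
        }
        // The check mirrored throughout this diff:
        isSidecar := sidecar.RestartPolicy != nil && *sidecar.RestartPolicy == v1.ContainerRestartPolicyAlways
        fmt.Println(isSidecar) // true
    }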
@@ -262,12 +262,40 @@ func (m *manager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) {
    return updatePodFromAllocation(pod, allocs)
}

-func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*v1.Pod, bool) {
+/* func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*v1.Pod, bool) {
    allocated, found := allocs[string(pod.UID)]
    if !found {
        return pod, false
    }
+
+    updated := false
+    updateContainerResources := func(c *v1.Container) {
+        if cAlloc, ok := allocated[c.Name]; ok {
+            if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
+                if !updated {
+                    pod = pod.DeepCopy()
+                    updated = true
+                }
+                c.Resources = *cAlloc.DeepCopy()
+            }
+        }
+    }
+
+    for i := range pod.Spec.Containers {
+        updateContainerResources(&pod.Spec.Containers[i])
+    }
+    for i := range pod.Spec.InitContainers {
+        updateContainerResources(&pod.Spec.InitContainers[i])
+    }
+    return pod, updated
+} */
+
+// TODO(vibansal): Refactor this function to something above commented code.
+func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*v1.Pod, bool) {
+    allocated, found := allocs[string(pod.UID)]
+    if !found {
+        return pod, false
+    }
    updated := false
    for i, c := range pod.Spec.Containers {
        if cAlloc, ok := allocated[c.Name]; ok {
@@ -283,23 +311,20 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*v1.Pod, bool) {
            }
        }
    }
-    if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
-        for i, c := range pod.Spec.InitContainers {
-            if podutil.IsRestartableInitContainer(&c) {
-                if cAlloc, ok := allocated[c.Name]; ok {
-                    if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
-                        // Allocation differs from pod spec, update
-                        if !updated {
-                            // If this is the first update, copy the pod
-                            pod = pod.DeepCopy()
-                            updated = true
-                        }
-                        pod.Spec.InitContainers[i].Resources = cAlloc
-                    }
-                }
-            }
-        }
-    }
+    for i, c := range pod.Spec.InitContainers {
+        if cAlloc, ok := allocated[c.Name]; ok {
+            if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
+                // Allocation differs from pod spec, update
+                if !updated {
+                    // If this is the first update, copy the pod
+                    pod = pod.DeepCopy()
+                    updated = true
+                }
+                pod.Spec.InitContainers[i].Resources = cAlloc
+            }
+        }
+    }

    return pod, updated
}
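Both variants of updatePodFromAllocation above use the same copy-on-first-write idiom: the pod is deep-copied at most once, on the first container whose resources differ from the recorded allocation, so an unchanged pod is returned untouched. The same idiom in isolation, with toy types instead of the Kubernetes ones:

    package main

    import "fmt"

    type Spec struct{ CPU map[string]string }
    type Pod struct{ Spec Spec }

    // DeepCopy returns an independent copy, including the map.
    func (p *Pod) DeepCopy() *Pod {
        cp := *p
        cp.Spec.CPU = map[string]string{}
        for k, v := range p.Spec.CPU {
            cp.Spec.CPU[k] = v
        }
        return &cp
    }

    func applyAllocs(pod *Pod, allocs map[string]string) (*Pod, bool) {
        updated := false
        for name, cpu := range allocs {
            if pod.Spec.CPU[name] != cpu {
                if !updated {
                    pod = pod.DeepCopy() // copy once, on the first change
                    updated = true
                }
                pod.Spec.CPU[name] = cpu
            }
        }
        return pod, updated
    }

    func main() {
        orig := &Pod{Spec: Spec{CPU: map[string]string{"c1": "100m"}}}
        got, changed := applyAllocs(orig, map[string]string{"c1": "200m"})
        fmt.Println(changed, got != orig, orig.Spec.CPU["c1"]) // true true 100m
    }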
@@ -314,7 +339,6 @@ func (m *manager) GetPodResizeStatus(podUID types.UID) v1.PodResizeStatus {
func (m *manager) SetPodAllocation(pod *v1.Pod) error {
    m.podStatusesLock.RLock()
    defer m.podStatusesLock.RUnlock()
-
    for _, container := range pod.Spec.Containers {
        alloc := *container.Resources.DeepCopy()
        if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil {
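SetPodAllocation above checkpoints each container's declared resources keyed by pod UID and container name. A rough in-memory analogue of that state store (the real state.PodResourceAllocation is persisted by the kubelet and may differ in detail, so treat the type shape here as an assumption):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // podResourceAllocation: pod UID -> container name -> requirements.
    type podResourceAllocation map[string]map[string]v1.ResourceRequirements

    func (a podResourceAllocation) set(podUID, container string, req v1.ResourceRequirements) {
        if a[podUID] == nil {
            a[podUID] = map[string]v1.ResourceRequirements{}
        }
        a[podUID][container] = *req.DeepCopy() // store a copy, never the live object
    }

    func main() {
        allocs := podResourceAllocation{}
        allocs.set("pod-uid-1", "c1", v1.ResourceRequirements{
            Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
        })
        got := allocs["pod-uid-1"]["c1"]
        fmt.Println(got.Requests.Cpu()) // 100m
    }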
@@ -144,7 +144,7 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
    reqs := reuseOrClearResourceList(opts.Reuse)
    var containerStatuses map[string]*v1.ContainerStatus
    if opts.UseStatusResources {
-        containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
+        containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
        for i := range pod.Status.ContainerStatuses {
            containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
        }
@@ -158,7 +158,7 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
        if opts.UseStatusResources {
            cs, found := containerStatuses[container.Name]
            if found && cs.Resources != nil {
-                containerReqs = setContainerReqs(pod, &container, cs)
+                containerReqs = determineContainerReqs(pod, &container, cs)
            }
        }

@@ -188,7 +188,7 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
            if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
                cs, found := containerStatuses[container.Name]
                if found && cs.Resources != nil {
-                    containerReqs = setContainerReqs(pod, &container, cs)
+                    containerReqs = determineContainerReqs(pod, &container, cs)
                }
            }
        }
@@ -221,14 +221,22 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
    return reqs
}

-// setContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
-func setContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
+func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
    if pod.Status.Resize == v1.PodResizeStatusInfeasible {
        return cs.Resources.Requests.DeepCopy()
    }
    return max(container.Resources.Requests, cs.Resources.Requests)
}

+// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
+func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+    if pod.Status.Resize == v1.PodResizeStatusInfeasible {
+        return cs.Resources.Limits.DeepCopy()
+    }
+    return max(container.Resources.Limits, cs.Resources.Limits)
+}
+
// applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values
func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList {
    cp := v1.ResourceList{}
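determineContainerReqs and determineContainerLimits, introduced above, pick the larger of the spec and last-observed status value per resource while a resize is unresolved, and fall back to the status values outright when the resize is Infeasible. A worked sketch of the max semantics; maxResourceList is a stand-in for the package's unexported max helper:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // maxResourceList keeps, per resource name, the larger quantity.
    func maxResourceList(a, b v1.ResourceList) v1.ResourceList {
        out := a.DeepCopy()
        for name, bv := range b {
            if av, ok := out[name]; !ok || bv.Cmp(av) > 0 {
                out[name] = bv.DeepCopy()
            }
        }
        return out
    }

    func main() {
        spec := v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}   // desired (resized) value
        status := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")} // last observed value
        m := maxResourceList(spec, status)
        fmt.Println(m.Cpu()) // 4: while the resize is unresolved, assume the larger value
        // When pod.Status.Resize is Infeasible, the helpers return the
        // status values (2 here) instead.
    }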
@@ -282,10 +290,13 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
    limits := reuseOrClearResourceList(opts.Reuse)
    var containerStatuses map[string]*v1.ContainerStatus
    if opts.UseStatusResources {
-        containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
+        containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
        for i := range pod.Status.ContainerStatuses {
            containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
        }
+        for i := range pod.Status.InitContainerStatuses {
+            containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
+        }
    }

    for _, container := range pod.Spec.Containers {
@@ -293,11 +304,7 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
        if opts.UseStatusResources {
            cs, found := containerStatuses[container.Name]
            if found && cs.Resources != nil {
-                if pod.Status.Resize == v1.PodResizeStatusInfeasible {
-                    containerLimits = cs.Resources.Limits.DeepCopy()
-                } else {
-                    containerLimits = max(container.Resources.Limits, cs.Resources.Limits)
-                }
+                containerLimits = determineContainerLimits(pod, &container, cs)
            }
        }

@@ -318,6 +325,15 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
    // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
    for _, container := range pod.Spec.InitContainers {
        containerLimits := container.Resources.Limits
+        if opts.UseStatusResources {
+            if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
+                cs, found := containerStatuses[container.Name]
+                if found && cs.Resources != nil {
+                    containerLimits = determineContainerLimits(pod, &container, cs)
+                }
+            }
+        }
+
        // Is the init container marked as a restartable init container?
        if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
            addResourceList(limits, containerLimits)
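With the hunk above, a restartable init container's limits are aggregated into the pod's limits the same way a regular container's are. A sketch of the resulting addition, mirroring what addResourceList does in spirit:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        appLimits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}     // regular container
        sidecarLimits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")} // restartable init container

        total := appLimits.DeepCopy()
        for name, q := range sidecarLimits {
            if cur, ok := total[name]; ok {
                cur.Add(q)
                total[name] = cur
            } else {
                total[name] = q.DeepCopy()
            }
        }
        fmt.Println(total.Cpu()) // 750m
    }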
@@ -292,7 +292,6 @@ func TestPodResourceRequests(t *testing.T) {
        podResizeStatus     v1.PodResizeStatus
        initContainers      []v1.Container
        initContainerStatus []v1.ContainerStatus
-        hasSidecarContainer bool
        containers          []v1.Container
        containerStatus     []v1.ContainerStatus
        expectedRequests    v1.ResourceList
@@ -429,7 +428,7 @@ func TestPodResourceRequests(t *testing.T) {
            },
        },
        {
-            description: "resized without sidecar containers, infeasible",
+            description: "resized, infeasible",
            expectedRequests: v1.ResourceList{
                v1.ResourceCPU: resource.MustParse("2"),
            },
@@ -457,7 +456,7 @@ func TestPodResourceRequests(t *testing.T) {
            },
        },
        {
-            description: "resized with no sidecar containers, no resize status",
+            description: "resized, no resize status",
            expectedRequests: v1.ResourceList{
                v1.ResourceCPU: resource.MustParse("4"),
            },
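The renamed cases above exercise pod-level requests around resize. For sidecars more generally, KEP-753 makes each non-restartable init container account for the sidecars started before it, while the main containers run alongside all sidecars. A worked example in plain numbers (Go 1.21+ for the built-in max):

    package main

    import "fmt"

    // Worked example of the KEP-753 accounting (values in CPU millicores).
    func main() {
        sidecar := 100      // restartable init container, runs for the pod's lifetime
        initCtr := 300      // regular init container, runs after the sidecar starts
        appCtrs := 200 + 50 // the two main containers

        initStage := sidecar + initCtr // 400: the init container runs beside the sidecar
        runStage := sidecar + appCtrs  // 350: main containers run beside the sidecar
        fmt.Println(max(initStage, runStage)) // 400: the pod's effective CPU request
    }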
@@ -968,131 +968,6 @@ func doPodResizeTests() {
                },
            },
        },
-        /*{
-            name: "Guaranteed QoS pod, one restartable init container - increase CPU & memory",
-            containers: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                    CPUPolicy: &noRestart,
-                    MemPolicy: &noRestart,
-                },
-                {
-                    Name:                 "c1-init",
-                    Resources:            &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                    CPUPolicy:            &noRestart,
-                    MemPolicy:            &noRestart,
-                    IsRestartableInitCtr: true,
-                },
-            },
-            patchString: `{"spec":{"initcontainers":[
-                {"name":"c1-init", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
-            ]}}`,
-            expected: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                    CPUPolicy: &noRestart,
-                    MemPolicy: &noRestart,
-                },
-                {
-                    Name:                 "c1-init",
-                    Resources:            &e2epod.ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
-                    CPUPolicy:            &noRestart,
-                    MemPolicy:            &noRestart,
-                    IsRestartableInitCtr: true,
-                },
-            },
-        },
-        {
-            name: "Guaranteed QoS pod, one restartable init container - decrease CPU & memory",
-            containers: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
-                    CPUPolicy: &noRestart,
-                    MemPolicy: &noRestart,
-                },
-                {
-                    Name:                 "c1-init",
-                    Resources:            &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
-                    CPUPolicy:            &noRestart,
-                    MemPolicy:            &noRestart,
-                    IsRestartableInitCtr: true,
-                },
-            },
-            patchString: `{"spec":{"initcontainers":[
-                {"name":"c1-init", "resources":{"requests":{"cpu":"100m","memory":"250Mi"},"limits":{"cpu":"100m","memory":"250Mi"}}}
-            ]}}`,
-            expected: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
-                    CPUPolicy: &noRestart,
-                    MemPolicy: &noRestart,
-                },
-                {
-                    Name:      "c1-init",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "250Mi", MemLim: "250Mi"},
-                    CPUPolicy: &noRestart,
-                    MemPolicy: &noRestart,
-                },
-            },
-        },
-        {
-            name: "Guaranteed QoS pod, one restartable init container - increase CPU & decrease memory",
-            containers: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                },
-                {
-                    Name:                 "c1-init",
-                    Resources:            &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                    IsRestartableInitCtr: true,
-                },
-            },
-            patchString: `{"spec":{"initcontainers":[
-                {"name":"c1-init", "resources":{"requests":{"cpu":"200m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"100Mi"}}}
-            ]}}`,
-            expected: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                },
-                {
-                    Name:      "c1-init",
-                    Resources: &e2epod.ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "100Mi", MemLim: "100Mi"},
-                },
-            },
-        },
-        {
-            name: "Guaranteed QoS pod, one restartable init container - decrease CPU & increase memory",
-            containers: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                },
-                {
-                    Name:                 "c1-init",
-                    Resources:            &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                    IsRestartableInitCtr: true,
-                },
-            },
-            patchString: `{"spec":{"initcontainers":[
-                {"name":"c1-init", "resources":{"requests":{"cpu":"50m","memory":"300Mi"},"limits":{"cpu":"50m","memory":"300Mi"}}}
-            ]}}`,
-            expected: []e2epod.ResizableContainerInfo{
-                {
-                    Name:      "c1",
-                    Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
-                },
-                {
-                    Name:      "c1-init",
-                    Resources: &e2epod.ContainerResources{CPUReq: "50m", CPULim: "50m", MemReq: "300Mi", MemLim: "300Mi"},
-                },
-            },
-        },*/
    }

    for idx := range tests {
@@ -1128,7 +1003,6 @@ func doPodResizeTests() {
        ginkgo.By("verifying initial pod resources are as expected")
        e2epod.VerifyPodResources(newPod, tc.containers)
-
        ginkgo.By("verifying initial pod resize policy is as expected")
        e2epod.VerifyPodResizePolicy(newPod, tc.containers)
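The e2e cases that remain resize regular containers through a strategic-merge patch on spec.containers. A self-contained sketch of the payload shape (names and values illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        patch := `{"spec":{"containers":[
            {"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
        ]}}`
        var v map[string]any
        fmt.Println(json.Unmarshal([]byte(patch), &v) == nil) // true: well-formed
    }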
@@ -98,12 +98,11 @@ func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
}

type ResizableContainerInfo struct {
    Name         string
    Resources    *ContainerResources
    CPUPolicy    *v1.ResourceResizeRestartPolicy
    MemPolicy    *v1.ResourceResizeRestartPolicy
    RestartCount int32
-    IsRestartableInitCtr bool
}

type containerPatch struct {
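With IsRestartableInitCtr gone, ResizableContainerInfo describes only regular resizable containers. Typical construction, assuming the e2e helper package path these helpers live under (k8s.io/kubernetes/test/e2e/framework/pod):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    func main() {
        noRestart := v1.NotRequired
        tc := e2epod.ResizableContainerInfo{
            Name:      "c1",
            Resources: &e2epod.ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
            CPUPolicy: &noRestart,
            MemPolicy: &noRestart,
        }
        fmt.Println(tc.Name)
    }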
@@ -186,7 +185,6 @@ func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
    ginkgo.GinkgoHelper()
    gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
-
    for i, wantCtr := range wantCtrs {
        gotCtr := &gotPod.Spec.Containers[i]
        ctr := makeResizableContainer(wantCtr)
@@ -198,7 +196,6 @@ func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
    ginkgo.GinkgoHelper()
    gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
-
    for i, wantCtr := range wantCtrs {
        gotCtr := &gotPod.Spec.Containers[i]
        ctr := makeResizableContainer(wantCtr)
@@ -211,11 +208,11 @@ func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
    ginkgo.GinkgoHelper()

    var errs []error

    if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) {
        return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
            len(gotPod.Status.ContainerStatuses), len(wantCtrs))
    }

    for i, wantCtr := range wantCtrs {
        gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
        ctr := makeResizableContainer(wantCtr)
@@ -372,6 +369,7 @@ func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {
        cPatch.Resources.Requests.Memory = container.Resources.MemReq
        cPatch.Resources.Limits.CPU = container.Resources.CPULim
        cPatch.Resources.Limits.Memory = container.Resources.MemLim
+
        patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
    }