Mirror of https://github.com/outbackdingo/kubernetes.git (synced 2026-01-27 10:19:35 +00:00)
Merge pull request #132427 from soma00333/kuberuntime-contextual-logging-1
feat(kubelet): migrate kuberuntime to contextual logging
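The diff below moves the kuberuntime package from klog's global calls (klog.InfoS, klog.ErrorS) to contextual logging: a context.Context is threaded through the call chain and each function derives its logger with klog.FromContext(ctx). The golangci/logcheck configuration hunks appear to enable the "contextual" check for pkg/kubelet/kuberuntime so the global calls cannot creep back in. As a rough illustration of the pattern only (not code from this PR; the function and value names here are made up):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// doWork derives its logger from the incoming context instead of calling the
// global klog helpers, which is the pattern applied throughout this PR.
func doWork(ctx context.Context, containerID string) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Removing container", "containerID", containerID)
}

func main() {
	// The caller attaches key/value pairs once; they show up on every log
	// line emitted further down the call chain.
	logger := klog.Background().WithValues("component", "example")
	ctx := klog.NewContext(context.Background(), logger)
	doWork(ctx, "test-id")
}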
@@ -1239,7 +1239,8 @@ func RunKubelet(ctx context.Context, kubeServer *options.KubeletServer, kubeDeps
kubeDeps.OSInterface = kubecontainer.RealOS{}
}

- k, err := createAndInitKubelet(kubeServer,
+ k, err := createAndInitKubelet(ctx,
+ kubeServer,
kubeDeps,
hostname,
nodeName,

@@ -1279,7 +1280,9 @@ func startKubelet(ctx context.Context, k kubelet.Bootstrap, podCfg *config.PodCo
go k.ListenAndServePodResources(ctx)
}

- func createAndInitKubelet(kubeServer *options.KubeletServer,
+ func createAndInitKubelet(
+ ctx context.Context,
+ kubeServer *options.KubeletServer,
kubeDeps *kubelet.Dependencies,
hostname string,
nodeName types.NodeName,

@@ -1287,7 +1290,9 @@ func createAndInitKubelet(kubeServer *options.KubeletServer,
// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
// up into "per source" synchronizations

- k, err = kubelet.NewMainKubelet(&kubeServer.KubeletConfiguration,
+ k, err = kubelet.NewMainKubelet(
+ ctx,
+ &kubeServer.KubeletConfiguration,
kubeDeps,
&kubeServer.ContainerRuntimeOptions,
hostname,
@@ -214,6 +214,7 @@ linters:
contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*
contextual k8s.io/kubernetes/pkg/kubelet/apis/.*
contextual k8s.io/kubernetes/pkg/kubelet/kubeletconfig/.*
+ contextual k8s.io/kubernetes/pkg/kubelet/kuberuntime/.*
contextual k8s.io/kubernetes/pkg/kubelet/nodeshutdown/.*
contextual k8s.io/kubernetes/pkg/kubelet/pod/.*
contextual k8s.io/kubernetes/pkg/kubelet/preemption/.*

@@ -228,6 +228,7 @@ linters:
contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*
contextual k8s.io/kubernetes/pkg/kubelet/apis/.*
contextual k8s.io/kubernetes/pkg/kubelet/kubeletconfig/.*
+ contextual k8s.io/kubernetes/pkg/kubelet/kuberuntime/.*
contextual k8s.io/kubernetes/pkg/kubelet/nodeshutdown/.*
contextual k8s.io/kubernetes/pkg/kubelet/pod/.*
contextual k8s.io/kubernetes/pkg/kubelet/preemption/.*

@@ -60,6 +60,7 @@ contextual k8s.io/kubernetes/pkg/kubelet/status/.*
contextual k8s.io/kubernetes/pkg/kubelet/sysctl/.*
contextual k8s.io/kubernetes/pkg/kubelet/apis/.*
contextual k8s.io/kubernetes/pkg/kubelet/kubeletconfig/.*
+ contextual k8s.io/kubernetes/pkg/kubelet/kuberuntime/.*
contextual k8s.io/kubernetes/pkg/kubelet/nodeshutdown/.*
contextual k8s.io/kubernetes/pkg/kubelet/pod/.*
contextual k8s.io/kubernetes/pkg/kubelet/preemption/.*
@@ -408,7 +408,8 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration,

// NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
// No initialization of Kubelet and its modules should happen here.
- func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
+ func NewMainKubelet(ctx context.Context,
+ kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps *Dependencies,
crOptions *config.ContainerRuntimeOptions,
hostname string,

@@ -434,7 +435,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeStatusMaxImages int32,
seccompDefault bool,
) (*Kubelet, error) {
- ctx := context.Background()
logger := klog.FromContext(ctx)

if rootDirectory == "" {

@@ -753,6 +753,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}

runtime, postImageGCHooks, err := kuberuntime.NewKubeGenericRuntimeManager(
+ ctx,
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
klet.readinessManager,
@@ -3006,6 +3006,7 @@ func TestNewMainKubeletStandAlone(t *testing.T) {
crOptions := &config.ContainerRuntimeOptions{}

testMainKubelet, err := NewMainKubelet(
+ tCtx,
kubeCfg,
kubeDep,
crOptions,

@@ -3065,6 +3066,7 @@ func TestNewMainKubeletStandAlone(t *testing.T) {
}

func TestSyncPodSpans(t *testing.T) {
+ tCtx := ktesting.Init(t)
testKubelet := newTestKubelet(t, false)
kubelet := testKubelet.kubelet

@@ -3105,6 +3107,7 @@ func TestSyncPodSpans(t *testing.T) {
assert.NoError(t, err)

kubelet.containerRuntime, _, err = kuberuntime.NewKubeGenericRuntimeManager(
+ tCtx,
kubelet.recorder,
kubelet.livenessManager,
kubelet.readinessManager,
@@ -95,8 +95,7 @@ func (f *fakePodPullingTimeRecorder) RecordImageStartedPulling(podUID types.UID)

func (f *fakePodPullingTimeRecorder) RecordImageFinishedPulling(podUID types.UID) {}

- func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, tracer trace.Tracer) (*kubeGenericRuntimeManager, error) {
- ctx := context.Background()
+ func newFakeKubeRuntimeManager(ctx context.Context, runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, tracer trace.Tracer) (*kubeGenericRuntimeManager, error) {
recorder := &record.FakeRecorder{}
logManager, err := logs.NewContainerLogManager(runtimeService, osInterface, "1", 2, 10, metav1.Duration{Duration: 10 * time.Second})
if err != nil {

@@ -123,6 +122,9 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
allocationManager: allocation.NewInMemoryManager(nil, nil, nil, nil, nil, nil),
}

+ // Initialize swap controller availability check (always false for tests)
+ kubeRuntimeManager.getSwapControllerAvailable = func() bool { return false }

typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion)
if err != nil {
return nil, err
@@ -78,7 +78,7 @@ func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.State {
}

// toRuntimeProtocol converts v1.Protocol to runtimeapi.Protocol.
- func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
+ func toRuntimeProtocol(logger klog.Logger, protocol v1.Protocol) runtimeapi.Protocol {
switch protocol {
case v1.ProtocolTCP:
return runtimeapi.Protocol_TCP

@@ -88,12 +88,12 @@ func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
return runtimeapi.Protocol_SCTP
}

- klog.InfoS("Unknown protocol, defaulting to TCP", "protocol", protocol)
+ logger.Info("Unknown protocol, defaulting to TCP", "protocol", protocol)
return runtimeapi.Protocol_TCP
}

// toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
- func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) {
+ func (m *kubeGenericRuntimeManager) toKubeContainer(ctx context.Context, c *runtimeapi.Container) (*kubecontainer.Container, error) {
if c == nil || c.Id == "" || c.Image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}

@@ -104,7 +104,7 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*k
imageID = c.ImageId
}

- annotatedInfo := getContainerInfoFromAnnotations(c.Annotations)
+ annotatedInfo := getContainerInfoFromAnnotations(ctx, c.Annotations)
return &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: m.runtimeName, ID: c.Id},
Name: c.GetMetadata().GetName(),
@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
+ "k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/ptr"
)

@@ -35,7 +36,8 @@ func seccompLocalhostRef(profileName string) string {
}

func TestGetSeccompProfile(t *testing.T) {
- _, _, m, err := createTestRuntimeManager()
+ tCtx := ktesting.Init(t)
+ _, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

unconfinedProfile := &runtimeapi.SecurityProfile{

@@ -135,7 +137,8 @@ func TestGetSeccompProfile(t *testing.T) {
}

func TestGetSeccompProfileDefaultSeccomp(t *testing.T) {
- _, _, m, err := createTestRuntimeManager()
+ tCtx := ktesting.Init(t)
+ _, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

unconfinedProfile := &runtimeapi.SecurityProfile{
@@ -30,9 +30,11 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
runtimetesting "k8s.io/cri-api/pkg/apis/testing"
+ "k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+ "k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/ptr"
)

@@ -180,6 +182,7 @@ func TestGetBackoffKey(t *testing.T) {
}

func TestToKubeContainer(t *testing.T) {
+ tCtx := ktesting.Init(t)
c := &runtimeapi.Container{
Id: "test-id",
Metadata: &runtimeapi.ContainerMetadata{

@@ -208,14 +211,14 @@ func TestToKubeContainer(t *testing.T) {
State: kubecontainer.ContainerStateRunning,
}

- _, _, m, err := createTestRuntimeManager()
+ _, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
- got, err := m.toKubeContainer(c)
+ got, err := m.toKubeContainer(tCtx, c)
assert.NoError(t, err)
assert.Equal(t, expect, got)

// unable to convert a nil pointer to a runtime container
- _, err = m.toKubeContainer(nil)
+ _, err = m.toKubeContainer(tCtx, nil)
assert.Error(t, err)
_, err = m.sandboxToKubeContainer(nil)
assert.Error(t, err)

@@ -223,6 +226,7 @@ func TestToKubeContainer(t *testing.T) {

func TestToKubeContainerWithRuntimeHandlerInImageSpecCri(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RuntimeClassInImageCriAPI, true)
+ tCtx := ktesting.Init(t)
c := &runtimeapi.Container{
Id: "test-id",
Metadata: &runtimeapi.ContainerMetadata{

@@ -251,21 +255,22 @@ func TestToKubeContainerWithRuntimeHandlerInImageSpecCri(t *testing.T) {
State: kubecontainer.ContainerStateRunning,
}

- _, _, m, err := createTestRuntimeManager()
+ _, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
- got, err := m.toKubeContainer(c)
+ got, err := m.toKubeContainer(tCtx, c)
assert.NoError(t, err)
assert.Equal(t, expect, got)

// unable to convert a nil pointer to a runtime container
- _, err = m.toKubeContainer(nil)
+ _, err = m.toKubeContainer(tCtx, nil)
assert.Error(t, err)
_, err = m.sandboxToKubeContainer(nil)
assert.Error(t, err)
}

func TestGetImageUser(t *testing.T) {
- _, i, m, err := createTestRuntimeManager()
+ tCtx := ktesting.Init(t)
+ _, i, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)

type image struct {

@@ -332,11 +337,11 @@ func TestGetImageUser(t *testing.T) {

i.SetFakeImages([]string{"test-image-ref1", "test-image-ref2", "test-image-ref3"})
for j, test := range tests {
- ctx := context.Background()
+ tCtx := ktesting.Init(t)
i.Images[test.originalImage.name].Username = test.originalImage.username
i.Images[test.originalImage.name].Uid = test.originalImage.uid

- uid, username, err := m.getImageUser(ctx, test.originalImage.name)
+ uid, username, err := m.getImageUser(tCtx, test.originalImage.name)
assert.NoError(t, err, "TestCase[%d]", j)

if test.expectedImageUserValues.uid == (*int64)(nil) {

@@ -349,6 +354,8 @@ func TestGetImageUser(t *testing.T) {
}

func TestToRuntimeProtocol(t *testing.T) {
+ tCtx := ktesting.Init(t)
+ logger := klog.FromContext(tCtx)
for _, test := range []struct {
name string
protocol string

@@ -376,7 +383,7 @@ func TestToRuntimeProtocol(t *testing.T) {
},
} {
t.Run(test.name, func(t *testing.T) {
- if result := toRuntimeProtocol(v1.Protocol(test.protocol)); result != test.expected {
+ if result := toRuntimeProtocol(logger, v1.Protocol(test.protocol)); result != test.expected {
t.Errorf("expected %d but got %d", test.expected, result)
}
})
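The test hunks follow the same pattern: instead of context.Background(), each test calls ktesting.Init(t) and passes the returned context (tCtx) to the code under test. A minimal, hypothetical sketch of that usage (not part of the PR):

package kuberuntime

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

// TestContextualLoggingSketch is a hypothetical example: ktesting.Init returns
// a per-test context whose logger is wired to t, so anything the code under
// test logs via klog.FromContext ends up in that test's output.
func TestContextualLoggingSketch(t *testing.T) {
	tCtx := ktesting.Init(t)
	logger := klog.FromContext(tCtx)
	logger.Info("captured in the test log", "name", t.Name())
}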
@@ -17,7 +17,6 @@ limitations under the License.
package kuberuntime

import (
- "context"
"net"
"net/http"
"testing"

@@ -28,6 +27,7 @@ import (
compbasemetrics "k8s.io/component-base/metrics"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/metrics"
+ "k8s.io/kubernetes/test/utils/ktesting"
)

func TestRecordOperation(t *testing.T) {

@@ -71,17 +71,17 @@ func TestRecordOperation(t *testing.T) {
}

func TestInstrumentedVersion(t *testing.T) {
- ctx := context.Background()
- fakeRuntime, _, _, _ := createTestRuntimeManager()
+ tCtx := ktesting.Init(t)
+ fakeRuntime, _, _, _ := createTestRuntimeManager(tCtx)
irs := newInstrumentedRuntimeService(fakeRuntime)
- vr, err := irs.Version(ctx, "1")
+ vr, err := irs.Version(tCtx, "1")
assert.NoError(t, err)
assert.Equal(t, kubeRuntimeAPIVersion, vr.Version)
}

func TestStatus(t *testing.T) {
- ctx := context.Background()
- fakeRuntime, _, _, _ := createTestRuntimeManager()
+ tCtx := ktesting.Init(t)
+ fakeRuntime, _, _, _ := createTestRuntimeManager(tCtx)
fakeRuntime.FakeStatus = &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{
{Type: runtimeapi.RuntimeReady, Status: false},

@@ -89,7 +89,7 @@ func TestStatus(t *testing.T) {
},
}
irs := newInstrumentedRuntimeService(fakeRuntime)
- actural, err := irs.Status(ctx, false)
+ actural, err := irs.Status(tCtx, false)
assert.NoError(t, err)
expected := &runtimeapi.RuntimeStatus{
Conditions: []*runtimeapi.RuntimeCondition{
@@ -81,10 +81,11 @@ var (
// in particular, it ensures that a containerID never appears in an event message as that
// is prone to causing a lot of distinct events that do not count well.
// it replaces any reference to a containerID with the containerName which is stable, and is what users know.
- func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) {
+ func (m *kubeGenericRuntimeManager) recordContainerEvent(ctx context.Context, pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) {
+ logger := klog.FromContext(ctx)
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
- klog.ErrorS(err, "Can't make a container ref", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
+ logger.Error(err, "Can't make a container ref", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
return
}
eventMessage := message

@@ -196,6 +197,7 @@ func (m *kubeGenericRuntimeManager) getPodRuntimeHandler(pod *v1.Pod) (podRuntim
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string, imageVolumes kubecontainer.ImageVolumes) (string, error) {
+ logger := klog.FromContext(ctx)
container := spec.container

// Step 1: pull the image.

@@ -206,13 +208,13 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb

ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
- klog.ErrorS(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod), "containerName", container.Name)
+ logger.Error(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod), "containerName", container.Name)
}

imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, ref, pod, container.Image, pullSecrets, podSandboxConfig, podRuntimeHandler, container.ImagePullPolicy)
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return msg, err
}

@@ -236,7 +238,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
logDir := BuildContainerLogsDirectory(m.podLogsDirectory, pod.Namespace, pod.Name, pod.UID, container.Name)
restartCount, err = calcRestartCountByLogDir(logDir)
if err != nil {
- klog.InfoS("Cannot calculate restartCount from the log directory", "logDir", logDir, "err", err)
+ logger.Info("Cannot calculate restartCount from the log directory", "logDir", logDir, "err", err)
restartCount = 0
}
}

@@ -244,7 +246,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
target, err := spec.getTargetID(podStatus)
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return s.Message(), ErrCreateContainerConfig
}

@@ -254,45 +256,45 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
}
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return s.Message(), ErrCreateContainerConfig
}

// When creating a container, mark the resources as actuated.
if err := m.allocationManager.SetActuatedResources(pod, container); err != nil {
- m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", err)
+ m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", err)
return err.Error(), ErrCreateContainerConfig
}

err = m.internalLifecycle.PreCreateContainer(pod, container, containerConfig)
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Internal PreCreateContainer hook failed: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Internal PreCreateContainer hook failed: %v", s.Message())
return s.Message(), ErrPreCreateHook
}

containerID, err := m.runtimeService.CreateContainer(ctx, podSandboxID, containerConfig, podSandboxConfig)
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
return s.Message(), ErrCreateContainer
}
err = m.internalLifecycle.PreStartContainer(pod, container, containerID)
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
return s.Message(), ErrPreStartHook
}
- m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)
+ m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)

// Step 3: start the container.
err = m.runtimeService.StartContainer(ctx, containerID)
if err != nil {
s, _ := grpcstatus.FromError(err)
- m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
+ m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
return s.Message(), kubecontainer.ErrRunContainer
}
- m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)
+ m.recordContainerEvent(ctx, pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)

// Symlink container logs to the legacy container log location for cluster logging
// support.
@@ -308,7 +310,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
// to create it in the first place. it happens when journald logging driver is used with docker.
if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) {
if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
- klog.ErrorS(err, "Failed to create legacy symbolic link", "path", legacySymlink,
+ logger.Error(err, "Failed to create legacy symbolic link", "path", legacySymlink,
"containerID", containerID, "containerLogPath", containerLog)
}
}

@@ -321,12 +323,12 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
}
msg, handlerErr := m.runner.Run(ctx, kubeContainerID, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil {
- klog.ErrorS(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod),
+ logger.Error(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
// do not record the message in the event so that secrets won't leak from the server.
- m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed")
+ m.recordContainerEvent(ctx, pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed")
if err := m.killContainer(ctx, pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil, nil); err != nil {
- klog.ErrorS(err, "Failed to kill container", "pod", klog.KObj(pod),
+ logger.Error(err, "Failed to kill container", "pod", klog.KObj(pod),
"podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String())
}
return msg, ErrPostStartHook

@@ -349,7 +351,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context,
}

// Verify RunAsNonRoot. Non-root verification only supports numeric user.
- if err := verifyRunAsNonRoot(pod, container, uid, username); err != nil {
+ if err := verifyRunAsNonRoot(ctx, pod, container, uid, username); err != nil {
return nil, cleanupAction, err
}

@@ -372,7 +374,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context,
Args: args,
WorkingDir: container.WorkingDir,
Labels: newContainerLabels(container, pod),
- Annotations: newContainerAnnotations(container, pod, restartCount, opts),
+ Annotations: newContainerAnnotations(ctx, container, pod, restartCount, opts),
Devices: makeDevices(opts),
CDIDevices: makeCDIDevices(opts),
Mounts: m.makeMounts(opts, container),

@@ -386,7 +388,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context,
config.StopSignal = *stopsignal
}
// set platform specific configurations.
- if err := m.applyPlatformSpecificContainerConfig(config, container, pod, uid, username, nsTarget); err != nil {
+ if err := m.applyPlatformSpecificContainerConfig(ctx, config, container, pod, uid, username, nsTarget); err != nil {
return nil, cleanupAction, err
}

@@ -404,31 +406,33 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context,
return config, cleanupAction, nil
}

- func (m *kubeGenericRuntimeManager) updateContainerResources(pod *v1.Pod, container *v1.Container, containerID kubecontainer.ContainerID) error {
- containerResources := m.generateContainerResources(pod, container)
+ func (m *kubeGenericRuntimeManager) updateContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container, containerID kubecontainer.ContainerID) error {
+ containerResources := m.generateContainerResources(ctx, pod, container)
if containerResources == nil {
return fmt.Errorf("container %q updateContainerResources failed: cannot generate resources config", containerID.String())
}
- ctx := context.Background()
+ logger := klog.FromContext(ctx)
err := m.runtimeService.UpdateContainerResources(ctx, containerID.ID, containerResources)
if err == nil {
err = m.allocationManager.SetActuatedResources(pod, container)
} else {
+ logger.Error(err, "UpdateContainerResources failed", "container", containerID.String())
}
return err
}

- func (m *kubeGenericRuntimeManager) updatePodSandboxResources(sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) error {
+ func (m *kubeGenericRuntimeManager) updatePodSandboxResources(ctx context.Context, sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) error {
+ logger := klog.FromContext(ctx)
podResourcesRequest := m.generateUpdatePodSandboxResourcesRequest(sandboxID, pod, podResources)
if podResourcesRequest == nil {
return fmt.Errorf("sandboxID %q updatePodSandboxResources failed: cannot generate resources config", sandboxID)
}

- ctx := context.Background()
_, err := m.runtimeService.UpdatePodSandboxResources(ctx, podResourcesRequest)
if err != nil {
stat, _ := grpcstatus.FromError(err)
if stat.Code() == codes.Unimplemented {
- klog.V(3).InfoS("updatePodSandboxResources failed: unimplemented; this call is best-effort: proceeding with resize", "sandboxID", sandboxID)
+ logger.V(3).Info("updatePodSandboxResources failed: unimplemented; this call is best-effort: proceeding with resize", "sandboxID", sandboxID)
return nil
}
return fmt.Errorf("updatePodSandboxResources failed for sanboxID %q: %w", sandboxID, err)
@@ -528,6 +532,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
// The boolean parameter specifies whether returns all containers including
// those already exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, allContainers bool) ([]*runtimeapi.Container, error) {
+ logger := klog.FromContext(ctx)
filter := &runtimeapi.ContainerFilter{}
if !allContainers {
filter.State = &runtimeapi.ContainerStateValue{

@@ -537,7 +542,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, al

containers, err := m.runtimeService.ListContainers(ctx, filter)
if err != nil {
- klog.ErrorS(err, "ListContainers failed")
+ logger.Error(err, "ListContainers failed")
return nil, err
}

@@ -576,26 +581,26 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag

// readLastStringFromContainerLogs attempts to read up to the max log length from the end of the CRI log represented
// by path. It reads up to max log lines.
- func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) string {
+ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(ctx context.Context, path string) string {
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
- if err := m.ReadLogs(context.Background(), path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
+ if err := m.ReadLogs(ctx, path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
return fmt.Sprintf("Error on reading termination message from logs: %v", err)
}
return buf.String()
}

- func (m *kubeGenericRuntimeManager) convertToKubeContainerStatus(status *runtimeapi.ContainerStatus) (cStatus *kubecontainer.Status) {
- cStatus = toKubeContainerStatus(status, m.runtimeName)
+ func (m *kubeGenericRuntimeManager) convertToKubeContainerStatus(ctx context.Context, status *runtimeapi.ContainerStatus) (cStatus *kubecontainer.Status) {
+ cStatus = toKubeContainerStatus(ctx, status, m.runtimeName)
if status.State == runtimeapi.ContainerState_CONTAINER_EXITED {
// Populate the termination message if needed.
- annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
+ annotatedInfo := getContainerInfoFromAnnotations(ctx, status.Annotations)
// If a container cannot even be started, it certainly does not have logs, so no need to fallbackToLogs.
fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError &&
cStatus.ExitCode != 0 && cStatus.Reason != "ContainerCannotRun"
tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs)
if checkLogs {
- tMessage = m.readLastStringFromContainerLogs(status.GetLogPath())
+ tMessage = m.readLastStringFromContainerLogs(ctx, status.GetLogPath())
}
// Enrich the termination message written by the application is not empty
if len(tMessage) != 0 {

@@ -610,12 +615,13 @@ func (m *kubeGenericRuntimeManager) convertToKubeContainerStatus(status *runtime

// getPodContainerStatuses gets all containers' statuses for the pod.
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace, activePodSandboxID string) ([]*kubecontainer.Status, []*kubecontainer.Status, error) {
+ logger := klog.FromContext(ctx)
// Select all containers of the given pod.
containers, err := m.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{
LabelSelector: map[string]string{kubelettypes.KubernetesPodUIDLabel: string(uid)},
})
if err != nil {
- klog.ErrorS(err, "ListContainers error")
+ logger.Error(err, "ListContainers error")
return nil, nil, err
}

@@ -633,14 +639,14 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context,
}
if err != nil {
// Merely log this here; GetPodStatus will actually report the error out.
- klog.V(4).InfoS("ContainerStatus return error", "containerID", c.Id, "err", err)
+ logger.V(4).Info("ContainerStatus return error", "containerID", c.Id, "err", err)
return nil, nil, err
}
status := resp.GetStatus()
if status == nil {
return nil, nil, remote.ErrContainerStatusNil
}
- cStatus := m.convertToKubeContainerStatus(status)
+ cStatus := m.convertToKubeContainerStatus(ctx, status)
statuses = append(statuses, cStatus)
if c.PodSandboxId == activePodSandboxID {
activeContainerStatuses = append(activeContainerStatuses, cStatus)

@@ -652,9 +658,9 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context,
return statuses, activeContainerStatuses, nil
}

- func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status {
- annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
- labeledInfo := getContainerInfoFromLabels(status.Labels)
+ func toKubeContainerStatus(ctx context.Context, status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status {
+ annotatedInfo := getContainerInfoFromAnnotations(ctx, status.Annotations)
+ labeledInfo := getContainerInfoFromLabels(ctx, status.Labels)
var cStatusResources *kubecontainer.ContainerResources
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// If runtime reports cpu & memory resources info, add it to container status
@@ -730,7 +736,8 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin

// executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes.
func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
- klog.V(3).InfoS("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String())
+ logger := klog.FromContext(ctx)
+ logger.V(3).Info("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String())

start := metav1.Now()
done := make(chan struct{})

@@ -738,19 +745,19 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod
defer close(done)
defer utilruntime.HandleCrash()
if _, err := m.runner.Run(ctx, containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
- klog.ErrorS(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.Error(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String())
// do not record the message in the event so that secrets won't leak from the server.
- m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, "PreStopHook failed")
+ m.recordContainerEvent(ctx, pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, "PreStopHook failed")
}
}()

select {
case <-time.After(time.Duration(gracePeriod) * time.Second):
- klog.V(2).InfoS("PreStop hook not completed in grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.V(2).Info("PreStop hook not completed in grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String(), "gracePeriod", gracePeriod)
case <-done:
- klog.V(3).InfoS("PreStop hook completed", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.V(3).Info("PreStop hook completed", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerSpec.Name, "containerID", containerID.String())
}

@@ -777,8 +784,8 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context.
return nil, nil, remote.ErrContainerStatusNil
}

- l := getContainerInfoFromLabels(s.Labels)
- a := getContainerInfoFromAnnotations(s.Annotations)
+ l := getContainerInfoFromLabels(ctx, s.Labels)
+ a := getContainerInfoFromAnnotations(ctx, s.Annotations)
// Notice that the followings are not full spec. The container killing code should not use
// un-restored fields.
pod = &v1.Pod{

@@ -809,6 +816,7 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context.
// * Run the pre-stop lifecycle hooks (if applicable).
// * Stop the container.
func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64, ordering *terminationOrdering) error {
+ logger := klog.FromContext(ctx)
var containerSpec *v1.Container
if pod != nil {
if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil {

@@ -825,16 +833,16 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P
}

// From this point, pod and container must be non-nil.
- gracePeriod := setTerminationGracePeriod(pod, containerSpec, containerName, containerID, reason)
+ gracePeriod := setTerminationGracePeriod(ctx, pod, containerSpec, containerName, containerID, reason)

if len(message) == 0 {
message = fmt.Sprintf("Stopping container %s", containerSpec.Name)
}
- m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)
+ m.recordContainerEvent(ctx, pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)

if gracePeriodOverride != nil {
gracePeriod = *gracePeriodOverride
- klog.V(3).InfoS("Killing container with a grace period override", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.V(3).Info("Killing container with a grace period override", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
}

@@ -855,16 +863,16 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P
gracePeriod = minimumGracePeriodInSeconds
}

- klog.V(2).InfoS("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.V(2).Info("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)

err := m.runtimeService.StopContainer(ctx, containerID.ID, gracePeriod)
if err != nil && !crierror.IsNotFound(err) {
- klog.ErrorS(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.Error(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod)
return err
}
- klog.V(3).InfoS("Container exited normally", "pod", klog.KObj(pod), "podUID", pod.UID,
+ logger.V(3).Info("Container exited normally", "pod", klog.KObj(pod), "podUID", pod.UID,
"containerName", containerName, "containerID", containerID.String())

if ordering != nil {
@@ -876,6 +884,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P

// killContainersWithSyncResult kills all pod's containers with sync results.
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
+ logger := klog.FromContext(ctx)
containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
wg := sync.WaitGroup{}

@@ -897,7 +906,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con
if err := m.killContainer(ctx, pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride, termOrdering); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
// Use runningPod for logging as the pod passed in could be *nil*.
- klog.ErrorS(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID,
+ logger.Error(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID,
"containerName", container.Name, "containerID", container.ID)
}
containerResults <- killContainerResult

@@ -917,6 +926,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con
// present. This reduces load on the container garbage collector by only
// preserving the most recent terminated init container.
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
+ logger := klog.FromContext(ctx)
// only the last execution of each init container should be preserved, and only preserve it if it is in the
// list of init containers to keep.
initContainerNames := sets.New[string]()

@@ -940,7 +950,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.C
continue
}
// prune all other init containers that match this container name
- klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
+ logger.V(4).Info("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue

@@ -953,6 +963,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.C
// of the container because it assumes all init containers have been stopped
// before the call happens.
func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
+ logger := klog.FromContext(ctx)
initContainerNames := sets.New[string]()
for _, container := range pod.Spec.InitContainers {
initContainerNames.Insert(container.Name)

@@ -965,7 +976,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod
}
count++
// Purge all init containers that match this container name
- klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
+ logger.V(4).Info("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count)
if err := m.removeContainer(ctx, status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue

@@ -1003,7 +1014,8 @@ func hasAnyRegularContainerCreated(pod *v1.Pod, podStatus *kubecontainer.PodStat
// - Start the first init container that has not been started.
// - Restart all restartable init containers that have started but are not running.
// - Kill the restartable init containers that are not alive or started.
- func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus, changes *podActions) bool {
+ func (m *kubeGenericRuntimeManager) computeInitContainerActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, changes *podActions) bool {
+ logger := klog.FromContext(ctx)
if len(pod.Spec.InitContainers) == 0 {
return true
}

@@ -1053,7 +1065,7 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- {
container := &pod.Spec.InitContainers[i]
status := podStatus.FindContainerStatusByName(container.Name)
- klog.V(4).InfoS("Computing init container action", "pod", klog.KObj(pod), "container", container.Name, "status", status)
+ logger.V(4).Info("Computing init container action", "pod", klog.KObj(pod), "container", container.Name, "status", status)
if status == nil {
// If the container is previously initialized but its status is not
// found, it means its last status is removed for some reason.

@@ -1107,7 +1119,7 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
}
}

- klog.V(4).InfoS("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
+ logger.V(4).Info("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
if i == (len(pod.Spec.InitContainers) - 1) {
podHasInitialized = true
} else if !isPreviouslyInitialized {

@@ -1150,7 +1162,7 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
}
}

- if !m.computePodResizeAction(pod, i, true, status, changes) {
+ if !m.computePodResizeAction(ctx, pod, i, true, status, changes) {
// computePodResizeAction updates 'changes' if resize policy requires restarting this container
break
}

@@ -1175,7 +1187,7 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
break
}

- klog.V(4).InfoS("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
+ logger.V(4).Info("Init container has been initialized", "pod", klog.KObj(pod), "container", container.Name)
if i == (len(pod.Spec.InitContainers) - 1) {
podHasInitialized = true
} else {

@@ -1197,7 +1209,7 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
changes.InitContainersToStart = append(changes.InitContainersToStart, i)
} else { // init container
if !isInitContainerFailed(status) {
- klog.V(4).InfoS("This should not happen, init container is in unknown state but not failed", "pod", klog.KObj(pod), "containerStatus", status)
+ logger.V(4).Info("This should not happen, init container is in unknown state but not failed", "pod", klog.KObj(pod), "containerStatus", status)
}

if !restartOnFailure {
@@ -1244,9 +1256,10 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod

// GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
+ logger := klog.FromContext(ctx)
resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false)
if err != nil {
- klog.V(4).InfoS("Failed to get container status", "containerID", containerID.String(), "err", err)
+ logger.V(4).Info("Failed to get container status", "containerID", containerID.String(), "err", err)
return fmt.Errorf("unable to retrieve container logs for %v", containerID.String())
}
status := resp.GetStatus()

@@ -1306,7 +1319,8 @@ func (m *kubeGenericRuntimeManager) RunInContainer(ctx context.Context, id kubec
// Notice that we assume that the container should only be removed in non-running state, and
// it will not write container logs anymore in that state.
func (m *kubeGenericRuntimeManager) removeContainer(ctx context.Context, containerID string) error {
- klog.V(4).InfoS("Removing container", "containerID", containerID)
+ logger := klog.FromContext(ctx)
+ logger.V(4).Info("Removing container", "containerID", containerID)
// Call internal container post-stop lifecycle hook.
if err := m.internalLifecycle.PostStopContainer(containerID); err != nil {
return err

@@ -1339,7 +1353,7 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(ctx context.Context, cont
}
// Remove the legacy container log symlink.
// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
- labeledInfo := getContainerInfoFromLabels(status.Labels)
+ labeledInfo := getContainerInfoFromLabels(ctx, status.Labels)
legacySymlink := legacyLogSymlink(containerID, labeledInfo.ContainerName, labeledInfo.PodName,
labeledInfo.PodNamespace)
if err := m.osInterface.Remove(legacySymlink); err != nil && !os.IsNotExist(err) {

@@ -1355,7 +1369,7 @@ func (m *kubeGenericRuntimeManager) DeleteContainer(ctx context.Context, contain
}

// setTerminationGracePeriod determines the grace period to use when killing a container
- func setTerminationGracePeriod(pod *v1.Pod, containerSpec *v1.Container, containerName string, containerID kubecontainer.ContainerID, reason containerKillReason) int64 {
+ func setTerminationGracePeriod(ctx context.Context, pod *v1.Pod, containerSpec *v1.Container, containerName string, containerID kubecontainer.ContainerID, reason containerKillReason) int64 {
gracePeriod := int64(minimumGracePeriodInSeconds)
switch {
case pod.DeletionGracePeriodSeconds != nil:

@@ -1363,11 +1377,11 @@ func setTerminationGracePeriod(pod *v1.Pod, containerSpec *v1.Container, contain
case pod.Spec.TerminationGracePeriodSeconds != nil:
switch reason {
case reasonStartupProbe:
- if isProbeTerminationGracePeriodSecondsSet(pod, containerSpec, containerSpec.StartupProbe, containerName, containerID, "StartupProbe") {
+ if isProbeTerminationGracePeriodSecondsSet(ctx, pod, containerSpec, containerSpec.StartupProbe, containerName, containerID, "StartupProbe") {
return *containerSpec.StartupProbe.TerminationGracePeriodSeconds
}
case reasonLivenessProbe:
- if isProbeTerminationGracePeriodSecondsSet(pod, containerSpec, containerSpec.LivenessProbe, containerName, containerID, "LivenessProbe") {
+ if isProbeTerminationGracePeriodSecondsSet(ctx, pod, containerSpec, containerSpec.LivenessProbe, containerName, containerID, "LivenessProbe") {
return *containerSpec.LivenessProbe.TerminationGracePeriodSeconds
}
}

@@ -1376,10 +1390,11 @@ func setTerminationGracePeriod(pod *v1.Pod, containerSpec *v1.Container, contain
return gracePeriod
}

- func isProbeTerminationGracePeriodSecondsSet(pod *v1.Pod, containerSpec *v1.Container, probe *v1.Probe, containerName string, containerID kubecontainer.ContainerID, probeType string) bool {
+ func isProbeTerminationGracePeriodSecondsSet(ctx context.Context, pod *v1.Pod, containerSpec *v1.Container, probe *v1.Probe, containerName string, containerID kubecontainer.ContainerID, probeType string) bool {
+ logger := klog.FromContext(ctx)
if probe != nil && probe.TerminationGracePeriodSeconds != nil {
if *probe.TerminationGracePeriodSeconds > *pod.Spec.TerminationGracePeriodSeconds {
- klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probeType", probeType, "probeGracePeriod", *probe.TerminationGracePeriodSeconds, "podGracePeriod", *pod.Spec.TerminationGracePeriodSeconds)
+ logger.V(4).Info("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probeType", probeType, "probeGracePeriod", *probe.TerminationGracePeriodSeconds, "podGracePeriod", *pod.Spec.TerminationGracePeriodSeconds)
}
return true
}
@@ -20,6 +20,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"errors"
"fmt"
"math"
@@ -52,14 +53,14 @@ import (
var defaultPageSize = int64(os.Getpagesize())

// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(ctx context.Context, config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
enforceMemoryQoS := false
// Set memory.min and memory.high if MemoryQoS enabled with cgroups v2
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
isCgroup2UnifiedMode() {
enforceMemoryQoS = true
}
cl, err := m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget, enforceMemoryQoS)
cl, err := m.generateLinuxContainerConfig(ctx, container, pod, uid, username, nsTarget, enforceMemoryQoS)
if err != nil {
return err
}
@@ -77,13 +78,13 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config
}

// generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) (*runtimeapi.LinuxContainerConfig, error) {
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) (*runtimeapi.LinuxContainerConfig, error) {
sc, err := m.determineEffectiveSecurityContext(pod, container, uid, username)
if err != nil {
return nil, err
}
lc := &runtimeapi.LinuxContainerConfig{
Resources: m.generateLinuxContainerResources(pod, container, enforceMemoryQoS),
Resources: m.generateLinuxContainerResources(ctx, pod, container, enforceMemoryQoS),
SecurityContext: sc,
}

@@ -124,7 +125,8 @@ func getMemoryLimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
}

// generateLinuxContainerResources generates linux container resources config for runtime
func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod, container *v1.Container, enforceMemoryQoS bool) *runtimeapi.LinuxContainerResources {
func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container, enforceMemoryQoS bool) *runtimeapi.LinuxContainerResources {
logger := klog.FromContext(ctx)
// set linux container resources
var cpuRequest *resource.Quantity
if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
@@ -137,16 +139,16 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
// If pod has exclusive cpu and the container in question has integer cpu requests
// the cfs quota will not be enforced
disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.ContainerHasExclusiveCPUs(pod, container)
klog.V(5).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
logger.V(5).Info("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit, disableCPUQuota)

lcr.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
int64(m.machineInfo.MemoryCapacity)))

lcr.HugepageLimits = GetHugepageLimitsFromResources(container.Resources)
lcr.HugepageLimits = GetHugepageLimitsFromResources(ctx, container.Resources)

// Configure swap for the container
m.configureContainerSwapResources(lcr, pod, container)
m.configureContainerSwapResources(ctx, lcr, pod, container)

// Set memory.min and memory.high to enforce MemoryQoS
if enforceMemoryQoS {
@@ -191,7 +193,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
lcr.Unified[k] = v
}
}
klog.V(4).InfoS("MemoryQoS config for container", "pod", klog.KObj(pod), "containerName", container.Name, "unified", unified)
logger.V(4).Info("MemoryQoS config for container", "pod", klog.KObj(pod), "containerName", container.Name, "unified", unified)
}
}
@@ -200,21 +202,21 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,

// configureContainerSwapResources configures the swap resources for a specified (linux) container.
// Swap is only configured if a swap cgroup controller is available and the NodeSwap feature gate is enabled.
func (m *kubeGenericRuntimeManager) configureContainerSwapResources(lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) {
if !swapControllerAvailable() {
func (m *kubeGenericRuntimeManager) configureContainerSwapResources(ctx context.Context, lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) {
if !m.getSwapControllerAvailable() {
return
}

swapConfigurationHelper := newSwapConfigurationHelper(*m.machineInfo)
swapConfigurationHelper := newSwapConfigurationHelper(*m.machineInfo, m.getSwapControllerAvailable)
// NOTE(ehashman): Behavior is defined in the opencontainers runtime spec:
// https://github.com/opencontainers/runtime-spec/blob/1c3f411f041711bbeecf35ff7e93461ea6789220/config-linux.md#memory
switch m.GetContainerSwapBehavior(pod, container) {
case types.NoSwap:
swapConfigurationHelper.ConfigureNoSwap(lcr)
swapConfigurationHelper.ConfigureNoSwap(ctx, lcr)
case types.LimitedSwap:
swapConfigurationHelper.ConfigureLimitedSwap(lcr, pod, container)
swapConfigurationHelper.ConfigureLimitedSwap(ctx, lcr, pod, container)
default:
swapConfigurationHelper.ConfigureNoSwap(lcr)
swapConfigurationHelper.ConfigureNoSwap(ctx, lcr)
}
}

@@ -223,7 +225,7 @@ func (m *kubeGenericRuntimeManager) configureContainerSwapResources(lcr *runtime
func (m *kubeGenericRuntimeManager) GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) types.SwapBehavior {
c := types.SwapBehavior(m.memorySwapBehavior)
if c == types.LimitedSwap {
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeSwap) || !swapControllerAvailable() {
if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeSwap) || !m.getSwapControllerAvailable() {
return types.NoSwap
}

@@ -246,7 +248,7 @@ func (m *kubeGenericRuntimeManager) GetContainerSwapBehavior(pod *v1.Pod, contai
}

// generateContainerResources generates platform specific (linux) container resources config for runtime
func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
func (m *kubeGenericRuntimeManager) generateContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
enforceMemoryQoS := false
// Set memory.min and memory.high if MemoryQoS enabled with cgroups v2
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
@@ -254,7 +256,7 @@ func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, cont
enforceMemoryQoS = true
}
return &runtimeapi.ContainerResources{
Linux: m.generateLinuxContainerResources(pod, container, enforceMemoryQoS),
Linux: m.generateLinuxContainerResources(ctx, pod, container, enforceMemoryQoS),
}
}

@@ -321,7 +323,8 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit
}

// GetHugepageLimitsFromResources returns limits of each hugepages from resources.
func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtimeapi.HugepageLimit {
func GetHugepageLimitsFromResources(ctx context.Context, resources v1.ResourceRequirements) []*runtimeapi.HugepageLimit {
logger := klog.FromContext(ctx)
var hugepageLimits []*runtimeapi.HugepageLimit

// For each page size, limit to 0.
@@ -340,13 +343,13 @@ func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtim

pageSize, err := v1helper.HugePageSizeFromResourceName(resourceObj)
if err != nil {
klog.InfoS("Failed to get hugepage size from resource", "object", resourceObj, "err", err)
logger.Info("Failed to get hugepage size from resource", "object", resourceObj, "err", err)
continue
}

sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize.Value())
if err != nil {
klog.InfoS("Size is invalid", "object", resourceObj, "err", err)
logger.Info("Size is invalid", "object", resourceObj, "err", err)
continue
}
requiredHugepageLimits[sizeString] = uint64(amountObj.Value())
@@ -399,21 +402,21 @@ var isCgroup2UnifiedMode = func() bool {
return libcontainercgroups.IsCgroup2UnifiedMode()
}

// Note: this function variable is being added here so it would be possible to mock
// the swap controller availability for unit tests by assigning a new function to it. Without it,
// the swap controller availability would solely depend on the environment running the test.
var swapControllerAvailable = sync.OnceValue(func() bool {
// checkSwapControllerAvailability checks if swap controller is available.
// It returns true if the swap controller is available, false otherwise.
func checkSwapControllerAvailability(ctx context.Context) bool {
// See https://github.com/containerd/containerd/pull/7838/
logger := klog.FromContext(ctx)
const warn = "Failed to detect the availability of the swap controller, assuming not available"
p := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"
if isCgroup2UnifiedMode() {
// memory.swap.max does not exist in the cgroup root, so we check /sys/fs/cgroup/<SELF>/memory.swap.max
cm, err := libcontainercgroups.ParseCgroupFile("/proc/self/cgroup")
if err != nil {
klog.V(5).ErrorS(fmt.Errorf("failed to parse /proc/self/cgroup: %w", err), warn)
logger.V(5).Error(fmt.Errorf("failed to parse /proc/self/cgroup: %w", err), warn)
return false
}
// Fr cgroup v2 unified hierarchy, there are no per-controller
// For cgroup v2 unified hierarchy, there are no per-controller
// cgroup paths, so the cm map returned by ParseCgroupFile above
// has a single element where the key is empty string ("") and
// the value is the cgroup path the <pid> is in.
@@ -421,37 +424,50 @@ var swapControllerAvailable = sync.OnceValue(func() bool {
}
if _, err := os.Stat(p); err != nil {
if !errors.Is(err, os.ErrNotExist) {
klog.V(5).ErrorS(err, warn)
logger.V(5).Error(err, warn)
}
return false
}

return true
})
}

// initSwapControllerAvailabilityCheck returns a function that checks swap controller availability
// with lazy initialization using sync.OnceValue
func initSwapControllerAvailabilityCheck(ctx context.Context) func() bool {
return sync.OnceValue(func() bool {
return checkSwapControllerAvailability(ctx)
})
}
type swapConfigurationHelper struct {
machineInfo cadvisorv1.MachineInfo
machineInfo cadvisorv1.MachineInfo
getSwapControllerAvailable func() bool
}

func newSwapConfigurationHelper(machineInfo cadvisorv1.MachineInfo) *swapConfigurationHelper {
return &swapConfigurationHelper{machineInfo: machineInfo}
func newSwapConfigurationHelper(machineInfo cadvisorv1.MachineInfo, getSwapControllerAvailable func() bool) *swapConfigurationHelper {
return &swapConfigurationHelper{
machineInfo: machineInfo,
getSwapControllerAvailable: getSwapControllerAvailable,
}
}

func (m swapConfigurationHelper) ConfigureLimitedSwap(lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) {
func (m swapConfigurationHelper) ConfigureLimitedSwap(ctx context.Context, lcr *runtimeapi.LinuxContainerResources, pod *v1.Pod, container *v1.Container) {
logger := klog.FromContext(ctx)
containerMemoryRequest := container.Resources.Requests.Memory()
swapLimit, err := calcSwapForBurstablePods(containerMemoryRequest.Value(), int64(m.machineInfo.MemoryCapacity), int64(m.machineInfo.SwapCapacity))
if err != nil {
klog.ErrorS(err, "cannot calculate swap allocation amount; disallowing swap")
m.ConfigureNoSwap(lcr)
logger.Error(err, "Cannot calculate swap allocation amount; disallowing swap")
m.ConfigureNoSwap(ctx, lcr)
return
}

m.configureSwap(lcr, swapLimit)
m.configureSwap(ctx, lcr, swapLimit)
}

func (m swapConfigurationHelper) ConfigureNoSwap(lcr *runtimeapi.LinuxContainerResources) {
func (m swapConfigurationHelper) ConfigureNoSwap(ctx context.Context, lcr *runtimeapi.LinuxContainerResources) {
if !isCgroup2UnifiedMode() {
if swapControllerAvailable() {
if m.getSwapControllerAvailable() {
// memorySwapLimit = total permitted memory+swap; if equal to memory limit, => 0 swap above memory limit
// Some swapping is still possible.
// Note that if memory limit is 0, memory swap limit is ignored.
@@ -460,12 +476,13 @@ func (m swapConfigurationHelper) ConfigureNoSwap(lcr *runtimeapi.LinuxContainerR
return
}

m.configureSwap(lcr, 0)
m.configureSwap(ctx, lcr, 0)
}

func (m swapConfigurationHelper) configureSwap(lcr *runtimeapi.LinuxContainerResources, swapMemory int64) {
func (m swapConfigurationHelper) configureSwap(ctx context.Context, lcr *runtimeapi.LinuxContainerResources, swapMemory int64) {
logger := klog.FromContext(ctx)
if !isCgroup2UnifiedMode() {
klog.ErrorS(fmt.Errorf("swap configuration is not supported with cgroup v1"), "swap configuration under cgroup v1 is unexpected")
logger.Error(fmt.Errorf("swap configuration is not supported with cgroup v1"), "Swap configuration under cgroup v1 is unexpected")
return
}
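The hunks above also replace the package-level sync.OnceValue variable with a per-manager function built by initSwapControllerAvailabilityCheck: the probe still runs at most once, but tests can now swap in a stub via m.getSwapControllerAvailable. A small stand-alone sketch of that lazy-initialization shape, assuming Go 1.21+ for sync.OnceValue; the probe body is a placeholder, not the real cgroup check:

package main

import (
	"context"
	"fmt"
	"sync"
)

// newAvailabilityCheck returns a function that runs the probe at most once
// and caches the result for all later calls, the same shape as wrapping
// checkSwapControllerAvailability in sync.OnceValue.
func newAvailabilityCheck(ctx context.Context, probe func(context.Context) bool) func() bool {
	return sync.OnceValue(func() bool {
		return probe(ctx)
	})
}

func main() {
	calls := 0
	check := newAvailabilityCheck(context.Background(), func(context.Context) bool {
		calls++ // runs only once, no matter how often check() is called
		return true
	})
	fmt.Println(check(), check(), calls) // true true 1
}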
@@ -46,21 +46,21 @@ import (
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/ptr"
)

func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig {
ctx := context.Background()
func makeExpectedConfig(t *testing.T, tCtx context.Context, m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig {
container := &pod.Spec.Containers[containerIndex]
podIP := ""
restartCount := 0
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, []string{podIP}, nil)
opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(tCtx, pod, container, podIP, []string{podIP}, nil)
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
stopsignal := getContainerConfigStopSignal(container)
restartCountUint32 := uint32(restartCount)
envs := make([]*runtimeapi.KeyValue, len(opts.Envs))

l, _ := m.generateLinuxContainerConfig(container, pod, new(int64), "", nil, enforceMemoryQoS)
l, _ := m.generateLinuxContainerConfig(tCtx, container, pod, new(int64), "", nil, enforceMemoryQoS)

expectedConfig := &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
@@ -72,7 +72,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
Args: []string(nil),
WorkingDir: container.WorkingDir,
Labels: newContainerLabels(container, pod),
Annotations: newContainerAnnotations(container, pod, restartCount, opts),
Annotations: newContainerAnnotations(tCtx, container, pod, restartCount, opts),
Devices: makeDevices(opts),
Mounts: m.makeMounts(opts, container),
LogPath: containerLogsPath,
@@ -90,8 +90,8 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
}

func TestGenerateContainerConfig(t *testing.T) {
ctx := context.Background()
_, imageService, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, imageService, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)

runAsUser := int64(1000)
@@ -119,8 +119,8 @@ func TestGenerateContainerConfig(t *testing.T) {
},
}

expectedConfig := makeExpectedConfig(m, pod, 0, false)
containerConfig, _, err := m.generateContainerConfig(ctx, &pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil, nil)
expectedConfig := makeExpectedConfig(t, tCtx, m, pod, 0, false)
containerConfig, _, err := m.generateContainerConfig(tCtx, &pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
@@ -151,11 +151,11 @@ func TestGenerateContainerConfig(t *testing.T) {
},
}

_, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil, nil)
_, _, err = m.generateContainerConfig(tCtx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil, nil)
assert.Error(t, err)

imageID, _ := imageService.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
resp, _ := imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: imageID}, false)
imageID, _ := imageService.PullImage(tCtx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
resp, _ := imageService.ImageStatus(tCtx, &runtimeapi.ImageSpec{Image: imageID}, false)

resp.Image.Uid = nil
resp.Image.Username = "test"
@@ -163,12 +163,13 @@ func TestGenerateContainerConfig(t *testing.T) {
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue

_, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil, nil)
_, _, err = m.generateContainerConfig(tCtx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil, nil)
assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
}

func TestGenerateLinuxContainerConfigResources(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
m.cpuCFSQuota = true

assert.NoError(t, err)
@@ -262,7 +263,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
pod.Spec.Resources = test.podResources
}

linuxConfig, err := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false)
linuxConfig, err := m.generateLinuxContainerConfig(tCtx, &pod.Spec.Containers[0], pod, new(int64), "", nil, false)
assert.NoError(t, err)
assert.Equal(t, test.expected.CpuPeriod, linuxConfig.GetResources().CpuPeriod, test.name)
assert.Equal(t, test.expected.CpuQuota, linuxConfig.GetResources().CpuQuota, test.name)
@@ -272,7 +273,8 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
}

func TestCalculateLinuxResources(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
m.cpuCFSQuota = true

assert.NoError(t, err)
@@ -421,7 +423,8 @@ func TestCalculateLinuxResources(t *testing.T) {
}

func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)

podRequestMemory := resource.MustParse("128Mi")
@@ -490,8 +493,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
memoryLow int64
memoryHigh int64
}
l1, _ := m.generateLinuxContainerConfig(&pod1.Spec.Containers[0], pod1, new(int64), "", nil, true)
l2, _ := m.generateLinuxContainerConfig(&pod2.Spec.Containers[0], pod2, new(int64), "", nil, true)
l1, _ := m.generateLinuxContainerConfig(tCtx, &pod1.Spec.Containers[0], pod1, new(int64), "", nil, true)
l2, _ := m.generateLinuxContainerConfig(tCtx, &pod2.Spec.Containers[0], pod2, new(int64), "", nil, true)
tests := []struct {
name string
pod *v1.Pod
@@ -518,7 +521,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
}

for _, test := range tests {
linuxConfig, err := m.generateLinuxContainerConfig(&test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true)
linuxConfig, err := m.generateLinuxContainerConfig(tCtx, &test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true)
assert.NoError(t, err)
assert.Equal(t, test.expected.containerConfig, linuxConfig, test.name)
assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.min"], strconv.FormatInt(test.expected.memoryLow, 10), test.name)
@@ -527,6 +530,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
}
func TestGetHugepageLimitsFromResources(t *testing.T) {
tCtx := ktesting.Init(t)
var baseHugepage []*runtimeapi.HugepageLimit

// For each page size, limit to 0.
@@ -672,7 +676,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
}
}

results := GetHugepageLimitsFromResources(test.resources)
results := GetHugepageLimitsFromResources(tCtx, test.resources)
if !reflect.DeepEqual(expectedHugepages, results) {
t.Errorf("%s test failed. Expected %v but got %v", test.name, expectedHugepages, results)
}
@@ -684,7 +688,8 @@ func TestGetHugepageLimitsFromResources(t *testing.T) {
}

func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
if err != nil {
t.Fatalf("error creating test RuntimeManager: %v", err)
}
@@ -741,7 +746,7 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
got, err := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false)
got, err := m.generateLinuxContainerConfig(tCtx, &tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false)
assert.NoError(t, err)
if !proto.Equal(tc.want, got.SecurityContext.NamespaceOptions) {
t.Errorf("%v: want %q, got %q", t.Name(), tc.want, got.SecurityContext.NamespaceOptions)
@@ -757,7 +762,8 @@ var (
)

func TestGenerateLinuxConfigSupplementalGroupsPolicy(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
if err != nil {
t.Fatalf("error creating test RuntimeManager: %v", err)
}
@@ -823,7 +829,7 @@ func TestGenerateLinuxConfigSupplementalGroupsPolicy(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
actual, err := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", nil, false)
actual, err := m.generateLinuxContainerConfig(tCtx, &tc.pod.Spec.Containers[0], tc.pod, nil, "", nil, false)
if !tc.expectErr {
assert.Emptyf(t, err, "Unexpected error")
assert.EqualValuesf(t, tc.expected, actual.SecurityContext.SupplementalGroupsPolicy, "SupplementalGroupPolicy for %s", tc.name)
@@ -837,7 +843,8 @@ func TestGenerateLinuxConfigSupplementalGroupsPolicy(t *testing.T) {
}

func TestGenerateLinuxContainerResources(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
m.machineInfo.MemoryCapacity = 17179860387 // 16GB

@@ -915,14 +922,14 @@ func TestGenerateLinuxContainerResources(t *testing.T) {
},
} {
t.Run(fmt.Sprintf("cgroup%s:%s", tc.cgroupVersion, tc.name), func(t *testing.T) {
defer setSwapControllerAvailableDuringTest(false)()
setCgroupVersionDuringTest(tc.cgroupVersion)
m.getSwapControllerAvailable = func() bool { return false }

pod.Spec.Containers[0].Resources = v1.ResourceRequirements{Limits: tc.limits, Requests: tc.requests}

m.singleProcessOOMKill = ptr.To(tc.singleProcessOOMKill)

resources := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[0], false)
resources := m.generateLinuxContainerResources(tCtx, pod, &pod.Spec.Containers[0], false)
tc.expected.HugepageLimits = resources.HugepageLimits
assert.Equal(t, tc.expected, resources)
})
@@ -930,7 +937,8 @@ func TestGenerateLinuxContainerResources(t *testing.T) {
}

func TestGetContainerSwapBehavior(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -1041,7 +1049,7 @@ func TestGetContainerSwapBehavior(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
m.memorySwapBehavior = string(tt.configuredMemorySwap)
setCgroupVersionDuringTest(tt.cgroupVersion)
defer setSwapControllerAvailableDuringTest(tt.isSwapControllerAvailable)()
m.getSwapControllerAvailable = func() bool { return tt.isSwapControllerAvailable }
testpod := pod.DeepCopy()
testpod.Status.QOSClass = tt.qosClass
if tt.containerResourceOverride != nil {
@@ -1053,7 +1061,8 @@ func TestGetContainerSwapBehavior(t *testing.T) {
}

func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
m.machineInfo.MemoryCapacity = 42949672960 // 40Gb == 40 * 1024^3
m.machineInfo.SwapCapacity = 5368709120 // 5Gb == 5 * 1024^3
@@ -1283,7 +1292,7 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
setCgroupVersionDuringTest(tc.cgroupVersion)
defer setSwapControllerAvailableDuringTest(!tc.swapDisabledOnNode)()
m.getSwapControllerAvailable = func() bool { return !tc.swapDisabledOnNode }
m.memorySwapBehavior = string(tc.swapBehavior)

var resourceReqsC1, resourceReqsC2 v1.ResourceRequirements
@@ -1320,8 +1329,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) {
assert.True(t, types.IsCriticalPod(pod), "pod is expected to be critical")
}

resourcesC1 := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[0], false)
resourcesC2 := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[1], false)
resourcesC1 := m.generateLinuxContainerResources(tCtx, pod, &pod.Spec.Containers[0], false)
resourcesC2 := m.generateLinuxContainerResources(tCtx, pod, &pod.Spec.Containers[1], false)

if tc.swapDisabledOnNode {
expectSwapDisabled(tc.cgroupVersion, resourcesC1, resourcesC2)
@@ -1351,7 +1360,8 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) {
}

func TestGenerateUpdatePodSandboxResourcesRequest(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

podRequestCPU := resource.MustParse("400m")
@@ -1736,7 +1746,7 @@ func TestGenerateUpdatePodSandboxResourcesRequest(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
expectedLcr := m.calculateSandboxResources(tc.pod)
expectedLcr := m.calculateSandboxResources(tCtx, tc.pod)
expectedLcrOverhead := m.convertOverheadToLinuxResources(tc.pod)

podResourcesCfg := cm.ResourceConfigForPod(tc.pod, tc.enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
@@ -1756,7 +1766,8 @@ func TestGenerateUpdatePodSandboxResourcesRequest(t *testing.T) {
}

func TestUpdatePodSandboxResources(t *testing.T) {
fakeRuntime, _, m, errCreate := createTestRuntimeManager()
tCtx := ktesting.Init(t)
fakeRuntime, _, m, errCreate := createTestRuntimeManager(tCtx)
require.NoError(t, errCreate)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -1779,13 +1790,12 @@ func TestUpdatePodSandboxResources(t *testing.T) {
fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Len(t, fakeContainers, 1)

ctx := context.Background()
_, _, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace, "")
_, _, err := m.getPodContainerStatuses(tCtx, pod.UID, pod.Name, pod.Namespace, "")
require.NoError(t, err)

resourceConfig := &cm.ResourceConfig{}

err = m.updatePodSandboxResources(fakeSandbox.Id, pod, resourceConfig)
err = m.updatePodSandboxResources(tCtx, fakeSandbox.Id, pod, resourceConfig)
require.NoError(t, err)

// Verify sandbox is updated
@@ -1804,14 +1814,3 @@ func setCgroupVersionDuringTest(version CgroupVersion) {
return version == cgroupV2
}
}

func setSwapControllerAvailableDuringTest(available bool) func() {
original := swapControllerAvailable
swapControllerAvailable = func() bool {
return available
}

return func() {
swapControllerAvailable = original
}
}
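In the test hunks, context.Background() is replaced by ktesting.Init(t), which hands back a per-test context that satisfies context.Context and carries the test's logger, so lines emitted through klog.FromContext end up attributed to the right test. A hedged sketch of how such a test reads; the helper under test and the package name are hypothetical, only the ktesting.Init call mirrors the diff:

package kuberuntime_example_test

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

// doWork stands in for any function migrated to contextual logging.
func doWork(ctx context.Context) {
	klog.FromContext(ctx).Info("doing work")
}

func TestDoWork(t *testing.T) {
	// tCtx is a context.Context wired to this test's logger.
	tCtx := ktesting.Init(t)
	doWork(tCtx)
}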
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package kuberuntime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
@@ -38,6 +37,7 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
"k8s.io/kubernetes/test/utils/ktesting"
|
||||
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
@@ -47,8 +47,8 @@ import (
|
||||
|
||||
// TestRemoveContainer tests removing the container and its corresponding container logs.
|
||||
func TestRemoveContainer(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -86,7 +86,7 @@ func TestRemoveContainer(t *testing.T) {
|
||||
fakeOS.Create(expectedContainerLogPath)
|
||||
fakeOS.Create(expectedContainerLogPathRotated)
|
||||
|
||||
err = m.removeContainer(ctx, containerID)
|
||||
err = m.removeContainer(tCtx, containerID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify container log is removed.
|
||||
@@ -96,14 +96,15 @@ func TestRemoveContainer(t *testing.T) {
|
||||
fakeOS.Removes)
|
||||
// Verify container is removed
|
||||
assert.Contains(t, fakeRuntime.Called, "RemoveContainer")
|
||||
containers, err := fakeRuntime.ListContainers(ctx, &runtimeapi.ContainerFilter{Id: containerID})
|
||||
containers, err := fakeRuntime.ListContainers(tCtx, &runtimeapi.ContainerFilter{Id: containerID})
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, containers)
|
||||
}
|
||||
|
||||
// TestKillContainer tests killing the container in a Pod.
|
||||
func TestKillContainer(t *testing.T) {
|
||||
_, _, m, _ := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, _ := createTestRuntimeManager(tCtx)
|
||||
|
||||
tests := []struct {
|
||||
caseName string
|
||||
@@ -129,8 +130,8 @@ func TestKillContainer(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
ctx := context.Background()
|
||||
err := m.killContainer(ctx, test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride, nil)
|
||||
tCtx := ktesting.Init(t)
|
||||
err := m.killContainer(tCtx, test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride, nil)
|
||||
if test.succeed != (err == nil) {
|
||||
t.Errorf("%s: expected %v, got %v (%v)", test.caseName, test.succeed, (err == nil), err)
|
||||
}
|
||||
@@ -141,6 +142,7 @@ func TestKillContainer(t *testing.T) {
|
||||
// the internal type (i.e., toKubeContainerStatus()) for containers in
|
||||
// different states.
|
||||
func TestToKubeContainerStatus(t *testing.T) {
|
||||
tCtx := ktesting.Init(t)
|
||||
cid := &kubecontainer.ContainerID{Type: "testRuntime", ID: "dummyid"}
|
||||
meta := &runtimeapi.ContainerMetadata{Name: "cname", Attempt: 3}
|
||||
imageSpec := &runtimeapi.ImageSpec{Image: "fimage"}
|
||||
@@ -229,7 +231,7 @@ func TestToKubeContainerStatus(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
actual := toKubeContainerStatus(test.input, cid.Type)
|
||||
actual := toKubeContainerStatus(tCtx, test.input, cid.Type)
|
||||
assert.Equal(t, test.expected, actual, desc)
|
||||
}
|
||||
}
|
||||
@@ -238,6 +240,7 @@ func TestToKubeContainerStatus(t *testing.T) {
|
||||
// the internal type (i.e., toKubeContainerStatus()) for containers that returns Resources.
|
||||
func TestToKubeContainerStatusWithResources(t *testing.T) {
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
|
||||
tCtx := ktesting.Init(t)
|
||||
cid := &kubecontainer.ContainerID{Type: "testRuntime", ID: "dummyid"}
|
||||
meta := &runtimeapi.ContainerMetadata{Name: "cname", Attempt: 3}
|
||||
imageSpec := &runtimeapi.ImageSpec{Image: "fimage"}
|
||||
@@ -363,7 +366,7 @@ func TestToKubeContainerStatusWithResources(t *testing.T) {
|
||||
// TODO: remove skip once the failing test has been fixed.
|
||||
t.Skip("Skip failing test on Windows.")
|
||||
}
|
||||
actual := toKubeContainerStatus(test.input, cid.Type)
|
||||
actual := toKubeContainerStatus(tCtx, test.input, cid.Type)
|
||||
assert.Equal(t, test.expected, actual, desc)
|
||||
})
|
||||
}
|
||||
@@ -374,6 +377,7 @@ func TestToKubeContainerStatusWithUser(t *testing.T) {
|
||||
t.Skip("Updating Pod Container User is not supported on Windows.")
|
||||
}
|
||||
|
||||
tCtx := ktesting.Init(t)
|
||||
cid := &kubecontainer.ContainerID{Type: "testRuntime", ID: "dummyid"}
|
||||
meta := &runtimeapi.ContainerMetadata{Name: "cname", Attempt: 3}
|
||||
imageSpec := &runtimeapi.ImageSpec{Image: "fimage"}
|
||||
@@ -447,16 +451,16 @@ func TestToKubeContainerStatusWithUser(t *testing.T) {
|
||||
StartedAt: startedAt,
|
||||
User: test.input,
|
||||
}
|
||||
actual := toKubeContainerStatus(cStatus, cid.Type)
|
||||
actual := toKubeContainerStatus(tCtx, cStatus, cid.Type)
|
||||
assert.EqualValues(t, test.expected, actual.User, desc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Container) {

tCtx := ktesting.Init(t)
// Setup
fakeRuntime, _, m, _ := createTestRuntimeManager()
fakeRuntime, _, m, _ := createTestRuntimeManager(tCtx)

gracePeriod := int64(30)
cID := kubecontainer.ContainerID{
@@ -512,9 +516,9 @@ func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Containe

// Configured and works as expected
t.Run("PreStop-CMDExec", func(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
testContainer.Lifecycle = cmdLifeCycle
_ = m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod, nil)
_ = m.killContainer(tCtx, testPod, cID, "foo", "testKill", "", &gracePeriod, nil)
if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] {
t.Errorf("CMD Prestop hook was not invoked")
}
@@ -523,11 +527,11 @@ func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Containe
// Configured and working HTTP hook
t.Run("PreStop-HTTPGet", func(t *testing.T) {
t.Run("consistent", func(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
defer func() { fakeHTTP.req = nil }()
httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt32(80)
testContainer.Lifecycle = httpLifeCycle
_ = m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod, nil)
_ = m.killContainer(tCtx, testPod, cID, "foo", "testKill", "", &gracePeriod, nil)
if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
t.Errorf("HTTP Prestop hook was not invoked")
}
@@ -536,13 +540,13 @@ func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Containe

// When there is no time to run PreStopHook
t.Run("PreStop-NoTimeToRun", func(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
gracePeriodLocal := int64(0)

testPod.DeletionGracePeriodSeconds = &gracePeriodLocal
testPod.Spec.TerminationGracePeriodSeconds = &gracePeriodLocal

_ = m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriodLocal, nil)
_ = m.killContainer(tCtx, testPod, cID, "foo", "testKill", "", &gracePeriodLocal, nil)
if fakeHTTP.req != nil {
t.Errorf("HTTP Prestop hook Should not execute when gracePeriod is 0")
}
@@ -550,10 +554,10 @@ func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Containe

// Post Start script
t.Run("PostStart-CmdExe", func(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
// Fake all the things you need before trying to create a container
fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, testPod)
fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0)
fakeSandBoxConfig, _ := m.generatePodSandboxConfig(tCtx, testPod, 0)
testContainer.Lifecycle = cmdPostStart
fakePodStatus := &kubecontainer.PodStatus{
ContainerStatuses: []*kubecontainer.Status{
@@ -570,7 +574,7 @@ func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Containe
}

// Now try to create a container, which should in turn invoke PostStart Hook
_, err := m.startContainer(ctx, fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{}, nil)
_, err := m.startContainer(tCtx, fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{}, nil)
if err != nil {
t.Errorf("startContainer error =%v", err)
}
@@ -770,6 +774,7 @@ func TestRestartCountByLogDir(t *testing.T) {
}

func TestKillContainerGracePeriod(t *testing.T) {
tCtx := ktesting.Init(t)

shortGracePeriod := int64(10)
mediumGracePeriod := int64(30)
@@ -927,7 +932,7 @@ func TestKillContainerGracePeriod(t *testing.T) {

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actualGracePeriod := setTerminationGracePeriod(test.pod, &test.pod.Spec.Containers[0], "", kubecontainer.ContainerID{}, test.reason)
actualGracePeriod := setTerminationGracePeriod(tCtx, test.pod, &test.pod.Spec.Containers[0], "", kubecontainer.ContainerID{}, test.reason)
require.Equal(t, test.expectedGracePeriod, actualGracePeriod)
})
}
@@ -935,7 +940,8 @@ func TestKillContainerGracePeriod(t *testing.T) {

// TestUpdateContainerResources tests updating a container in a Pod.
func TestUpdateContainerResources(t *testing.T) {
fakeRuntime, _, m, errCreate := createTestRuntimeManager()
tCtx := ktesting.Init(t)
fakeRuntime, _, m, errCreate := createTestRuntimeManager(tCtx)
require.NoError(t, errCreate)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -958,12 +964,11 @@ func TestUpdateContainerResources(t *testing.T) {
_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Len(t, fakeContainers, 1)

ctx := context.Background()
cStatus, _, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace, "")
cStatus, _, err := m.getPodContainerStatuses(tCtx, pod.UID, pod.Name, pod.Namespace, "")
assert.NoError(t, err)
containerID := cStatus[0].ID

err = m.updateContainerResources(pod, &pod.Spec.Containers[0], containerID)
err = m.updateContainerResources(tCtx, pod, &pod.Spec.Containers[0], containerID)
assert.NoError(t, err)

// Verify container is updated
@@ -20,6 +20,8 @@ limitations under the License.
package kuberuntime

import (
"context"

v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
@@ -28,12 +30,12 @@ import (
)

// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(ctx context.Context, config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error {
return nil
}

// generateContainerResources generates platform specific container resources config for runtime
func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
func (m *kubeGenericRuntimeManager) generateContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
return nil
}

@@ -53,3 +55,8 @@ func toKubeContainerUser(statusUser *runtimeapi.ContainerUser) *kubecontainer.Co
func (m *kubeGenericRuntimeManager) GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) types.SwapBehavior {
return types.NoSwap
}

// initSwapControllerAvailabilityCheck returns a function that always returns false on unsupported platforms
func initSwapControllerAvailabilityCheck(ctx context.Context) func() bool {
return func() bool { return false }
}
@@ -20,6 +20,8 @@ limitations under the License.
package kuberuntime

import (
"context"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
@@ -32,8 +34,8 @@ import (
)

// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, _ *kubecontainer.ContainerID) error {
windowsConfig, err := m.generateWindowsContainerConfig(container, pod, uid, username)
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(ctx context.Context, config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, _ *kubecontainer.ContainerID) error {
windowsConfig, err := m.generateWindowsContainerConfig(ctx, container, pod, uid, username)
if err != nil {
return err
}
@@ -43,9 +45,9 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config
}

// generateContainerResources generates platform specific (windows) container resources config for runtime
func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
func (m *kubeGenericRuntimeManager) generateContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container) *runtimeapi.ContainerResources {
return &runtimeapi.ContainerResources{
Windows: m.generateWindowsContainerResources(pod, container),
Windows: m.generateWindowsContainerResources(ctx, pod, container),
}
}

@@ -55,14 +57,14 @@ func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(san
}

// generateWindowsContainerResources generates windows container resources config for runtime
func (m *kubeGenericRuntimeManager) generateWindowsContainerResources(pod *v1.Pod, container *v1.Container) *runtimeapi.WindowsContainerResources {
wcr := m.calculateWindowsResources(container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
func (m *kubeGenericRuntimeManager) generateWindowsContainerResources(ctx context.Context, pod *v1.Pod, container *v1.Container) *runtimeapi.WindowsContainerResources {
wcr := m.calculateWindowsResources(ctx, container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())

return wcr
}

// calculateWindowsResources will create the windowsContainerResources type based on the provided CPU and memory resource requests, limits
func (m *kubeGenericRuntimeManager) calculateWindowsResources(cpuLimit, memoryLimit *resource.Quantity) *runtimeapi.WindowsContainerResources {
func (m *kubeGenericRuntimeManager) calculateWindowsResources(ctx context.Context, cpuLimit, memoryLimit *resource.Quantity) *runtimeapi.WindowsContainerResources {
resources := runtimeapi.WindowsContainerResources{}

memLimit := memoryLimit.Value()
@@ -100,7 +102,8 @@ func (m *kubeGenericRuntimeManager) calculateWindowsResources(cpuLimit, memoryLi
if resources.CpuCount > 0 {
if resources.CpuMaximum > 0 {
resources.CpuMaximum = 0
klog.InfoS("Mutually exclusive options: CPUCount priority > CPUMaximum priority on Windows Server Containers. CPUMaximum should be ignored")
logger := klog.FromContext(ctx)
logger.Info("Mutually exclusive options: CPUCount priority > CPUMaximum priority on Windows Server Containers. CPUMaximum should be ignored")
}
}

@@ -113,9 +116,9 @@ func (m *kubeGenericRuntimeManager) calculateWindowsResources(cpuLimit, memoryLi

// generateWindowsContainerConfig generates windows container config for kubelet runtime v1.
// Refer https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/cri-windows.md.
func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) (*runtimeapi.WindowsContainerConfig, error) {
func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, uid *int64, username string) (*runtimeapi.WindowsContainerConfig, error) {
wc := &runtimeapi.WindowsContainerConfig{
Resources: m.generateWindowsContainerResources(pod, container),
Resources: m.generateWindowsContainerResources(ctx, pod, container),
SecurityContext: &runtimeapi.WindowsContainerSecurityContext{},
}

@@ -188,3 +191,8 @@ func toKubeContainerUser(statusUser *runtimeapi.ContainerUser) *kubecontainer.Co
func (m *kubeGenericRuntimeManager) GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) types.SwapBehavior {
return types.NoSwap
}

// initSwapControllerAvailabilityCheck returns a function that always returns false on Windows
func initSwapControllerAvailabilityCheck(ctx context.Context) func() bool {
return func() bool { return false }
}
@@ -30,10 +30,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/winstats"
"k8s.io/kubernetes/test/utils/ktesting"
)

func TestApplyPlatformSpecificContainerConfig(t *testing.T) {
_, _, fakeRuntimeSvc, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, fakeRuntimeSvc, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

containerConfig := &runtimeapi.ContainerConfig{}
@@ -81,7 +83,7 @@ func TestApplyPlatformSpecificContainerConfig(t *testing.T) {
},
}

err = fakeRuntimeSvc.applyPlatformSpecificContainerConfig(containerConfig, &pod.Spec.Containers[0], pod, new(int64), "foo", nil)
err = fakeRuntimeSvc.applyPlatformSpecificContainerConfig(tCtx, containerConfig, &pod.Spec.Containers[0], pod, new(int64), "foo", nil)
require.NoError(t, err)

limit := int64(3000)
@@ -154,7 +156,8 @@ func TestCalculateWindowsResources(t *testing.T) {
// TODO: remove skip once the failing test has been fixed.
t.Skip("Skip failing test on Windows.")

_, _, fakeRuntimeSvc, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, fakeRuntimeSvc, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

tests := []struct {
@@ -192,7 +195,7 @@ func TestCalculateWindowsResources(t *testing.T) {
},
}
for _, test := range tests {
windowsContainerResources := fakeRuntimeSvc.calculateWindowsResources(&test.cpuLim, &test.memLim)
windowsContainerResources := fakeRuntimeSvc.calculateWindowsResources(tCtx, &test.cpuLim, &test.memLim)
assert.Equal(t, test.expected, windowsContainerResources)
}
}
@@ -127,6 +127,7 @@ func (cgc *containerGC) enforceMaxContainersPerEvictUnit(ctx context.Context, ev

// removeOldestN removes the oldest toRemove containers and returns the resulting slice.
func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containerGCInfo, toRemove int) []containerGCInfo {
logger := klog.FromContext(ctx)
// Remove from oldest to newest (last to first).
numToKeep := len(containers) - toRemove
if numToKeep > 0 {
@@ -142,12 +143,12 @@ func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containe
}
message := "Container is in unknown state, try killing it before removal"
if err := cgc.manager.killContainer(ctx, nil, id, containers[i].name, message, reasonUnknown, nil, nil); err != nil {
klog.ErrorS(err, "Failed to stop container", "containerID", containers[i].id)
logger.Error(err, "Failed to stop container", "containerID", containers[i].id)
continue
}
}
if err := cgc.manager.removeContainer(ctx, containers[i].id); err != nil {
klog.ErrorS(err, "Failed to remove container", "containerID", containers[i].id)
logger.Error(err, "Failed to remove container", "containerID", containers[i].id)
}
}

@@ -172,16 +173,17 @@ func (cgc *containerGC) removeOldestNSandboxes(ctx context.Context, sandboxes []

// removeSandbox removes the sandbox by sandboxID.
func (cgc *containerGC) removeSandbox(ctx context.Context, sandboxID string) {
klog.V(4).InfoS("Removing sandbox", "sandboxID", sandboxID)
logger := klog.FromContext(ctx)
logger.V(4).Info("Removing sandbox", "sandboxID", sandboxID)
// In normal cases, kubelet should've already called StopPodSandbox before
// GC kicks in. To guard against the rare cases where this is not true, try
// stopping the sandbox before removing it.
if err := cgc.client.StopPodSandbox(ctx, sandboxID); err != nil {
klog.ErrorS(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID)
logger.Error(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID)
return
}
if err := cgc.client.RemovePodSandbox(ctx, sandboxID); err != nil {
klog.ErrorS(err, "Failed to remove sandbox", "sandboxID", sandboxID)
logger.Error(err, "Failed to remove sandbox", "sandboxID", sandboxID)
}
}

@@ -206,7 +208,7 @@ func (cgc *containerGC) evictableContainers(ctx context.Context, minAge time.Dur
continue
}

labeledInfo := getContainerInfoFromLabels(container.Labels)
labeledInfo := getContainerInfoFromLabels(ctx, container.Labels)
containerInfo := containerGCInfo{
id: container.Id,
name: container.Metadata.Name,
@@ -326,6 +328,7 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods
// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesReady bool) error {
logger := klog.FromContext(ctx)
osInterface := cgc.manager.osInterface
podLogsDirectory := cgc.manager.podLogsDirectory
if allSourcesReady {
@@ -340,10 +343,10 @@ func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesR
if !cgc.podStateProvider.ShouldPodContentBeRemoved(podUID) {
continue
}
klog.V(4).InfoS("Removing pod logs", "podUID", podUID)
logger.V(4).Info("Removing pod logs", "podUID", podUID)
err := osInterface.RemoveAll(filepath.Join(podLogsDirectory, name))
if err != nil {
klog.ErrorS(err, "Failed to remove pod logs directory", "path", name)
logger.Error(err, "Failed to remove pod logs directory", "path", name)
}
}
}
@@ -358,11 +361,11 @@ func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesR
if err != nil {
// TODO: we should handle container not found (i.e. container was deleted) case differently
// once https://github.com/kubernetes/kubernetes/issues/63336 is resolved
klog.InfoS("Error getting ContainerStatus for containerID", "containerID", containerID, "err", err)
logger.Info("Error getting ContainerStatus for containerID", "containerID", containerID, "err", err)
} else {
status := resp.GetStatus()
if status == nil {
klog.V(4).InfoS("Container status is nil")
logger.V(4).Info("Container status is nil")
continue
}
if status.State != runtimeapi.ContainerState_CONTAINER_EXITED {
@@ -377,18 +380,18 @@ func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesR
// See https://github.com/kubernetes/kubernetes/issues/52172
//
// We only remove unhealthy symlink for dead containers
klog.V(5).InfoS("Container is still running, not removing symlink", "containerID", containerID, "path", logSymlink)
logger.V(5).Info("Container is still running, not removing symlink", "containerID", containerID, "path", logSymlink)
continue
}
}
} else {
klog.V(4).InfoS("Unable to obtain container ID", "err", err)
logger.V(4).Info("Unable to obtain container ID", "err", err)
}
err := osInterface.Remove(logSymlink)
if err != nil {
klog.ErrorS(err, "Failed to remove container log dead symlink", "path", logSymlink)
logger.Error(err, "Failed to remove container log dead symlink", "path", logSymlink)
} else {
klog.V(4).InfoS("Removed symlink", "path", logSymlink)
logger.V(4).Info("Removed symlink", "path", logSymlink)
}
}
}
@@ -17,7 +17,6 @@ limitations under the License.
package kuberuntime

import (
"context"
"os"
"path/filepath"
"testing"
@@ -29,10 +28,12 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/test/utils/ktesting"
)

func TestSandboxGC(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)

podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
@@ -160,7 +161,7 @@ func TestSandboxGC(t *testing.T) {
},
} {
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
podStateProvider.removed = make(map[types.UID]struct{})
podStateProvider.terminated = make(map[types.UID]struct{})
fakeSandboxes := makeFakePodSandboxes(t, m, test.sandboxes)
@@ -176,13 +177,13 @@ func TestSandboxGC(t *testing.T) {
fakeRuntime.SetFakeSandboxes(fakeSandboxes)
fakeRuntime.SetFakeContainers(fakeContainers)

err := m.containerGC.evictSandboxes(ctx, test.evictTerminatingPods)
err := m.containerGC.evictSandboxes(tCtx, test.evictTerminatingPods)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListPodSandbox(ctx, nil)
realRemain, err := fakeRuntime.ListPodSandbox(tCtx, nil)
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
resp, err := fakeRuntime.PodSandboxStatus(ctx, fakeSandboxes[remain].Id, false)
resp, err := fakeRuntime.PodSandboxStatus(tCtx, fakeSandboxes[remain].Id, false)
assert.NoError(t, err)
assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, resp.Status)
}
@@ -203,7 +204,8 @@ func makeGCContainer(podName, containerName string, attempt int, createdAt int64
}

func TestContainerGC(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)

podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
@@ -388,7 +390,7 @@ func TestContainerGC(t *testing.T) {
},
} {
t.Run(test.description, func(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
podStateProvider.removed = make(map[types.UID]struct{})
podStateProvider.terminated = make(map[types.UID]struct{})
fakeContainers := makeFakeContainers(t, m, test.containers)
@@ -405,13 +407,13 @@ func TestContainerGC(t *testing.T) {
if test.policy == nil {
test.policy = &defaultGCPolicy
}
err := m.containerGC.evictContainers(ctx, *test.policy, test.allSourcesReady, test.evictTerminatingPods)
err := m.containerGC.evictContainers(tCtx, *test.policy, test.allSourcesReady, test.evictTerminatingPods)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListContainers(ctx, nil)
realRemain, err := fakeRuntime.ListContainers(tCtx, nil)
assert.NoError(t, err)
assert.Len(t, realRemain, len(test.remain))
for _, remain := range test.remain {
resp, err := fakeRuntime.ContainerStatus(ctx, fakeContainers[remain].Id, false)
resp, err := fakeRuntime.ContainerStatus(tCtx, fakeContainers[remain].Id, false)
assert.NoError(t, err)
assert.Equal(t, &fakeContainers[remain].ContainerStatus, resp.Status)
}
@@ -421,8 +423,8 @@ func TestContainerGC(t *testing.T) {

// Notice that legacy container symlink is not tested since it may be deprecated soon.
func TestPodLogDirectoryGC(t *testing.T) {
ctx := context.Background()
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
assert.NoError(t, err)
fakeOS := m.osInterface.(*containertest.FakeOS)
||||
podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
|
||||
@@ -450,20 +452,20 @@ func TestPodLogDirectoryGC(t *testing.T) {
|
||||
}
|
||||
|
||||
// allSourcesReady == true, pod log directories without corresponding pod should be removed.
|
||||
err = m.containerGC.evictPodLogsDirectories(ctx, true)
|
||||
err = m.containerGC.evictPodLogsDirectories(tCtx, true)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, removed, fakeOS.Removes)
|
||||
|
||||
// allSourcesReady == false, pod log directories should not be removed.
|
||||
fakeOS.Removes = []string{}
|
||||
err = m.containerGC.evictPodLogsDirectories(ctx, false)
|
||||
err = m.containerGC.evictPodLogsDirectories(tCtx, false)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, fakeOS.Removes)
|
||||
}
|
||||
|
||||
func TestUnknownStateContainerGC(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider)
|
||||
@@ -474,13 +476,13 @@ func TestUnknownStateContainerGC(t *testing.T) {
|
||||
})
|
||||
fakeRuntime.SetFakeContainers(fakeContainers)
|
||||
|
||||
err = m.containerGC.evictContainers(ctx, defaultGCPolicy, true, false)
|
||||
err = m.containerGC.evictContainers(tCtx, defaultGCPolicy, true, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, fakeRuntime.GetCalls(), "StopContainer", "RemoveContainer",
|
||||
"container in unknown state should be stopped before being removed")
|
||||
|
||||
remain, err := fakeRuntime.ListContainers(ctx, nil)
|
||||
remain, err := fakeRuntime.ListContainers(tCtx, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, remain)
|
||||
}
|
||||
|
||||
@@ -31,15 +31,16 @@ import (
|
||||
// PullImage pulls an image from the network to local storage using the supplied
|
||||
// secrets if necessary.
|
||||
func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecontainer.ImageSpec, credentials []crededentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *crededentialprovider.TrackedAuthConfig, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
img := image.Image
|
||||
imgSpec := toRuntimeAPIImageSpec(image)
|
||||
|
||||
if len(credentials) == 0 {
|
||||
klog.V(3).InfoS("Pulling image without credentials", "image", img)
|
||||
logger.V(3).Info("Pulling image without credentials", "image", img)
|
||||
|
||||
imageRef, err := m.imageService.PullImage(ctx, imgSpec, nil, podSandboxConfig)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to pull image", "image", img)
|
||||
logger.Error(err, "Failed to pull image", "image", img)
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
@@ -72,9 +73,10 @@ func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecon
|
||||
// GetImageRef gets the ID of the image which has already been in
|
||||
// the local storage. It returns ("", nil) if the image isn't in the local storage.
|
||||
func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubecontainer.ImageSpec) (string, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to get image status", "image", image.Image)
|
||||
logger.Error(err, "Failed to get image status", "image", image.Image)
|
||||
return "", err
|
||||
}
|
||||
if resp.Image == nil {
|
||||
@@ -84,9 +86,10 @@ func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubec
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) GetImageSize(ctx context.Context, image kubecontainer.ImageSpec) (uint64, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to get image status", "image", image.Image)
|
||||
logger.Error(err, "Failed to get image status", "image", image.Image)
|
||||
return 0, err
|
||||
}
|
||||
if resp.Image == nil {
|
||||
@@ -97,11 +100,12 @@ func (m *kubeGenericRuntimeManager) GetImageSize(ctx context.Context, image kube
|
||||
|
||||
// ListImages gets all images currently on the machine.
|
||||
func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubecontainer.Image, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
var images []kubecontainer.Image
|
||||
|
||||
allImages, err := m.imageService.ListImages(ctx, nil)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to list images")
|
||||
logger.Error(err, "Failed to list images")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -113,7 +117,7 @@ func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubeconta
|
||||
// field is empty and log a warning message.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI) {
|
||||
if img.Spec == nil || (img.Spec != nil && img.Spec.RuntimeHandler == "") {
|
||||
klog.V(2).InfoS("WARNING: RuntimeHandler is empty", "ImageID", img.Id)
|
||||
logger.V(2).Info("WARNING: RuntimeHandler is empty", "ImageID", img.Id)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,9 +136,10 @@ func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubeconta
|
||||
|
||||
// RemoveImage removes the specified image.
|
||||
func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubecontainer.ImageSpec) error {
|
||||
logger := klog.FromContext(ctx)
|
||||
err := m.imageService.RemoveImage(ctx, &runtimeapi.ImageSpec{Image: image.Image})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to remove image", "image", image.Image)
|
||||
logger.Error(err, "Failed to remove image", "image", image.Image)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -146,9 +151,10 @@ func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubec
|
||||
// this is a known issue, and we'll address this by getting imagefs stats directly from CRI.
|
||||
// TODO: Get imagefs stats directly from CRI.
|
||||
func (m *kubeGenericRuntimeManager) ImageStats(ctx context.Context) (*kubecontainer.ImageStats, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
allImages, err := m.imageService.ListImages(ctx, nil)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to list images")
|
||||
logger.Error(err, "Failed to list images")
|
||||
return nil, err
|
||||
}
|
||||
stats := &kubecontainer.ImageStats{}
|
||||
@@ -159,9 +165,10 @@ func (m *kubeGenericRuntimeManager) ImageStats(ctx context.Context) (*kubecontai
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
allImages, err := m.imageService.ImageFsInfo(ctx)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to get image filesystem")
|
||||
logger.Error(err, "Failed to get image filesystem")
|
||||
return nil, err
|
||||
}
|
||||
return allImages, nil
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package kuberuntime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
@@ -34,51 +33,52 @@ import (
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/images"
|
||||
imagepullmanager "k8s.io/kubernetes/pkg/kubelet/images/pullmanager"
|
||||
"k8s.io/kubernetes/test/utils/ktesting"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
func TestPullImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, _, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
imageRef, creds, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
imageRef, creds, err := fakeManager.PullImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "busybox", imageRef)
|
||||
assert.Nil(t, creds) // as this was an anonymous pull
|
||||
|
||||
images, err := fakeManager.ListImages(ctx)
|
||||
images, err := fakeManager.ListImages(tCtx)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, images, 1)
|
||||
assert.Equal(t, []string{"busybox"}, images[0].RepoTags)
|
||||
}
|
||||
|
||||
func TestPullImageWithError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
fakeImageService.InjectError("PullImage", fmt.Errorf("test-error"))
|
||||
imageRef, creds, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
imageRef, creds, err := fakeManager.PullImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", imageRef)
|
||||
assert.Nil(t, creds)
|
||||
|
||||
images, err := fakeManager.ListImages(ctx)
|
||||
images, err := fakeManager.ListImages(tCtx)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, images)
|
||||
}
|
||||
|
||||
func TestListImages(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
images := []string{"1111", "2222", "3333"}
|
||||
expected := sets.New[string](images...)
|
||||
fakeImageService.SetFakeImages(images)
|
||||
|
||||
actualImages, err := fakeManager.ListImages(ctx)
|
||||
actualImages, err := fakeManager.ListImages(tCtx)
|
||||
assert.NoError(t, err)
|
||||
actual := sets.New[string]()
|
||||
for _, i := range actualImages {
|
||||
@@ -89,8 +89,8 @@ func TestListImages(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListImagesPinnedField(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
imagesPinned := map[string]bool{
|
||||
@@ -105,7 +105,7 @@ func TestListImagesPinnedField(t *testing.T) {
|
||||
}
|
||||
fakeImageService.SetFakeImages(imageList)
|
||||
|
||||
actualImages, err := fakeManager.ListImages(ctx)
|
||||
actualImages, err := fakeManager.ListImages(tCtx)
|
||||
assert.NoError(t, err)
|
||||
for _, image := range actualImages {
|
||||
assert.Equal(t, imagesPinned[image.ID], image.Pinned)
|
||||
@@ -113,51 +113,51 @@ func TestListImagesPinnedField(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListImagesWithError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure"))
|
||||
|
||||
actualImages, err := fakeManager.ListImages(ctx)
|
||||
actualImages, err := fakeManager.ListImages(tCtx)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, actualImages)
|
||||
}
|
||||
|
||||
func TestGetImageRef(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
image := "busybox"
|
||||
fakeImageService.SetFakeImages([]string{image})
|
||||
imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
|
||||
imageRef, err := fakeManager.GetImageRef(tCtx, kubecontainer.ImageSpec{Image: image})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, image, imageRef)
|
||||
}
|
||||
|
||||
func TestImageSize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
const imageSize = uint64(64)
|
||||
fakeImageService.SetFakeImageSize(imageSize)
|
||||
image := "busybox"
|
||||
fakeImageService.SetFakeImages([]string{image})
|
||||
actualSize, err := fakeManager.GetImageSize(ctx, kubecontainer.ImageSpec{Image: image})
|
||||
actualSize, err := fakeManager.GetImageSize(tCtx, kubecontainer.ImageSpec{Image: image})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, imageSize, actualSize)
|
||||
}
|
||||
|
||||
func TestGetImageRefImageNotAvailableLocally(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, _, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
image := "busybox"
|
||||
|
||||
imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
|
||||
imageRef, err := fakeManager.GetImageRef(tCtx, kubecontainer.ImageSpec{Image: image})
|
||||
assert.NoError(t, err)
|
||||
|
||||
imageNotAvailableLocallyRef := ""
|
||||
@@ -165,61 +165,61 @@ func TestGetImageRefImageNotAvailableLocally(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetImageRefWithError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
image := "busybox"
|
||||
|
||||
fakeImageService.InjectError("ImageStatus", fmt.Errorf("test-error"))
|
||||
|
||||
imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image})
|
||||
imageRef, err := fakeManager.GetImageRef(tCtx, kubecontainer.ImageSpec{Image: image})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", imageRef)
|
||||
}
|
||||
|
||||
func TestRemoveImage(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
_, _, err = fakeManager.PullImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, fakeImageService.Images, 1)
|
||||
|
||||
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
|
||||
err = fakeManager.RemoveImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"})
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, fakeImageService.Images)
|
||||
}
|
||||
|
||||
func TestRemoveImageNoOpIfImageNotLocal(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, _, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
|
||||
err = fakeManager.RemoveImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveImageWithError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
_, _, err = fakeManager.PullImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, fakeImageService.Images, 1)
|
||||
|
||||
fakeImageService.InjectError("RemoveImage", fmt.Errorf("test-failure"))
|
||||
|
||||
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
|
||||
err = fakeManager.RemoveImage(tCtx, kubecontainer.ImageSpec{Image: "busybox"})
|
||||
assert.Error(t, err)
|
||||
assert.Len(t, fakeImageService.Images, 1)
|
||||
}
|
||||
|
||||
func TestImageStats(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
const imageSize = 64
|
||||
@@ -227,26 +227,26 @@ func TestImageStats(t *testing.T) {
|
||||
images := []string{"1111", "2222", "3333"}
|
||||
fakeImageService.SetFakeImages(images)
|
||||
|
||||
actualStats, err := fakeManager.ImageStats(ctx)
|
||||
actualStats, err := fakeManager.ImageStats(tCtx)
|
||||
assert.NoError(t, err)
|
||||
expectedStats := &kubecontainer.ImageStats{TotalStorageBytes: imageSize * uint64(len(images))}
|
||||
assert.Equal(t, expectedStats, actualStats)
|
||||
}
|
||||
|
||||
func TestImageStatsWithError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure"))
|
||||
|
||||
actualImageStats, err := fakeManager.ImageStats(ctx)
|
||||
actualImageStats, err := fakeManager.ImageStats(tCtx)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, actualImageStats)
|
||||
}
|
||||
|
||||
func TestPullWithSecrets(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tCtx := ktesting.Init(t)
|
||||
// auth value is equivalent to: "username":"passed-user","password":"passed-password"
|
||||
dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}}
|
||||
dockercfgContent, err := json.Marshal(dockerCfg)
|
||||
@@ -309,7 +309,7 @@ func TestPullWithSecrets(t *testing.T) {
|
||||
builtInKeyRing := &credentialprovider.BasicDockerKeyring{}
|
||||
builtInKeyRing.Add(nil, test.builtInDockerConfig)
|
||||
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
fsRecordAccessor, err := imagepullmanager.NewFSPullRecordsAccessor(t.TempDir())
|
||||
@@ -317,7 +317,7 @@ func TestPullWithSecrets(t *testing.T) {
|
||||
t.Fatal("failed to setup an file pull records accessor")
|
||||
}
|
||||
|
||||
imagePullManager, err := imagepullmanager.NewImagePullManager(context.Background(), fsRecordAccessor, imagepullmanager.AlwaysVerifyImagePullPolicy(), fakeManager, 10)
|
||||
imagePullManager, err := imagepullmanager.NewImagePullManager(tCtx, fsRecordAccessor, imagepullmanager.AlwaysVerifyImagePullPolicy(), fakeManager, 10)
|
||||
if err != nil {
|
||||
t.Fatal("failed to setup an image pull manager")
|
||||
}
|
||||
@@ -335,14 +335,14 @@ func TestPullWithSecrets(t *testing.T) {
|
||||
&fakePodPullingTimeRecorder{},
|
||||
)
|
||||
|
||||
_, _, err = fakeManager.imagePuller.EnsureImageExists(ctx, nil, makeTestPod("testpod", "testpod-ns", "testpod-uid", []v1.Container{}), test.imageName, test.passedSecrets, nil, "", v1.PullAlways)
|
||||
_, _, err = fakeManager.imagePuller.EnsureImageExists(tCtx, nil, makeTestPod("testpod", "testpod-ns", "testpod-uid", []v1.Container{}), test.imageName, test.passedSecrets, nil, "", v1.PullAlways)
|
||||
require.NoError(t, err)
|
||||
fakeImageService.AssertImagePulledWithAuth(t, &runtimeapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPullWithSecretsWithError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tCtx := ktesting.Init(t)
|
||||
|
||||
dockerCfg := map[string]map[string]map[string]string{
|
||||
"auths": {
|
||||
@@ -379,7 +379,7 @@ func TestPullWithSecretsWithError(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager()
|
||||
_, fakeImageService, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if test.shouldInjectError {
|
||||
@@ -391,7 +391,7 @@ func TestPullWithSecretsWithError(t *testing.T) {
|
||||
t.Fatal("failed to setup an file pull records accessor")
|
||||
}
|
||||
|
||||
imagePullManager, err := imagepullmanager.NewImagePullManager(context.Background(), fsRecordAccessor, imagepullmanager.AlwaysVerifyImagePullPolicy(), fakeManager, 10)
|
||||
imagePullManager, err := imagepullmanager.NewImagePullManager(tCtx, fsRecordAccessor, imagepullmanager.AlwaysVerifyImagePullPolicy(), fakeManager, 10)
|
||||
if err != nil {
|
||||
t.Fatal("failed to setup an image pull manager")
|
||||
}
|
||||
@@ -409,11 +409,11 @@ func TestPullWithSecretsWithError(t *testing.T) {
|
||||
&fakePodPullingTimeRecorder{},
|
||||
)
|
||||
|
||||
imageRef, _, err := fakeManager.imagePuller.EnsureImageExists(ctx, nil, makeTestPod("testpod", "testpod-ns", "testpod-uid", []v1.Container{}), test.imageName, test.passedSecrets, nil, "", v1.PullAlways)
|
||||
imageRef, _, err := fakeManager.imagePuller.EnsureImageExists(tCtx, nil, makeTestPod("testpod", "testpod-ns", "testpod-uid", []v1.Container{}), test.imageName, test.passedSecrets, nil, "", v1.PullAlways)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", imageRef)
|
||||
|
||||
images, err := fakeManager.ListImages(ctx)
|
||||
images, err := fakeManager.ListImages(tCtx)
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, images)
|
||||
})
|
||||
@@ -421,8 +421,8 @@ func TestPullWithSecretsWithError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPullThenListWithAnnotations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, _, fakeManager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, fakeManager, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
imageSpec := kubecontainer.ImageSpec{
|
||||
@@ -432,10 +432,10 @@ func TestPullThenListWithAnnotations(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
_, _, err = fakeManager.PullImage(ctx, imageSpec, nil, nil)
|
||||
_, _, err = fakeManager.PullImage(tCtx, imageSpec, nil, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
images, err := fakeManager.ListImages(ctx)
|
||||
images, err := fakeManager.ListImages(tCtx)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, images, 1)
|
||||
assert.Equal(t, images[0].Spec, imageSpec)
|
||||
|
||||
@@ -32,6 +32,6 @@ import (
|
||||
func (m *kubeGenericRuntimeManager) ReadLogs(ctx context.Context, path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
|
||||
// Convert v1.PodLogOptions into internal log options.
|
||||
opts := logs.NewLogOptions(apiOpts, time.Now())
|
||||
logger := klog.Background()
|
||||
logger := klog.FromContext(ctx)
|
||||
return logs.ReadLogs(ctx, &logger, path, containerID, opts, m.runtimeService, stdout, stderr)
|
||||
}
|
||||
|
||||
@@ -182,6 +182,10 @@ type kubeGenericRuntimeManager struct {
|
||||
|
||||
// Root directory used to store pod logs
|
||||
podLogsDirectory string
|
||||
|
||||
// Swap controller availability check function (Linux only)
|
||||
// Uses sync.OnceValue for lazy initialization
|
||||
getSwapControllerAvailable func() bool
|
||||
}
|
||||
|
||||
// KubeGenericRuntime is a interface contains interfaces for container runtime and command.
|
||||
@@ -193,6 +197,7 @@ type KubeGenericRuntime interface {
|
||||
|
||||
// NewKubeGenericRuntimeManager creates a new kubeGenericRuntimeManager
|
||||
func NewKubeGenericRuntimeManager(
|
||||
ctx context.Context,
|
||||
recorder record.EventRecorder,
|
||||
livenessManager proberesults.Manager,
|
||||
readinessManager proberesults.Manager,
|
||||
@@ -231,7 +236,8 @@ func NewKubeGenericRuntimeManager(
|
||||
tokenManager *token.Manager,
|
||||
getServiceAccount plugin.GetServiceAccountFunc,
|
||||
) (KubeGenericRuntime, []images.PostImageGCHook, error) {
|
||||
ctx := context.Background()
|
||||
logger := klog.FromContext(ctx)
|
||||
|
||||
runtimeService = newInstrumentedRuntimeService(runtimeService)
|
||||
imageService = newInstrumentedImageManagerService(imageService)
|
||||
tracer := tracerProvider.Tracer(instrumentationScope)
|
||||
@@ -262,30 +268,33 @@ func NewKubeGenericRuntimeManager(
|
||||
podLogsDirectory: podLogsDirectory,
|
||||
}
|
||||
|
||||
// Initialize swap controller availability check with lazy evaluation
|
||||
kubeRuntimeManager.getSwapControllerAvailable = initSwapControllerAvailabilityCheck(ctx)
|
||||
|
||||
typedVersion, err := kubeRuntimeManager.getTypedVersion(ctx)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Get runtime version failed")
|
||||
logger.Error(err, "Get runtime version failed")
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Only matching kubeRuntimeAPIVersion is supported now
|
||||
// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
|
||||
if typedVersion.Version != kubeRuntimeAPIVersion {
|
||||
klog.ErrorS(err, "This runtime api version is not supported",
|
||||
logger.Error(err, "This runtime api version is not supported",
|
||||
"apiVersion", typedVersion.Version,
|
||||
"supportedAPIVersion", kubeRuntimeAPIVersion)
|
||||
return nil, nil, ErrVersionNotSupported
|
||||
}
|
||||
|
||||
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
|
||||
klog.InfoS("Container runtime initialized",
|
||||
logger.Info("Container runtime initialized",
|
||||
"containerRuntime", typedVersion.RuntimeName,
|
||||
"version", typedVersion.RuntimeVersion,
|
||||
"apiVersion", typedVersion.RuntimeApiVersion)
|
||||
|
||||
if imageCredentialProviderConfigPath != "" || imageCredentialProviderBinDir != "" {
|
||||
if err := plugin.RegisterCredentialProviderPlugins(imageCredentialProviderConfigPath, imageCredentialProviderBinDir, tokenManager.GetServiceAccountToken, getServiceAccount); err != nil {
|
||||
klog.ErrorS(err, "Failed to register CRI auth plugins")
|
||||
logger.Error(err, "Failed to register CRI auth plugins")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -400,6 +409,7 @@ func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer.
|
||||
// specifies whether the runtime returns all containers including those already
|
||||
// exited and dead containers (used for garbage collection).
|
||||
func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
pods := make(map[kubetypes.UID]*kubecontainer.Pod)
|
||||
sandboxes, err := m.getKubeletSandboxes(ctx, all)
|
||||
if err != nil {
|
||||
@@ -408,7 +418,7 @@ func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*k
|
||||
for i := range sandboxes {
|
||||
s := sandboxes[i]
|
||||
if s.Metadata == nil {
|
||||
klog.V(4).InfoS("Sandbox does not have metadata", "sandbox", s)
|
||||
logger.V(4).Info("Sandbox does not have metadata", "sandbox", s)
|
||||
continue
|
||||
}
|
||||
podUID := kubetypes.UID(s.Metadata.Uid)
|
||||
@@ -422,7 +432,7 @@ func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*k
|
||||
p := pods[podUID]
|
||||
converted, err := m.sandboxToKubeContainer(s)
|
||||
if err != nil {
|
||||
klog.V(4).InfoS("Convert sandbox of pod failed", "runtimeName", m.runtimeName, "sandbox", s, "podUID", podUID, "err", err)
|
||||
logger.V(4).Info("Convert sandbox of pod failed", "runtimeName", m.runtimeName, "sandbox", s, "podUID", podUID, "err", err)
|
||||
continue
|
||||
}
|
||||
p.Sandboxes = append(p.Sandboxes, converted)
|
||||
@@ -436,11 +446,11 @@ func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*k
|
||||
for i := range containers {
|
||||
c := containers[i]
|
||||
if c.Metadata == nil {
|
||||
klog.V(4).InfoS("Container does not have metadata", "container", c)
|
||||
logger.V(4).Info("Container does not have metadata", "container", c)
|
||||
continue
|
||||
}
|
||||
|
||||
labelledInfo := getContainerInfoFromLabels(c.Labels)
|
||||
labelledInfo := getContainerInfoFromLabels(ctx, c.Labels)
|
||||
pod, found := pods[labelledInfo.PodUID]
|
||||
if !found {
|
||||
pod = &kubecontainer.Pod{
|
||||
@@ -451,9 +461,9 @@ func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*k
|
||||
pods[labelledInfo.PodUID] = pod
|
||||
}
|
||||
|
||||
converted, err := m.toKubeContainer(c)
|
||||
converted, err := m.toKubeContainer(ctx, c)
|
||||
if err != nil {
|
||||
klog.V(4).InfoS("Convert container of pod failed", "runtimeName", m.runtimeName, "container", c, "podUID", labelledInfo.PodUID, "err", err)
|
||||
logger.V(4).Info("Convert container of pod failed", "runtimeName", m.runtimeName, "container", c, "podUID", labelledInfo.PodUID, "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -474,7 +484,7 @@ func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*k
|
||||
sort.SliceStable(result, func(i, j int) bool {
|
||||
return result[i].CreatedAt > result[j].CreatedAt
|
||||
})
|
||||
klog.V(4).InfoS("Retrieved pods from runtime", "all", all)
|
||||
logger.V(4).Info("Retrieved pods from runtime", "all", all)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -594,7 +604,8 @@ func containerResourcesFromRequirements(requirements *v1.ResourceRequirements) c
|
||||
// computePodResizeAction determines the actions required (if any) to resize the given container.
|
||||
// Returns whether to keep (true) or restart (false) the container.
|
||||
// TODO(vibansal): Make this function to be agnostic to whether it is dealing with a restartable init container or not (i.e. remove the argument `isRestartableInitContainer`).
|
||||
func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
|
||||
func (m *kubeGenericRuntimeManager) computePodResizeAction(ctx context.Context, pod *v1.Pod, containerIdx int, isRestartableInitContainer bool, kubeContainerStatus *kubecontainer.Status, changes *podActions) (keepContainer bool) {
|
||||
logger := klog.FromContext(ctx)
|
||||
if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); !resizable {
|
||||
return true
|
||||
}
|
||||
@@ -616,7 +627,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
|
||||
|
||||
actuatedResources, found := m.allocationManager.GetActuatedResources(pod.UID, container.Name)
|
||||
if !found {
|
||||
klog.ErrorS(nil, "Missing actuated resource record", "pod", klog.KObj(pod), "container", container.Name)
|
||||
logger.Error(nil, "Missing actuated resource record", "pod", klog.KObj(pod), "container", container.Name)
|
||||
// Proceed with the zero-value actuated resources. For restart NotRequired, this may
|
||||
// result in an extra call to UpdateContainerResources, but that call should be idempotent.
|
||||
// For RestartContainer, this may trigger a container restart.
|
||||
@@ -693,6 +704,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, podContainerChanges podActions) *kubecontainer.SyncResult {
|
||||
logger := klog.FromContext(ctx)
|
||||
start := time.Now()
|
||||
success := false
|
||||
defer func() {
|
||||
@@ -705,23 +717,23 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *
|
||||
enforceCPULimits := m.cpuCFSQuota
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod) {
|
||||
enforceCPULimits = false
|
||||
klog.V(2).InfoS("Disabled CFS quota", "pod", klog.KObj(pod))
|
||||
logger.V(2).Info("Disabled CFS quota", "pod", klog.KObj(pod))
|
||||
}
|
||||
podResources := cm.ResourceConfigForPod(pod, enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
|
||||
if podResources == nil {
|
||||
klog.ErrorS(nil, "Unable to get resource configuration", "pod", klog.KObj(pod))
|
||||
logger.Error(nil, "Unable to get resource configuration", "pod", klog.KObj(pod))
|
||||
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get resource configuration processing resize for pod %q", format.Pod(pod)))
|
||||
return resizeResult
|
||||
}
|
||||
currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
|
||||
if err != nil {
|
||||
klog.ErrorS(nil, "Unable to get pod cgroup memory config", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Unable to get pod cgroup memory config", "pod", klog.KObj(pod))
|
||||
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get pod cgroup memory config for pod %q", format.Pod(pod)))
|
||||
return resizeResult
|
||||
}
|
||||
currentPodCPUConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
|
||||
if err != nil {
|
||||
klog.ErrorS(nil, "Unable to get pod cgroup cpu config", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Unable to get pod cgroup cpu config", "pod", klog.KObj(pod))
|
||||
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("unable to get pod cgroup cpu config for pod %q", format.Pod(pod)))
|
||||
return resizeResult
|
||||
}
|
||||
@@ -733,7 +745,7 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *
|
||||
// Before proceeding with the resize, perform a best-effort check to catch potential resize
|
||||
// errors in order to avoid a partial-resize state.
|
||||
if err := m.validatePodResizeAction(ctx, pod, podStatus, currentPodResources, podResources, podContainerChanges); err != nil {
|
||||
klog.ErrorS(err, "Allocated pod resize is not currently feasible", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Allocated pod resize is not currently feasible", "pod", klog.KObj(pod))
|
||||
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, err.Error())
|
||||
return resizeResult
|
||||
}
|
||||
@@ -758,12 +770,12 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *
|
||||
}
|
||||
err = pcm.SetPodCgroupConfig(pod, resizedResources)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to set cgroup config", "resource", rName, "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to set cgroup config", "resource", rName, "pod", klog.KObj(pod))
|
||||
return err
|
||||
}
|
||||
currentPodResources = mergeResourceConfig(currentPodResources, resizedResources)
|
||||
if err = m.updatePodSandboxResources(podContainerChanges.SandboxID, pod, currentPodResources); err != nil {
|
||||
klog.ErrorS(err, "Failed to notify runtime for UpdatePodSandboxResources", "resource", rName, "pod", klog.KObj(pod))
|
||||
if err = m.updatePodSandboxResources(ctx, podContainerChanges.SandboxID, pod, currentPodResources); err != nil {
|
||||
logger.Error(err, "Failed to notify runtime for UpdatePodSandboxResources", "resource", rName, "pod", klog.KObj(pod))
|
||||
// Don't propagate the error since the updatePodSandboxResources call is best-effort.
|
||||
}
|
||||
return nil
|
||||
@@ -786,8 +798,8 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *
|
||||
}
|
||||
}
|
||||
if len(podContainerChanges.ContainersToUpdate[rName]) > 0 {
|
||||
if err = m.updatePodContainerResources(pod, rName, podContainerChanges.ContainersToUpdate[rName]); err != nil {
|
||||
klog.ErrorS(err, "updatePodContainerResources failed", "pod", format.Pod(pod), "resource", rName)
|
||||
if err = m.updatePodContainerResources(ctx, pod, rName, podContainerChanges.ContainersToUpdate[rName]); err != nil {
|
||||
logger.Error(err, "updatePodContainerResources failed", "pod", format.Pod(pod), "resource", rName)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -823,7 +835,7 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(ctx context.Context, pod *
|
||||
if len(podContainerChanges.ContainersToUpdate[v1.ResourceCPU]) > 0 || podContainerChanges.UpdatePodResources {
|
||||
if podResources.CPUShares == nil {
|
||||
// This shouldn't happen: ResourceConfigForPod always returns a non-nil value for CPUShares.
|
||||
klog.ErrorS(nil, "podResources.CPUShares is nil", "pod", pod.Name)
|
||||
logger.Error(nil, "podResources.CPUShares is nil", "pod", pod.Name)
|
||||
resizeResult.Fail(kubecontainer.ErrResizePodInPlace, fmt.Sprintf("podResources.CPUShares is nil for pod %s", pod.Name))
|
||||
return resizeResult
|
||||
}
|
||||
@@ -921,8 +933,9 @@ func (m *kubeGenericRuntimeManager) validateMemoryResizeAction(
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo) error {
|
||||
klog.V(5).InfoS("Updating container resources", "pod", klog.KObj(pod))
|
||||
func (m *kubeGenericRuntimeManager) updatePodContainerResources(ctx context.Context, pod *v1.Pod, resourceName v1.ResourceName, containersToUpdate []containerToUpdateInfo) error {
|
||||
logger := klog.FromContext(ctx)
|
||||
logger.V(5).Info("Updating container resources", "pod", klog.KObj(pod))
|
||||
|
||||
for _, cInfo := range containersToUpdate {
|
||||
container := cInfo.container.DeepCopy()
|
||||
@@ -948,10 +961,10 @@ func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, res
|
||||
v1.ResourceMemory: *resource.NewQuantity(cInfo.currentContainerResources.memoryRequest, resource.BinarySI),
|
||||
}
|
||||
}
|
||||
if err := m.updateContainerResources(pod, container, cInfo.kubeContainerID); err != nil {
|
||||
if err := m.updateContainerResources(ctx, pod, container, cInfo.kubeContainerID); err != nil {
|
||||
// Log error and abort as container updates need to succeed in the order determined by computePodResizeAction.
|
||||
// The recovery path is for SyncPod to keep retrying at later times until it succeeds.
|
||||
klog.ErrorS(err, "updateContainerResources failed", "container", container.Name, "cID", cInfo.kubeContainerID,
|
||||
logger.Error(err, "updateContainerResources failed", "container", container.Name, "cID", cInfo.kubeContainerID,
|
||||
"pod", format.Pod(pod), "resourceName", resourceName)
|
||||
return err
|
||||
}
|
||||
@@ -973,7 +986,8 @@ func (m *kubeGenericRuntimeManager) updatePodContainerResources(pod *v1.Pod, res
|
||||
|
||||
// computePodActions checks whether the pod spec has changed and returns the changes if true.
|
||||
func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {
|
||||
klog.V(5).InfoS("Syncing Pod", "pod", klog.KObj(pod))
|
||||
logger := klog.FromContext(ctx)
|
||||
logger.V(5).Info("Syncing Pod", "pod", klog.KObj(pod))
|
||||
|
||||
createPodSandbox, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, podStatus)
|
||||
changes := podActions{
|
||||
@@ -1052,7 +1066,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
|
||||
}
|
||||
|
||||
// Check initialization progress.
|
||||
hasInitialized := m.computeInitContainerActions(pod, podStatus, &changes)
|
||||
hasInitialized := m.computeInitContainerActions(ctx, pod, podStatus, &changes)
|
||||
if changes.KillPod || !hasInitialized {
|
||||
// Initialization failed or still in progress. Skip inspecting non-init
|
||||
// containers.
|
||||
@@ -1070,7 +1084,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
|
||||
// to it.
|
||||
if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {
|
||||
if err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {
|
||||
klog.ErrorS(err, "Internal container post-stop lifecycle hook failed for container in pod with error",
|
||||
logger.Error(err, "Internal container post-stop lifecycle hook failed for container in pod with error",
|
||||
"containerName", container.Name, "pod", klog.KObj(pod))
|
||||
}
|
||||
}
|
||||
@@ -1079,7 +1093,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
|
||||
// need to restart it.
|
||||
if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
|
||||
if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
|
||||
klog.V(3).InfoS("Container of pod is not in the desired state and shall be started", "containerName", container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("Container of pod is not in the desired state and shall be started", "containerName", container.Name, "pod", klog.KObj(pod))
|
||||
changes.ContainersToStart = append(changes.ContainersToStart, idx)
|
||||
if containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {
|
||||
// If container is in unknown state, we don't know whether it
|
||||
@@ -1113,7 +1127,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
|
||||
// If the container failed the startup probe, we should kill it.
|
||||
message = fmt.Sprintf("Container %s failed startup probe", container.Name)
|
||||
reason = reasonStartupProbe
|
||||
} else if !m.computePodResizeAction(pod, idx, false, containerStatus, &changes) {
|
||||
} else if !m.computePodResizeAction(ctx, pod, idx, false, containerStatus, &changes) {
|
||||
// computePodResizeAction updates 'changes' if resize policy requires restarting this container
|
||||
continue
|
||||
} else {
|
||||
@@ -1136,7 +1150,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
|
||||
message: message,
|
||||
reason: reason,
|
||||
}
|
||||
klog.V(2).InfoS("Message for Container of pod", "containerName", container.Name, "containerStatusID", containerStatus.ID, "pod", klog.KObj(pod), "containerMessage", message)
|
||||
logger.V(2).Info("Message for Container of pod", "containerName", container.Name, "containerStatusID", containerStatus.ID, "pod", klog.KObj(pod), "containerMessage", message)
|
||||
}
|
||||
|
||||
if keepCount == 0 && len(changes.ContainersToStart) == 0 {
|
||||
@@ -1160,33 +1174,34 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
|
||||
// 7. Resize running containers (if InPlacePodVerticalScaling==true)
|
||||
// 8. Create normal containers.
|
||||
func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
|
||||
logger := klog.FromContext(ctx)
|
||||
// Step 1: Compute sandbox and container changes.
|
||||
podContainerChanges := m.computePodActions(ctx, pod, podStatus)
|
||||
klog.V(3).InfoS("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod))
|
||||
if podContainerChanges.CreateSandbox {
|
||||
ref, err := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
}
|
||||
if podContainerChanges.SandboxID != "" {
|
||||
m.recorder.Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.")
|
||||
} else {
|
||||
klog.V(4).InfoS("SyncPod received new pod, will create a sandbox for it", "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("SyncPod received new pod, will create a sandbox for it", "pod", klog.KObj(pod))
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Kill the pod if the sandbox has changed.
|
||||
if podContainerChanges.KillPod {
|
||||
if podContainerChanges.CreateSandbox {
|
||||
klog.V(4).InfoS("Stopping PodSandbox for pod, will start new one", "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Stopping PodSandbox for pod, will start new one", "pod", klog.KObj(pod))
|
||||
} else {
|
||||
klog.V(4).InfoS("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod))
|
||||
}
|
||||
|
||||
killResult := m.killPodWithSyncResult(ctx, pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
|
||||
result.AddPodSyncResult(killResult)
|
||||
if killResult.Error() != nil {
|
||||
klog.ErrorS(killResult.Error(), "killPodWithSyncResult failed")
|
||||
logger.Error(killResult.Error(), "killPodWithSyncResult failed")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1196,12 +1211,12 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
} else {
|
||||
// Step 3: kill any running containers in this pod which are not to keep.
|
||||
for containerID, containerInfo := range podContainerChanges.ContainersToKill {
|
||||
klog.V(3).InfoS("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
|
||||
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
|
||||
result.AddSyncResult(killContainerResult)
|
||||
if err := m.killContainer(ctx, pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil, nil); err != nil {
|
||||
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
|
||||
klog.ErrorS(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
|
||||
logger.Error(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1233,7 +1248,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
var msg string
|
||||
var err error
|
||||
|
||||
klog.V(4).InfoS("Creating PodSandbox for pod", "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Creating PodSandbox for pod", "pod", klog.KObj(pod))
|
||||
metrics.StartedPodsTotal.Inc()
|
||||
createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
|
||||
result.AddSyncResult(createSandboxResult)
|
||||
@@ -1253,11 +1268,11 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
if err := m.runtimeHelper.PrepareDynamicResources(ctx, pod); err != nil {
|
||||
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if referr != nil {
|
||||
klog.ErrorS(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedPrepareDynamicResources, "Failed to prepare dynamic resources: %v", err)
|
||||
klog.ErrorS(err, "Failed to prepare dynamic resources", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to prepare dynamic resources", "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1272,29 +1287,29 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
// SyncPod can still be running when we get here, which
|
||||
// means the PodWorker has not acked the deletion.
|
||||
if m.podStateProvider.IsPodTerminationRequested(pod.UID) {
|
||||
klog.V(4).InfoS("Pod was deleted and sandbox failed to be created", "pod", klog.KObj(pod), "podUID", pod.UID)
|
||||
logger.V(4).Info("Pod was deleted and sandbox failed to be created", "pod", klog.KObj(pod), "podUID", pod.UID)
|
||||
return
|
||||
}
|
||||
metrics.StartedPodsErrorsTotal.Inc()
|
||||
createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
|
||||
klog.ErrorS(err, "CreatePodSandbox for pod failed", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "CreatePodSandbox for pod failed", "pod", klog.KObj(pod))
|
||||
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if referr != nil {
|
||||
klog.ErrorS(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
}
|
||||
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed to create pod sandbox: %v", err)
|
||||
return
|
||||
}
|
||||
klog.V(4).InfoS("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
|
||||
|
||||
resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false)
|
||||
if err != nil {
|
||||
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if referr != nil {
|
||||
klog.ErrorS(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
logger.Error(referr, "Couldn't make a ref to pod", "pod", klog.KObj(pod))
|
||||
}
|
||||
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err)
|
||||
klog.ErrorS(err, "Failed to get pod sandbox status; Skipping pod", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to get pod sandbox status; Skipping pod", "pod", klog.KObj(pod))
|
||||
result.Fail(err)
|
||||
return
|
||||
}
|
||||
@@ -1307,8 +1322,8 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
// host-network, we may use a stale IP.
|
||||
if !kubecontainer.IsHostNetworkPod(pod) {
|
||||
// Overwrite the podIPs passed in the pod status, since we just started the pod sandbox.
|
||||
podIPs = m.determinePodSandboxIPs(pod.Namespace, pod.Name, resp.GetStatus())
|
||||
klog.V(4).InfoS("Determined the ip for pod after sandbox changed", "IPs", podIPs, "pod", klog.KObj(pod))
|
||||
podIPs = m.determinePodSandboxIPs(ctx, pod.Namespace, pod.Name, resp.GetStatus())
|
||||
logger.V(4).Info("Determined the ip for pod after sandbox changed", "IPs", podIPs, "pod", klog.KObj(pod))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1323,17 +1338,17 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
// Get podSandboxConfig for containers to start.
|
||||
configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
|
||||
result.AddSyncResult(configPodSandboxResult)
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt)
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(ctx, pod, podContainerChanges.Attempt)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
|
||||
klog.ErrorS(err, "GeneratePodSandboxConfig for pod failed", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "GeneratePodSandboxConfig for pod failed", "pod", klog.KObj(pod))
|
||||
configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
|
||||
return
|
||||
}
|
||||
|
||||
imageVolumePullResults, err := m.getImageVolumes(ctx, pod, podSandboxConfig, pullSecrets)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Get image volumes for pod failed", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Get image volumes for pod failed", "pod", klog.KObj(pod))
|
||||
configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, err.Error())
|
||||
return
|
||||
}
|
||||
@@ -1347,10 +1362,10 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name)
|
||||
result.AddSyncResult(startContainerResult)
|
||||
|
||||
isInBackOff, msg, err := m.doBackOff(pod, spec.container, podStatus, backOff)
|
||||
isInBackOff, msg, err := m.doBackOff(ctx, pod, spec.container, podStatus, backOff)
|
||||
if isInBackOff {
|
||||
startContainerResult.Fail(err, msg)
|
||||
klog.V(4).InfoS("Backing Off restarting container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Backing Off restarting container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1358,10 +1373,10 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
if sc.HasWindowsHostProcessRequest(pod, spec.container) {
|
||||
metrics.StartedHostProcessContainersTotal.WithLabelValues(metricLabel).Inc()
|
||||
}
|
||||
klog.V(4).InfoS("Creating container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Creating container in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod))
|
||||
|
||||
// We fail late here to populate the "ErrImagePull" and "ImagePullBackOff" correctly to the end user.
|
||||
imageVolumes, err := m.toKubeContainerImageVolumes(imageVolumePullResults, spec.container, pod, startContainerResult)
|
||||
imageVolumes, err := m.toKubeContainerImageVolumes(ctx, imageVolumePullResults, spec.container, pod, startContainerResult)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1381,7 +1396,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
// repetitive log spam
|
||||
switch {
|
||||
case err == images.ErrImagePullBackOff:
|
||||
klog.V(3).InfoS("Container start failed in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod), "containerMessage", msg, "err", err)
|
||||
logger.V(3).Info("Container start failed in pod", "containerType", typeName, "container", spec.container.Name, "pod", klog.KObj(pod), "containerMessage", msg, "err", err)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%v %v start failed in pod %v: %w: %s", typeName, spec.container.Name, format.Pod(pod), err, msg))
|
||||
}
|
||||
@@ -1405,15 +1420,15 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
|
||||
// Start the next init container.
|
||||
if err := start(ctx, "init container", metrics.InitContainer, containerStartSpec(container)); err != nil {
|
||||
if podutil.IsRestartableInitContainer(container) {
|
||||
klog.V(4).InfoS("Failed to start the restartable init container for the pod, skipping", "initContainerName", container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Failed to start the restartable init container for the pod, skipping", "initContainerName", container.Name, "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
klog.V(4).InfoS("Failed to initialize the pod, as the init container failed to start, aborting", "initContainerName", container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Failed to initialize the pod, as the init container failed to start, aborting", "initContainerName", container.Name, "pod", klog.KObj(pod))
|
||||
return
|
||||
}
|
||||
|
||||
// Successfully started the container; clear the entry in the failure
|
||||
klog.V(4).InfoS("Completed init container for pod", "containerName", container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Completed init container for pod", "containerName", container.Name, "pod", klog.KObj(pod))
|
||||
}
|
||||
|
||||
// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
|
||||
@@ -1464,7 +1479,7 @@ type imageVolumePullResult struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) toKubeContainerImageVolumes(imageVolumePullResults imageVolumePulls, container *v1.Container, pod *v1.Pod, syncResult *kubecontainer.SyncResult) (kubecontainer.ImageVolumes, error) {
|
||||
func (m *kubeGenericRuntimeManager) toKubeContainerImageVolumes(ctx context.Context, imageVolumePullResults imageVolumePulls, container *v1.Container, pod *v1.Pod, syncResult *kubecontainer.SyncResult) (kubecontainer.ImageVolumes, error) {
|
||||
if len(imageVolumePullResults) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1482,7 +1497,7 @@ func (m *kubeGenericRuntimeManager) toKubeContainerImageVolumes(imageVolumePullR
|
||||
|
||||
if res.err != nil {
|
||||
s, _ := grpcstatus.FromError(res.err)
|
||||
m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
|
||||
m.recordContainerEvent(ctx, pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message())
|
||||
lastErr = res.err
|
||||
lastMsg = res.msg
|
||||
continue
|
||||
@@ -1500,13 +1515,14 @@ func (m *kubeGenericRuntimeManager) toKubeContainerImageVolumes(imageVolumePullR
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) getImageVolumes(ctx context.Context, pod *v1.Pod, podSandboxConfig *runtimeapi.PodSandboxConfig, pullSecrets []v1.Secret) (imageVolumePulls, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
podRuntimeHandler, err := m.getPodRuntimeHandler(pod)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to get pod runtime handler", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to get pod runtime handler", "pod", klog.KObj(pod))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1521,12 +1537,12 @@ func (m *kubeGenericRuntimeManager) getImageVolumes(ctx context.Context, pod *v1
|
||||
ctx, objectRef, pod, volume.Image.Reference, pullSecrets, podSandboxConfig, podRuntimeHandler, volume.Image.PullPolicy,
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to ensure image", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to ensure image", "pod", klog.KObj(pod))
|
||||
res[volume.Name] = imageVolumePullResult{err: err, msg: msg}
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("Pulled image", "ref", ref, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Pulled image", "ref", ref, "pod", klog.KObj(pod))
|
||||
res[volume.Name] = imageVolumePullResult{spec: &runtimeapi.ImageSpec{
|
||||
Image: ref,
|
||||
UserSpecifiedImage: volume.Image.Reference,
|
||||
@@ -1540,7 +1556,8 @@ func (m *kubeGenericRuntimeManager) getImageVolumes(ctx context.Context, pod *v1
|
||||
|
||||
// If a container is still in backoff, the function will return a brief backoff error and
|
||||
// a detailed error message.
|
||||
func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
|
||||
func (m *kubeGenericRuntimeManager) doBackOff(ctx context.Context, pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
var cStatus *kubecontainer.Status
|
||||
for _, c := range podStatus.ContainerStatuses {
|
||||
if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
|
||||
@@ -1553,7 +1570,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
|
||||
return false, "", nil
|
||||
}
|
||||
|
||||
klog.V(3).InfoS("Checking backoff for container in pod", "containerName", container.Name, "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("Checking backoff for container in pod", "containerName", container.Name, "pod", klog.KObj(pod))
|
||||
// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
|
||||
ts := cStatus.FinishedAt
|
||||
// backOff requires a unique key to identify the container.
|
||||
@@ -1564,7 +1581,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
|
||||
fmt.Sprintf("Back-off restarting failed container %s in pod %s", container.Name, format.Pod(pod)))
|
||||
}
|
||||
err := fmt.Errorf("back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
|
||||
klog.V(3).InfoS("Back-off restarting failed container", "err", err.Error())
|
||||
logger.V(3).Info("Back-off restarting failed container", "err", err.Error())
|
||||
return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
|
||||
}
|
||||
|
||||
@@ -1584,6 +1601,7 @@ func (m *kubeGenericRuntimeManager) KillPod(ctx context.Context, pod *v1.Pod, ru
|
||||
// killPodWithSyncResult kills a runningPod and returns SyncResult.
|
||||
// Note: The pod passed in could be *nil* when kubelet restarted.
|
||||
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
|
||||
logger := klog.FromContext(ctx)
|
||||
killContainerResults := m.killContainersWithSyncResult(ctx, pod, runningPod, gracePeriodOverride)
|
||||
for _, containerResult := range killContainerResults {
|
||||
result.AddSyncResult(containerResult)
|
||||
@@ -1596,7 +1614,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, p
|
||||
for _, podSandbox := range runningPod.Sandboxes {
|
||||
if err := m.runtimeService.StopPodSandbox(ctx, podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) {
|
||||
killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
|
||||
klog.ErrorS(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID)
|
||||
logger.Error(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1604,11 +1622,12 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, p
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) GeneratePodStatus(event *runtimeapi.ContainerEventResponse) *kubecontainer.PodStatus {
|
||||
podIPs := m.determinePodSandboxIPs(event.PodSandboxStatus.Metadata.Namespace, event.PodSandboxStatus.Metadata.Name, event.PodSandboxStatus)
|
||||
ctx := context.TODO() // This context will be passed as parameter in the future
|
||||
podIPs := m.determinePodSandboxIPs(ctx, event.PodSandboxStatus.Metadata.Namespace, event.PodSandboxStatus.Metadata.Name, event.PodSandboxStatus)
|
||||
|
||||
kubeContainerStatuses := []*kubecontainer.Status{}
|
||||
for _, status := range event.ContainersStatuses {
|
||||
kubeContainerStatuses = append(kubeContainerStatuses, m.convertToKubeContainerStatus(status))
|
||||
kubeContainerStatuses = append(kubeContainerStatuses, m.convertToKubeContainerStatus(ctx, status))
|
||||
}
|
||||
|
||||
sort.Sort(containerStatusByCreated(kubeContainerStatuses))
|
||||
@@ -1626,6 +1645,7 @@ func (m *kubeGenericRuntimeManager) GeneratePodStatus(event *runtimeapi.Containe
|
||||
// GetPodStatus retrieves the status of the pod, including the
|
||||
// information of all containers in the pod that are visible in Runtime.
|
||||
func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
// Now we retain restart count of container as a container label. Each time a container
|
||||
// restarts, pod will read the restart count from the registered dead container, increment
|
||||
// it to get the new restart count, and then add a label with the new restart count on
|
||||
@@ -1654,7 +1674,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
|
||||
|
||||
podFullName := format.Pod(pod)
|
||||
|
||||
klog.V(4).InfoS("getSandboxIDByPodUID got sandbox IDs for pod", "podSandboxID", podSandboxIDs, "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("getSandboxIDByPodUID got sandbox IDs for pod", "podSandboxID", podSandboxIDs, "pod", klog.KObj(pod))
|
||||
|
||||
sandboxStatuses := []*runtimeapi.PodSandboxStatus{}
|
||||
containerStatuses := []*kubecontainer.Status{}
|
||||
@@ -1673,7 +1693,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "PodSandboxStatus of sandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
|
||||
logger.Error(err, "PodSandboxStatus of sandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod))
|
||||
return nil, err
|
||||
}
|
||||
if resp.GetStatus() == nil {
|
||||
@@ -1683,7 +1703,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
|
||||
sandboxStatuses = append(sandboxStatuses, resp.Status)
|
||||
// Only get pod IP from latest sandbox
|
||||
if idx == 0 && resp.Status.State == runtimeapi.PodSandboxState_SANDBOX_READY {
|
||||
podIPs = m.determinePodSandboxIPs(namespace, name, resp.Status)
|
||||
podIPs = m.determinePodSandboxIPs(ctx, namespace, name, resp.Status)
|
||||
activePodSandboxID = podSandboxID
|
||||
}
|
||||
|
||||
@@ -1694,11 +1714,11 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
|
||||
// e.g. CI job 'pull-kubernetes-e2e-gce-alpha-features' will runs with
|
||||
// features gate enabled, which includes Evented PLEG, but uses the
|
||||
// runtime without Evented PLEG support.
|
||||
klog.V(4).InfoS("Runtime does not set pod status timestamp", "pod", klog.KObj(pod))
|
||||
logger.V(4).Info("Runtime does not set pod status timestamp", "pod", klog.KObj(pod))
|
||||
containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
|
||||
if err != nil {
|
||||
if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
|
||||
klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -1707,7 +1727,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
|
||||
// timestamp from sandboxStatus.
|
||||
timestamp = time.Unix(0, resp.Timestamp)
|
||||
for _, cs := range resp.ContainersStatuses {
|
||||
cStatus := m.convertToKubeContainerStatus(cs)
|
||||
cStatus := m.convertToKubeContainerStatus(ctx, cs)
|
||||
containerStatuses = append(containerStatuses, cStatus)
|
||||
}
|
||||
}
|
||||
@@ -1719,7 +1739,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety
|
||||
containerStatuses, activeContainerStatuses, err = m.getPodContainerStatuses(ctx, uid, name, namespace, activePodSandboxID)
|
||||
if err != nil {
|
||||
if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) {
|
||||
klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -1743,7 +1763,7 @@ func (m *kubeGenericRuntimeManager) GetContainerStatus(ctx context.Context, id k
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("runtime container status: %w", err)
|
||||
}
|
||||
return m.convertToKubeContainerStatus(resp.GetStatus()), nil
|
||||
return m.convertToKubeContainerStatus(ctx, resp.GetStatus()), nil
|
||||
}
|
||||
|
||||
// GarbageCollect removes dead containers using the specified container gc policy.
|
||||
@@ -1754,9 +1774,10 @@ func (m *kubeGenericRuntimeManager) GarbageCollect(ctx context.Context, gcPolicy
|
||||
// UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim
|
||||
// with the podCIDR supplied by the kubelet.
|
||||
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR string) error {
|
||||
logger := klog.FromContext(ctx)
|
||||
// TODO(#35531): do we really want to write a method on this manager for each
|
||||
// field of the config?
|
||||
klog.InfoS("Updating runtime config through cri with podcidr", "CIDR", podCIDR)
|
||||
logger.Info("Updating runtime config through cri with podcidr", "CIDR", podCIDR)
|
||||
return m.runtimeService.UpdateRuntimeConfig(ctx,
|
||||
&runtimeapi.RuntimeConfig{
|
||||
NetworkConfig: &runtimeapi.NetworkConfig{
|
||||
|
||||
@@ -59,6 +59,7 @@ import (
|
||||
imagetypes "k8s.io/kubernetes/pkg/kubelet/images"
|
||||
"k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
|
||||
"k8s.io/kubernetes/test/utils/ktesting"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
@@ -67,11 +68,11 @@ var (
|
||||
containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
|
||||
)
|
||||
|
||||
func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
|
||||
return createTestRuntimeManagerWithErrors(nil)
|
||||
func createTestRuntimeManager(ctx context.Context) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
|
||||
return createTestRuntimeManagerWithErrors(ctx, nil)
|
||||
}
|
||||
|
||||
func createTestRuntimeManagerWithErrors(errors map[string][]error) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
|
||||
func createTestRuntimeManagerWithErrors(ctx context.Context, errors map[string][]error) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
|
||||
fakeRuntimeService := apitest.NewFakeRuntimeService()
|
||||
if errors != nil {
|
||||
fakeRuntimeService.Errors = errors
|
||||
@@ -85,7 +86,7 @@ func createTestRuntimeManagerWithErrors(errors map[string][]error) (*apitest.Fak
|
||||
MemoryCapacity: uint64(memoryCapacityQuantity.Value()),
|
||||
}
|
||||
osInterface := &containertest.FakeOS{}
|
||||
manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, noopoteltrace.NewTracerProvider().Tracer(""))
|
||||
manager, err := newFakeKubeRuntimeManager(ctx, fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, noopoteltrace.NewTracerProvider().Tracer(""))
|
||||
return fakeRuntimeService, fakeImageService, manager, err
|
||||
}
|
||||
|
||||
@@ -140,7 +141,8 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *
|
||||
|
||||
// makeFakePodSandbox creates a fake pod sandbox based on a sandbox template.
|
||||
func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template sandboxTemplate) *apitest.FakePodSandbox {
|
||||
config, err := m.generatePodSandboxConfig(template.pod, template.attempt)
|
||||
tCtx := ktesting.Init(t)
|
||||
config, err := m.generatePodSandboxConfig(tCtx, template.pod, template.attempt)
|
||||
assert.NoError(t, err, "generatePodSandboxConfig for sandbox template %+v", template)
|
||||
|
||||
podSandboxID := apitest.BuildSandboxName(config.Metadata)
|
||||
@@ -183,11 +185,11 @@ func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates
|
||||
|
||||
// makeFakeContainer creates a fake container based on a container template.
|
||||
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
|
||||
ctx := context.Background()
|
||||
sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
|
||||
tCtx := ktesting.Init(t)
|
||||
sandboxConfig, err := m.generatePodSandboxConfig(tCtx, template.pod, template.sandboxAttempt)
|
||||
assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
|
||||
|
||||
containerConfig, _, err := m.generateContainerConfig(ctx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil, nil)
|
||||
containerConfig, _, err := m.generateContainerConfig(tCtx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil, nil)
|
||||
assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
|
||||
|
||||
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
|
||||
@@ -299,22 +301,24 @@ func verifyContainerStatuses(t *testing.T, runtime *apitest.FakeRuntimeService,
|
||||
}
|
||||
|
||||
func TestNewKubeRuntimeManager(t *testing.T) {
|
||||
_, _, _, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, _, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestVersion(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
version, err := m.Version(ctx)
|
||||
version, err := m.Version(tCtx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, kubeRuntimeAPIVersion, version.String())
|
||||
}
|
||||
|
||||
func TestContainerRuntimeType(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
runtimeType := m.Type()
|
||||
@@ -322,8 +326,8 @@ func TestContainerRuntimeType(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetPodStatus(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
containers := []v1.Container{
|
||||
@@ -352,7 +356,7 @@ func TestGetPodStatus(t *testing.T) {
|
||||
// Set fake sandbox and faked containers to fakeRuntime.
|
||||
makeAndSetFakePod(t, m, fakeRuntime, pod)
|
||||
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pod.UID, podStatus.ID)
|
||||
assert.Equal(t, pod.Name, podStatus.Name)
|
||||
@@ -361,8 +365,8 @@ func TestGetPodStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStopContainerWithNotFoundError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
containers := []v1.Container{
|
||||
@@ -391,17 +395,17 @@ func TestStopContainerWithNotFoundError(t *testing.T) {
|
||||
// Set fake sandbox and faked containers to fakeRuntime.
|
||||
makeAndSetFakePod(t, m, fakeRuntime, pod)
|
||||
fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container"))
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
require.NoError(t, err)
|
||||
p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus)
|
||||
gracePeriod := int64(1)
|
||||
err = m.KillPod(ctx, pod, p, &gracePeriod)
|
||||
err = m.KillPod(tCtx, pod, p, &gracePeriod)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetPodStatusWithNotFoundError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
containers := []v1.Container{
|
||||
@@ -430,7 +434,7 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) {
|
||||
// Set fake sandbox and faked containers to fakeRuntime.
|
||||
makeAndSetFakePod(t, m, fakeRuntime, pod)
|
||||
fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container"))
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, pod.UID, podStatus.ID)
|
||||
require.Equal(t, pod.Name, podStatus.Name)
|
||||
@@ -439,8 +443,8 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetPods(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{
|
||||
@@ -470,7 +474,7 @@ func TestGetPods(t *testing.T) {
|
||||
containers := make([]*kubecontainer.Container, len(fakeContainers))
|
||||
for i := range containers {
|
||||
fakeContainer := fakeContainers[i]
|
||||
c, err := m.toKubeContainer(&runtimeapi.Container{
|
||||
c, err := m.toKubeContainer(tCtx, &runtimeapi.Container{
|
||||
Id: fakeContainer.Id,
|
||||
Metadata: fakeContainer.Metadata,
|
||||
State: fakeContainer.State,
|
||||
@@ -508,7 +512,7 @@ func TestGetPods(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
actual, err := m.GetPods(ctx, false)
|
||||
actual, err := m.GetPods(tCtx, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
if !verifyPods(expected, actual) {
|
||||
@@ -517,8 +521,8 @@ func TestGetPods(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetPodsSorted(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}}
|
||||
@@ -535,7 +539,7 @@ func TestGetPodsSorted(t *testing.T) {
|
||||
}
|
||||
fakeRuntime.SetFakeSandboxes(fakeSandboxes)
|
||||
|
||||
actual, err := m.GetPods(ctx, false)
|
||||
actual, err := m.GetPods(tCtx, false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, actual, 3)
|
||||
@@ -547,8 +551,8 @@ func TestGetPodsSorted(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKillPod(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pod := &v1.Pod{
|
||||
@@ -586,7 +590,7 @@ func TestKillPod(t *testing.T) {
|
||||
containers := make([]*kubecontainer.Container, len(fakeContainers))
|
||||
for i := range containers {
|
||||
fakeContainer := fakeContainers[i]
|
||||
c, err := m.toKubeContainer(&runtimeapi.Container{
|
||||
c, err := m.toKubeContainer(tCtx, &runtimeapi.Container{
|
||||
Id: fakeContainer.Id,
|
||||
Metadata: fakeContainer.Metadata,
|
||||
State: fakeContainer.State,
|
||||
@@ -614,7 +618,7 @@ func TestKillPod(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
err = m.KillPod(ctx, pod, runningPod, nil)
|
||||
err = m.KillPod(tCtx, pod, runningPod, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, fakeRuntime.Containers, 3)
|
||||
assert.Len(t, fakeRuntime.Sandboxes, 1)
|
||||
@@ -627,7 +631,8 @@ func TestKillPod(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncPod(t *testing.T) {
|
||||
fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, fakeImage, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
containers := []v1.Container{
|
||||
@@ -654,7 +659,7 @@ func TestSyncPod(t *testing.T) {
|
||||
}
|
||||
|
||||
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
|
||||
result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
|
||||
result := m.SyncPod(tCtx, pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
assert.Len(t, fakeRuntime.Containers, 2)
|
||||
assert.Len(t, fakeImage.Images, 2)
|
||||
@@ -668,7 +673,8 @@ func TestSyncPod(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
containers := []v1.Container{
|
||||
@@ -714,7 +720,7 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
|
||||
}
|
||||
|
||||
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
|
||||
result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
|
||||
result := m.SyncPod(tCtx, pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
assert.Equal(t, exceptSysctls, pod.Spec.SecurityContext.Sysctls)
|
||||
for _, sandbox := range fakeRuntime.Sandboxes {
|
||||
@@ -726,8 +732,8 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPruneInitContainers(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
init1 := makeTestContainer("init1", "busybox")
|
||||
@@ -753,10 +759,10 @@ func TestPruneInitContainers(t *testing.T) {
|
||||
}
|
||||
fakes := makeFakeContainers(t, m, templates)
|
||||
fakeRuntime.SetFakeContainers(fakes)
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
|
||||
m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
|
||||
m.pruneInitContainersBeforeStart(tCtx, pod, podStatus)
|
||||
expectedContainers := sets.New[string](fakes[0].Id, fakes[2].Id)
|
||||
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
|
||||
t.Errorf("expected %v, got %v", expectedContainers, actual)
|
||||
@@ -764,8 +770,8 @@ func TestPruneInitContainers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncPodWithInitContainers(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
initContainers := []v1.Container{
|
||||
@@ -802,9 +808,9 @@ func TestSyncPodWithInitContainers(t *testing.T) {
|
||||
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
|
||||
|
||||
// 1. should only create the init container.
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
|
||||
result := m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
expected := []*cRecord{
|
||||
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
|
||||
@@ -812,24 +818,25 @@ func TestSyncPodWithInitContainers(t *testing.T) {
|
||||
verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")
|
||||
|
||||
// 2. should not create app container because init container is still running.
|
||||
podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err = m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result = m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
|
||||
result = m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")
|
||||
|
||||
// 3. should create all app containers because init container finished.
|
||||
// Stop init container instance 0.
|
||||
sandboxIDs, err := m.getSandboxIDByPodUID(ctx, pod.UID, nil)
|
||||
sandboxIDs, err := m.getSandboxIDByPodUID(tCtx, pod.UID, nil)
|
||||
require.NoError(t, err)
|
||||
sandboxID := sandboxIDs[0]
|
||||
initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
|
||||
require.NoError(t, err)
|
||||
fakeRuntime.StopContainer(ctx, initID0, 0)
|
||||
err = fakeRuntime.StopContainer(tCtx, initID0, 0)
|
||||
require.NoError(t, err)
|
||||
// Sync again.
|
||||
podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err = m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
result = m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
expected = []*cRecord{
|
||||
{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
|
||||
@@ -840,11 +847,12 @@ func TestSyncPodWithInitContainers(t *testing.T) {
|
||||
|
||||
// 4. should restart the init container if needed to create a new podsandbox
|
||||
// Stop the pod sandbox.
|
||||
fakeRuntime.StopPodSandbox(ctx, sandboxID)
|
||||
err = fakeRuntime.StopPodSandbox(tCtx, sandboxID)
|
||||
require.NoError(t, err)
|
||||
// Sync again.
|
||||
podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err = m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
result = m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
assert.NoError(t, result.Error())
|
||||
expected = []*cRecord{
|
||||
// The first init container instance is purged and no longer visible.
|
||||
@@ -939,7 +947,8 @@ func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
|
||||
}
|
||||
|
||||
func TestComputePodActions(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Creating a pair reference pod and status for the test cases to refer
|
||||
@@ -1219,8 +1228,8 @@ func TestComputePodActions(t *testing.T) {
|
||||
if test.mutateStatusFn != nil {
|
||||
test.mutateStatusFn(status)
|
||||
}
|
||||
ctx := context.Background()
|
||||
actions := m.computePodActions(ctx, pod, status)
|
||||
tCtx := ktesting.Init(t)
|
||||
actions := m.computePodActions(tCtx, pod, status)
|
||||
verifyActions(t, &test.actions, &actions, desc)
|
||||
if test.resetStatusFn != nil {
|
||||
test.resetStatusFn(status)
|
||||
@@ -1280,7 +1289,8 @@ func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
|
||||
}
|
||||
|
||||
func TestComputePodActionsWithInitContainers(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Creating a pair reference pod and status for the test cases to refer
|
||||
@@ -1490,8 +1500,8 @@ func TestComputePodActionsWithInitContainers(t *testing.T) {
|
||||
if test.mutateStatusFn != nil {
|
||||
test.mutateStatusFn(status)
|
||||
}
|
||||
ctx := context.Background()
|
||||
actions := m.computePodActions(ctx, pod, status)
|
||||
tCtx := ktesting.Init(t)
|
||||
actions := m.computePodActions(tCtx, pod, status)
|
||||
verifyActions(t, &test.actions, &actions, desc)
|
||||
})
|
||||
}
|
||||
@@ -1536,7 +1546,8 @@ func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus
|
||||
}
|
||||
|
||||
func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Creating a pair reference pod and status for the test cases to refer
|
||||
@@ -1896,8 +1907,8 @@ func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
|
||||
if test.mutateStatusFn != nil {
|
||||
test.mutateStatusFn(pod, status)
|
||||
}
|
||||
ctx := context.Background()
|
||||
actions := m.computePodActions(ctx, pod, status)
|
||||
tCtx := ktesting.Init(t)
|
||||
actions := m.computePodActions(tCtx, pod, status)
|
||||
verifyActions(t, &test.actions, &actions, desc)
|
||||
if test.resetStatusFn != nil {
|
||||
test.resetStatusFn(status)
|
||||
@@ -1955,7 +1966,8 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
|
||||
TestComputePodActions(t)
|
||||
TestComputePodActionsWithInitContainers(t)
|
||||
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
basePod, baseStatus := makeBasePodAndStatusWithInitAndEphemeralContainers()
|
||||
@@ -2090,16 +2102,16 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
|
||||
if test.mutateStatusFn != nil {
|
||||
test.mutateStatusFn(status)
|
||||
}
|
||||
ctx := context.Background()
|
||||
actions := m.computePodActions(ctx, pod, status)
|
||||
tCtx := ktesting.Init(t)
|
||||
actions := m.computePodActions(tCtx, pod, status)
|
||||
verifyActions(t, &test.actions, &actions, desc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
assert.NoError(t, err)
|
||||
fakeRuntime.ErrorOnSandboxCreate = true
|
||||
|
||||
@@ -2127,9 +2139,9 @@ func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
|
||||
// GetPodStatus and the following SyncPod will not return errors in the
|
||||
// case where the pod has been deleted. We are not adding any pods into
|
||||
// the fakePodProvider so they are 'deleted'.
|
||||
podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
|
||||
podStatus, err := m.GetPodStatus(tCtx, pod.UID, pod.Name, pod.Namespace)
|
||||
assert.NoError(t, err)
|
||||
result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
|
||||
result := m.SyncPod(tCtx, pod, podStatus, []v1.Secret{}, backOff)
|
||||
// This will return an error if the pod has _not_ been deleted.
|
||||
assert.NoError(t, result.Error())
|
||||
}
|
||||
@@ -2164,7 +2176,8 @@ func makeBasePodAndStatusWithInitAndEphemeralContainers() (*v1.Pod, *kubecontain
|
||||
|
||||
func TestComputePodActionsForPodResize(t *testing.T) {
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
m.machineInfo.MemoryCapacity = 17179860387 // 16GB
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -2607,17 +2620,18 @@ func TestComputePodActionsForPodResize(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tCtx := ktesting.Init(t)
|
||||
expectedActions := test.getExpectedPodActionsFn(pod, status)
|
||||
actions := m.computePodActions(ctx, pod, status)
|
||||
actions := m.computePodActions(tCtx, pod, status)
|
||||
verifyActions(t, expectedActions, &actions, desc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdatePodContainerResources(t *testing.T) {
|
||||
tCtx := ktesting.Init(t)
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager()
|
||||
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
|
||||
m.machineInfo.MemoryCapacity = 17179860387 // 16GB
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -2733,7 +2747,7 @@ func TestUpdatePodContainerResources(t *testing.T) {
|
||||
}
|
||||
|
||||
fakeRuntime.Called = []string{}
|
||||
err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate)
|
||||
err := m.updatePodContainerResources(tCtx, pod, tc.resourceName, containersToUpdate)
|
||||
require.NoError(t, err, dsc)
|
||||
|
||||
if tc.invokeUpdateResources {
|
||||
@@ -2750,7 +2764,8 @@ func TestUpdatePodContainerResources(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestToKubeContainerImageVolumes(t *testing.T) {
|
||||
_, _, manager, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, manager, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
const (
|
||||
@@ -2806,7 +2821,7 @@ func TestToKubeContainerImageVolumes(t *testing.T) {
|
||||
expectedError: errTest,
|
||||
},
|
||||
} {
|
||||
imageVolumes, err := manager.toKubeContainerImageVolumes(tc.pullResults, tc.container, &v1.Pod{}, syncResult)
|
||||
imageVolumes, err := manager.toKubeContainerImageVolumes(tCtx, tc.pullResults, tc.container, &v1.Pod{}, syncResult)
|
||||
if tc.expectedError != nil {
|
||||
require.EqualError(t, err, tc.expectedError.Error())
|
||||
} else {
|
||||
@@ -2817,9 +2832,10 @@ func TestToKubeContainerImageVolumes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetImageVolumes(t *testing.T) {
|
||||
tCtx := ktesting.Init(t)
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ImageVolume, true)
|
||||
|
||||
_, _, manager, err := createTestRuntimeManager()
|
||||
_, _, manager, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
const (
|
||||
@@ -2867,7 +2883,7 @@ func TestGetImageVolumes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
} {
|
||||
imageVolumePulls, err := manager.getImageVolumes(context.TODO(), tc.pod, nil, nil)
|
||||
imageVolumePulls, err := manager.getImageVolumes(tCtx, tc.pod, nil, nil)
|
||||
if tc.expectedError != nil {
|
||||
require.EqualError(t, err, tc.expectedError.Error())
|
||||
} else {
|
||||
@@ -2882,6 +2898,7 @@ func TestDoPodResizeAction(t *testing.T) {
|
||||
t.Skip("unsupported OS")
|
||||
}
|
||||
|
||||
tCtx := ktesting.Init(t)
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
|
||||
metrics.Register()
|
||||
metrics.PodResizeDurationMilliseconds.Reset()
|
||||
@@ -3011,7 +3028,7 @@ func TestDoPodResizeAction(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManagerWithErrors(tc.runtimeErrors)
|
||||
_, _, m, err := createTestRuntimeManagerWithErrors(tCtx, tc.runtimeErrors)
|
||||
require.NoError(t, err)
|
||||
m.cpuCFSQuota = true // Enforce CPU Limits
|
||||
|
||||
@@ -3100,7 +3117,7 @@ func TestDoPodResizeAction(t *testing.T) {
|
||||
actions := podActions{
|
||||
ContainersToUpdate: containersToUpdate,
|
||||
}
|
||||
resizeResult := m.doPodResizeAction(t.Context(), pod, kps, actions)
|
||||
resizeResult := m.doPodResizeAction(tCtx, pod, kps, actions)
|
||||
|
||||
if tc.expectedError != "" {
|
||||
require.Error(t, resizeResult.Error)
|
||||
@@ -3306,7 +3323,8 @@ func TestCheckPodResize(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
pod, kps := makeBasePodAndStatus()
|
||||
@@ -3361,7 +3379,7 @@ func TestCheckPodResize(t *testing.T) {
|
||||
currentPodResources := &cm.ResourceConfig{Memory: tc.currentPodMemLimit}
|
||||
desiredPodResources := &cm.ResourceConfig{Memory: tc.desiredPodMemLimit}
|
||||
|
||||
err = m.validatePodResizeAction(t.Context(), pod, kps, currentPodResources, desiredPodResources, actions)
|
||||
err = m.validatePodResizeAction(tCtx, pod, kps, currentPodResources, desiredPodResources, actions)
|
||||
|
||||
if tc.expectedError {
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -37,10 +37,11 @@ import (
|
||||
|
||||
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
|
||||
func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
|
||||
logger := klog.FromContext(ctx)
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(ctx, pod, attempt)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
|
||||
klog.ErrorS(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
|
||||
return "", message, err
|
||||
}
|
||||
|
||||
@@ -48,7 +49,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v
|
||||
err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
|
||||
klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
|
||||
return "", message, err
|
||||
}
|
||||
|
||||
@@ -60,14 +61,14 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v
|
||||
return "", message, err
|
||||
}
|
||||
if runtimeHandler != "" {
|
||||
klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
|
||||
logger.V(2).Info("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
|
||||
}
|
||||
}
|
||||
|
||||
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
|
||||
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
|
||||
return "", message, err
|
||||
}
|
||||
|
||||
@@ -75,9 +76,10 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v
|
||||
}
|
||||
|
||||
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
|
||||
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
|
||||
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(ctx context.Context, pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
|
||||
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
|
||||
// Refer https://github.com/kubernetes/kubernetes/issues/29871
|
||||
logger := klog.FromContext(ctx)
|
||||
podUID := string(pod.UID)
|
||||
podSandboxConfig := &runtimeapi.PodSandboxConfig{
|
||||
Metadata: &runtimeapi.PodSandboxMetadata{
|
||||
@@ -120,7 +122,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
|
||||
port := containerPortMappings[idx]
|
||||
hostPort := int32(port.HostPort)
|
||||
containerPort := int32(port.ContainerPort)
|
||||
protocol := toRuntimeProtocol(port.Protocol)
|
||||
protocol := toRuntimeProtocol(logger, port.Protocol)
|
||||
portMappings = append(portMappings, &runtimeapi.PortMapping{
|
||||
HostIp: port.HostIP,
|
||||
HostPort: hostPort,
|
||||
@@ -149,7 +151,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
|
||||
}
|
||||
|
||||
// Update config to include overhead, sandbox level resources
|
||||
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
|
||||
if err := m.applySandboxResources(ctx, pod, podSandboxConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return podSandboxConfig, nil
|
||||
@@ -279,6 +281,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod)
|
||||
|
||||
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
|
||||
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
var filter *runtimeapi.PodSandboxFilter
|
||||
if !all {
|
||||
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
@@ -291,7 +294,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all
|
||||
|
||||
resp, err := m.runtimeService.ListPodSandbox(ctx, filter)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to list pod sandboxes")
|
||||
logger.Error(err, "Failed to list pod sandboxes")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -299,10 +302,11 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all
|
||||
}
|
||||
|
||||
// determinePodSandboxIP determines the IP addresses of the given pod sandbox.
|
||||
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
|
||||
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(ctx context.Context, podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
|
||||
logger := klog.FromContext(ctx)
|
||||
podIPs := make([]string, 0)
|
||||
if podSandbox.Network == nil {
|
||||
klog.InfoS("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
|
||||
logger.Info("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
|
||||
return podIPs
|
||||
}
|
||||
|
||||
@@ -312,7 +316,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
|
||||
// pick primary IP
|
||||
if len(podSandbox.Network.Ip) != 0 {
|
||||
if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
|
||||
klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
|
||||
logger.Info("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
|
||||
return nil
|
||||
}
|
||||
podIPs = append(podIPs, podSandbox.Network.Ip)
|
||||
@@ -321,7 +325,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
|
||||
// pick additional ips, if cri reported them
|
||||
for _, podIP := range podSandbox.Network.AdditionalIps {
|
||||
if nil == netutils.ParseIPSloppy(podIP.Ip) {
|
||||
klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
|
||||
logger.Info("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
|
||||
return nil
|
||||
}
|
||||
podIPs = append(podIPs, podIP.Ip)
|
||||
@@ -333,6 +337,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName
|
||||
// getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error).
|
||||
// Param state could be nil in order to get all sandboxes belonging to same pod.
|
||||
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
filter := &runtimeapi.PodSandboxFilter{
|
||||
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
|
||||
}
|
||||
@@ -343,7 +348,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, po
|
||||
}
|
||||
sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
|
||||
logger.Error(err, "Failed to list sandboxes for pod", "podUID", podUID)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -20,6 +20,8 @@ limitations under the License.
|
||||
package kuberuntime
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
@@ -44,7 +46,8 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)
|
||||
return resources
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
|
||||
func (m *kubeGenericRuntimeManager) calculateSandboxResources(ctx context.Context, pod *v1.Pod) *runtimeapi.LinuxContainerResources {
|
||||
logger := klog.FromContext(ctx)
|
||||
opts := resourcehelper.PodResourcesOptions{
|
||||
ExcludeOverhead: true,
|
||||
// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
|
||||
@@ -60,16 +63,16 @@ func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runt
|
||||
// If pod has exclusive cpu the sandbox will not have cfs quote enforced
|
||||
disableCPUQuota := utilfeature.DefaultFeatureGate.Enabled(features.DisableCPUQuotaWithExclusiveCPUs) && m.containerManager.PodHasExclusiveCPUs(pod)
|
||||
|
||||
klog.V(5).InfoS("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
|
||||
logger.V(5).Info("Enforcing CFS quota", "pod", klog.KObj(pod), "unlimited", disableCPUQuota)
|
||||
return m.calculateLinuxResources(cpuRequest, lim.Cpu(), lim.Memory(), disableCPUQuota)
|
||||
}
|
||||
|
||||
func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
|
||||
func (m *kubeGenericRuntimeManager) applySandboxResources(ctx context.Context, pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
|
||||
|
||||
if config.Linux == nil {
|
||||
return nil
|
||||
}
|
||||
config.Linux.Resources = m.calculateSandboxResources(pod)
|
||||
config.Linux.Resources = m.calculateSandboxResources(ctx, pod)
|
||||
config.Linux.Overhead = m.convertOverheadToLinuxResources(pod)
|
||||
|
||||
return nil
|
||||
|
||||
@@ -28,11 +28,13 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
"k8s.io/kubernetes/test/utils/ktesting"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
func TestApplySandboxResources(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
m.cpuCFSQuota = true
|
||||
m.singleProcessOOMKill = ptr.To(false)
|
||||
|
||||
@@ -168,14 +170,16 @@ func TestApplySandboxResources(t *testing.T) {
|
||||
for i, test := range tests {
|
||||
setCgroupVersionDuringTest(test.cgroupVersion)
|
||||
|
||||
m.applySandboxResources(test.pod, config)
|
||||
err = m.applySandboxResources(tCtx, test.pod, config)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.expectedResource, config.Linux.Resources, "TestCase[%d]: %s", i, test.description)
|
||||
assert.Equal(t, test.expectedOverhead, config.Linux.Overhead, "TestCase[%d]: %s", i, test.description)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGeneratePodSandboxConfigWithLinuxSecurityContext(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
pod := newTestPodWithLinuxSecurityContext()
|
||||
|
||||
@@ -189,7 +193,7 @@ func TestGeneratePodSandboxConfigWithLinuxSecurityContext(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(pod, 1)
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(tCtx, pod, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedLinuxPodSandboxConfig.SecurityContext.SelinuxOptions, podSandboxConfig.Linux.SecurityContext.SelinuxOptions)
|
||||
assert.Equal(t, expectedLinuxPodSandboxConfig.SecurityContext.RunAsUser, podSandboxConfig.Linux.SecurityContext.RunAsUser)
|
||||
@@ -222,7 +226,8 @@ func newSupplementalGroupsPolicyPod(supplementalGroupsPolicy *v1.SupplementalGro
|
||||
}
|
||||
|
||||
func TestGeneratePodSandboxLinuxConfigSupplementalGroupsPolicy(t *testing.T) {
|
||||
_, _, m, err := createTestRuntimeManager()
|
||||
tCtx := ktesting.Init(t)
|
||||
_, _, m, err := createTestRuntimeManager(tCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"os"
"path/filepath"
@@ -34,13 +33,15 @@ import (
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
rctest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/ptr"
)

const testPodLogsDirectory = "/var/log/pods"

func TestGeneratePodSandboxConfig(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
pod := newTestPod()

@@ -62,7 +63,7 @@ func TestGeneratePodSandboxConfig(t *testing.T) {
},
}

podSandboxConfig, err := m.generatePodSandboxConfig(pod, 1)
podSandboxConfig, err := m.generatePodSandboxConfig(tCtx, pod, 1)
assert.NoError(t, err)
assert.Equal(t, expectedLabels, podSandboxConfig.Labels)
assert.Equal(t, expectedLogDirectory, podSandboxConfig.LogDirectory)
@@ -72,8 +73,8 @@ func TestGeneratePodSandboxConfig(t *testing.T) {

// TestCreatePodSandbox tests creating sandbox and its corresponding pod log directory.
func TestCreatePodSandbox(t *testing.T) {
ctx := context.Background()
fakeRuntime, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
pod := newTestPod()

@@ -84,10 +85,10 @@ func TestCreatePodSandbox(t *testing.T) {
assert.Equal(t, os.FileMode(0755), perm)
return nil
}
id, _, err := m.createPodSandbox(ctx, pod, 1)
id, _, err := m.createPodSandbox(tCtx, pod, 1)
assert.NoError(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
sandboxes, err := fakeRuntime.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{Id: id})
sandboxes, err := fakeRuntime.ListPodSandbox(tCtx, &runtimeapi.PodSandboxFilter{Id: id})
assert.NoError(t, err)
assert.Len(t, sandboxes, 1)
assert.Equal(t, sandboxes[0].Id, fmt.Sprintf("%s_%s_%s_1", pod.Name, pod.Namespace, pod.UID))
@@ -95,7 +96,8 @@ func TestCreatePodSandbox(t *testing.T) {
}

func TestGeneratePodSandboxLinuxConfigSeccomp(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

tests := []struct {
@@ -139,11 +141,11 @@ func TestGeneratePodSandboxLinuxConfigSeccomp(t *testing.T) {

// TestCreatePodSandbox_RuntimeClass tests creating sandbox with RuntimeClasses enabled.
func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
ctx := context.Background()
tCtx := ktesting.Init(t)
rcm := runtimeclass.NewManager(rctest.NewPopulatedClient())
defer rctest.StartManagerSync(rcm)()

fakeRuntime, _, m, err := createTestRuntimeManager()
fakeRuntime, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)
m.runtimeClassManager = rcm

@@ -162,7 +164,7 @@ func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
pod := newTestPod()
pod.Spec.RuntimeClassName = test.rcn

id, _, err := m.createPodSandbox(ctx, pod, 1)
id, _, err := m.createPodSandbox(tCtx, pod, 1)
if test.expectError {
assert.Error(t, err)
} else {
@@ -214,7 +216,8 @@ func newSeccompPod(podFieldProfile, containerFieldProfile *v1.SeccompProfile, po
}

func TestGeneratePodSandboxWindowsConfig_HostProcess(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
tCtx := ktesting.Init(t)
_, _, m, err := createTestRuntimeManager(tCtx)
require.NoError(t, err)

const containerName = "container"
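The test updates above all follow the same recipe: replace context.Background() and the zero-argument createTestRuntimeManager() with a test context from ktesting.Init(t), which satisfies context.Context and routes klog output through the test's logger. A small sketch of that usage, assuming only k8s.io/kubernetes/test/utils/ktesting and k8s.io/klog/v2 (the runtime-manager helpers are internal to this package and not shown):

package example

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestWithTestContext(t *testing.T) {
	// tCtx implements context.Context and carries a per-test logger.
	tCtx := ktesting.Init(t)

	// Production code called with tCtx derives this same logger via klog.FromContext,
	// so its output is attributed to this test in the test log.
	logger := klog.FromContext(tCtx)
	logger.Info("running with a contextual test logger")
}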
@@ -20,10 +20,12 @@ limitations under the License.
package kuberuntime

import (
"context"

v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
func (m *kubeGenericRuntimeManager) applySandboxResources(ctx context.Context, pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
return nil
}
@@ -20,10 +20,12 @@ limitations under the License.
package kuberuntime

import (
"context"

v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
func (m *kubeGenericRuntimeManager) applySandboxResources(ctx context.Context, pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error {
return nil
}
@@ -17,6 +17,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"encoding/json"
"strconv"

@@ -104,7 +105,8 @@ func newContainerLabels(container *v1.Container, pod *v1.Pod) map[string]string
}

// newContainerAnnotations creates container annotations from v1.Container and v1.Pod.
func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount int, opts *kubecontainer.RunContainerOptions) map[string]string {
func newContainerAnnotations(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, opts *kubecontainer.RunContainerOptions) map[string]string {
logger := klog.FromContext(ctx)
annotations := map[string]string{}

// Kubelet always overrides device plugin annotations if they are conflicting
@@ -128,7 +130,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
// Using json encoding so that the PreStop handler object is readable after writing as a label
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
if err != nil {
klog.ErrorS(err, "Unable to marshal lifecycle PreStop handler for container", "containerName", container.Name, "pod", klog.KObj(pod))
logger.Error(err, "Unable to marshal lifecycle PreStop handler for container", "containerName", container.Name, "pod", klog.KObj(pod))
} else {
annotations[containerPreStopHandlerLabel] = string(rawPreStop)
}
@@ -137,7 +139,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
if len(container.Ports) > 0 {
rawContainerPorts, err := json.Marshal(container.Ports)
if err != nil {
klog.ErrorS(err, "Unable to marshal container ports for container", "containerName", container.Name, "pod", klog.KObj(pod))
logger.Error(err, "Unable to marshal container ports for container", "containerName", container.Name, "pod", klog.KObj(pod))
} else {
annotations[containerPortsLabel] = string(rawContainerPorts)
}
@@ -147,12 +149,13 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
}

// getPodSandboxInfoFromLabels gets labeledPodSandboxInfo from labels.
func getPodSandboxInfoFromLabels(labels map[string]string) *labeledPodSandboxInfo {
func getPodSandboxInfoFromLabels(ctx context.Context, labels map[string]string) *labeledPodSandboxInfo {
logger := klog.FromContext(ctx)
podSandboxInfo := &labeledPodSandboxInfo{
Labels:       make(map[string]string),
PodName:      getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
PodUID:       kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
PodName:      getStringValueFromLabel(logger, labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(logger, labels, types.KubernetesPodNamespaceLabel),
PodUID:       kubetypes.UID(getStringValueFromLabel(logger, labels, types.KubernetesPodUIDLabel)),
}

// Remain only labels from v1.Pod
@@ -173,46 +176,48 @@ func getPodSandboxInfoFromAnnotations(annotations map[string]string) *annotatedP
}

// getContainerInfoFromLabels gets labeledContainerInfo from labels.
func getContainerInfoFromLabels(labels map[string]string) *labeledContainerInfo {
func getContainerInfoFromLabels(ctx context.Context, labels map[string]string) *labeledContainerInfo {
logger := klog.FromContext(ctx)
return &labeledContainerInfo{
PodName:       getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
PodNamespace:  getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
PodUID:        kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
ContainerName: getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
PodName:       getStringValueFromLabel(logger, labels, types.KubernetesPodNameLabel),
PodNamespace:  getStringValueFromLabel(logger, labels, types.KubernetesPodNamespaceLabel),
PodUID:        kubetypes.UID(getStringValueFromLabel(logger, labels, types.KubernetesPodUIDLabel)),
ContainerName: getStringValueFromLabel(logger, labels, types.KubernetesContainerNameLabel),
}
}

// getContainerInfoFromAnnotations gets annotatedContainerInfo from annotations.
func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedContainerInfo {
func getContainerInfoFromAnnotations(ctx context.Context, annotations map[string]string) *annotatedContainerInfo {
logger := klog.FromContext(ctx)
var err error
containerInfo := &annotatedContainerInfo{
TerminationMessagePath:   getStringValueFromLabel(annotations, containerTerminationMessagePathLabel),
TerminationMessagePolicy: v1.TerminationMessagePolicy(getStringValueFromLabel(annotations, containerTerminationMessagePolicyLabel)),
TerminationMessagePath:   getStringValueFromLabel(logger, annotations, containerTerminationMessagePathLabel),
TerminationMessagePolicy: v1.TerminationMessagePolicy(getStringValueFromLabel(logger, annotations, containerTerminationMessagePolicyLabel)),
}

if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil {
klog.ErrorS(err, "Unable to get label value from annotations", "label", containerHashLabel, "annotations", annotations)
if containerInfo.Hash, err = getUint64ValueFromLabel(ctx, annotations, containerHashLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerHashLabel, "annotations", annotations)
}
if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil {
klog.ErrorS(err, "Unable to get label value from annotations", "label", containerRestartCountLabel, "annotations", annotations)
if containerInfo.RestartCount, err = getIntValueFromLabel(logger, annotations, containerRestartCountLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerRestartCountLabel, "annotations", annotations)
}
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(annotations, podDeletionGracePeriodLabel); err != nil {
klog.ErrorS(err, "Unable to get label value from annotations", "label", podDeletionGracePeriodLabel, "annotations", annotations)
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(logger, annotations, podDeletionGracePeriodLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", podDeletionGracePeriodLabel, "annotations", annotations)
}
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(annotations, podTerminationGracePeriodLabel); err != nil {
klog.ErrorS(err, "Unable to get label value from annotations", "label", podTerminationGracePeriodLabel, "annotations", annotations)
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(logger, annotations, podTerminationGracePeriodLabel); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", podTerminationGracePeriodLabel, "annotations", annotations)
}

preStopHandler := &v1.LifecycleHandler{}
if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil {
klog.ErrorS(err, "Unable to get label value from annotations", "label", containerPreStopHandlerLabel, "annotations", annotations)
if found, err := getJSONObjectFromLabel(logger, annotations, containerPreStopHandlerLabel, preStopHandler); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerPreStopHandlerLabel, "annotations", annotations)
} else if found {
containerInfo.PreStopHandler = preStopHandler
}

containerPorts := []v1.ContainerPort{}
if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil {
klog.ErrorS(err, "Unable to get label value from annotations", "label", containerPortsLabel, "annotations", annotations)
if found, err := getJSONObjectFromLabel(logger, annotations, containerPortsLabel, &containerPorts); err != nil {
logger.Error(err, "Unable to get label value from annotations", "label", containerPortsLabel, "annotations", annotations)
} else if found {
containerInfo.ContainerPorts = containerPorts
}
@@ -220,17 +225,17 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo
return containerInfo
}

func getStringValueFromLabel(labels map[string]string, label string) string {
func getStringValueFromLabel(logger klog.Logger, labels map[string]string, label string) string {
if value, found := labels[label]; found {
return value
}
// Do not report error, because there should be many old containers without label now.
klog.V(3).InfoS("Container doesn't have requested label, it may be an old or invalid container", "label", label)
logger.V(3).Info("Container doesn't have requested label, it may be an old or invalid container", "label", label)
// Return empty string "" for these containers, the caller will get value by other ways.
return ""
}

func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
func getIntValueFromLabel(logger klog.Logger, labels map[string]string, label string) (int, error) {
if strValue, found := labels[label]; found {
intValue, err := strconv.Atoi(strValue)
if err != nil {
@@ -240,12 +245,13 @@ func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
return intValue, nil
}
// Do not report error, because there should be many old containers without label now.
klog.V(3).InfoS("Container doesn't have requested label, it may be an old or invalid container", "label", label)
logger.V(3).Info("Container doesn't have requested label, it may be an old or invalid container", "label", label)
// Just set the value to 0
return 0, nil
}

func getUint64ValueFromLabel(labels map[string]string, label string) (uint64, error) {
func getUint64ValueFromLabel(ctx context.Context, labels map[string]string, label string) (uint64, error) {
logger := klog.FromContext(ctx)
if strValue, found := labels[label]; found {
intValue, err := strconv.ParseUint(strValue, 16, 64)
if err != nil {
@@ -255,12 +261,12 @@ func getUint64ValueFromLabel(labels map[string]string, label string) (uint64, er
return intValue, nil
}
// Do not report error, because there should be many old containers without label now.
klog.V(3).InfoS("Container doesn't have requested label, it may be an old or invalid container", "label", label)
logger.V(3).Info("Container doesn't have requested label, it may be an old or invalid container", "label", label)
// Just set the value to 0
return 0, nil
}

func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, error) {
func getInt64PointerFromLabel(logger klog.Logger, labels map[string]string, label string) (*int64, error) {
if strValue, found := labels[label]; found {
int64Value, err := strconv.ParseInt(strValue, 10, 64)
if err != nil {
@@ -269,15 +275,17 @@ func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, e
return &int64Value, nil
}
// If the label is not found, return pointer nil.
logger.V(4).Info("Label not found", "label", label)
return nil, nil
}

// getJSONObjectFromLabel returns a bool value indicating whether an object is found.
func getJSONObjectFromLabel(labels map[string]string, label string, value interface{}) (bool, error) {
func getJSONObjectFromLabel(logger klog.Logger, labels map[string]string, label string, value interface{}) (bool, error) {
if strValue, found := labels[label]; found {
err := json.Unmarshal([]byte(strValue), value)
return found, err
}
// If the label is not found, return not found.
logger.V(4).Info("Label not found", "label", label)
return false, nil
}
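Two flavors of the change are visible in this file: entry points such as getContainerInfoFromAnnotations take a ctx and call klog.FromContext once, while the small leaf helpers (getStringValueFromLabel and friends) accept the already-derived klog.Logger so it is not re-extracted on every call. A hedged sketch of that split, with illustrative names rather than the exact helpers above:

package example

import (
	"context"

	"k8s.io/klog/v2"
)

// Entry point: derive the logger from the context once.
func parseInfo(ctx context.Context, labels map[string]string) string {
	logger := klog.FromContext(ctx)
	return readLabel(logger, labels, "example-label")
}

// Leaf helper: take the logger directly instead of a context.
func readLabel(logger klog.Logger, labels map[string]string, key string) string {
	if v, ok := labels[key]; ok {
		return v
	}
	logger.V(3).Info("Label not found", "label", key)
	return ""
}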
@@ -27,9 +27,11 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/test/utils/ktesting"
)

func TestContainerLabels(t *testing.T) {
tCtx := ktesting.Init(t)
deletionGracePeriod := int64(10)
terminationGracePeriod := int64(10)
lifecycle := &v1.Lifecycle{
@@ -85,7 +87,7 @@ func TestContainerLabels(t *testing.T) {
// Test whether we can get right information from label
for _, test := range tests {
labels := newContainerLabels(container, pod)
containerInfo := getContainerInfoFromLabels(labels)
containerInfo := getContainerInfoFromLabels(tCtx, labels)
if !reflect.DeepEqual(containerInfo, test.expected) {
t.Errorf("%v: expected %v, got %v", test.description, test.expected, containerInfo)
}
@@ -93,6 +95,7 @@ func TestContainerLabels(t *testing.T) {
}

func TestContainerAnnotations(t *testing.T) {
tCtx := ktesting.Init(t)
restartCount := 5
deletionGracePeriod := int64(10)
terminationGracePeriod := int64(10)
@@ -162,8 +165,8 @@ func TestContainerAnnotations(t *testing.T) {

featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
// Test whether we can get right information from label
annotations := newContainerAnnotations(container, pod, restartCount, opts)
containerInfo := getContainerInfoFromAnnotations(annotations)
annotations := newContainerAnnotations(tCtx, container, pod, restartCount, opts)
containerInfo := getContainerInfoFromAnnotations(tCtx, annotations)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
@@ -181,8 +184,8 @@ func TestContainerAnnotations(t *testing.T) {
expected.PreStopHandler = nil
// Because container is changed, the Hash should be updated
expected.Hash = kubecontainer.HashContainer(container)
annotations = newContainerAnnotations(container, pod, restartCount, opts)
containerInfo = getContainerInfoFromAnnotations(annotations)
annotations = newContainerAnnotations(tCtx, container, pod, restartCount, opts)
containerInfo = getContainerInfoFromAnnotations(tCtx, annotations)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
@@ -192,6 +195,7 @@ func TestContainerAnnotations(t *testing.T) {
}

func TestPodLabels(t *testing.T) {
tCtx := ktesting.Init(t)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
@@ -212,7 +216,7 @@ func TestPodLabels(t *testing.T) {

// Test whether we can get right information from label
labels := newPodLabels(pod)
podSandboxInfo := getPodSandboxInfoFromLabels(labels)
podSandboxInfo := getPodSandboxInfoFromLabels(tCtx, labels)
if !reflect.DeepEqual(podSandboxInfo, expected) {
t.Errorf("expected %v, got %v", expected, podSandboxInfo)
}
@@ -20,6 +20,7 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"math"
"strings"
@@ -31,7 +32,7 @@ import (
)

// verifyRunAsNonRoot verifies RunAsNonRoot.
func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid *int64, username string) error {
func verifyRunAsNonRoot(ctx context.Context, pod *v1.Pod, container *v1.Container, uid *int64, username string) error {
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
// If the option is not set, or if running as root is allowed, return nil.
if effectiveSc == nil || effectiveSc.RunAsNonRoot == nil || !*effectiveSc.RunAsNonRoot {
@@ -27,9 +27,11 @@ import (

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/utils/ktesting"
)

func TestVerifyRunAsNonRoot(t *testing.T) {
tCtx := ktesting.Init(t)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
@@ -168,7 +170,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) {
},
} {
pod.Spec.Containers[0].SecurityContext = test.sc
err := verifyRunAsNonRoot(pod, &pod.Spec.Containers[0], test.uid, test.username)
err := verifyRunAsNonRoot(tCtx, pod, &pod.Spec.Containers[0], test.uid, test.username)
if test.fail {
assert.Error(t, err, test.desc)
} else {
@@ -20,12 +20,14 @@ limitations under the License.
package kuberuntime

import (
"context"
"fmt"
"strings"

"k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/securitycontext"
"strings"
)

var (
@@ -39,22 +41,23 @@ var (
// and then optimize this logic according to the best time.
// https://docs.google.com/document/d/1Tjxzjjuy4SQsFSUVXZbvqVb64hjNAG5CQX8bK7Yda9w
// note: usernames on Windows are NOT case sensitive!
func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid *int64, username string) error {
func verifyRunAsNonRoot(ctx context.Context, pod *v1.Pod, container *v1.Container, uid *int64, username string) error {
logger := klog.FromContext(ctx)
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
// If the option is not set, or if running as root is allowed, return nil.
if effectiveSc == nil || effectiveSc.RunAsNonRoot == nil || !*effectiveSc.RunAsNonRoot {
return nil
}
if effectiveSc.RunAsUser != nil {
klog.InfoS("Windows container does not support SecurityContext.RunAsUser, please use SecurityContext.WindowsOptions",
logger.Info("Windows container does not support SecurityContext.RunAsUser, please use SecurityContext.WindowsOptions",
"pod", klog.KObj(pod), "containerName", container.Name)
}
if effectiveSc.SELinuxOptions != nil {
klog.InfoS("Windows container does not support SecurityContext.SELinuxOptions, please use SecurityContext.WindowsOptions",
logger.Info("Windows container does not support SecurityContext.SELinuxOptions, please use SecurityContext.WindowsOptions",
"pod", klog.KObj(pod), "containerName", container.Name)
}
if effectiveSc.RunAsGroup != nil {
klog.InfoS("Windows container does not support SecurityContext.RunAsGroup", "pod", klog.KObj(pod), "containerName", container.Name)
logger.Info("Windows container does not support SecurityContext.RunAsGroup", "pod", klog.KObj(pod), "containerName", container.Name)
}
// Verify that if runAsUserName is set for the pod and/or container that it is not set to 'ContainerAdministrator'
if effectiveSc.WindowsOptions != nil {
@@ -20,14 +20,16 @@ limitations under the License.
package kuberuntime

import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"

"github.com/stretchr/testify/assert"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/utils/ktesting"
)

func TestVerifyRunAsNonRoot(t *testing.T) {
tCtx := ktesting.Init(t)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
@@ -173,7 +175,7 @@ func TestVerifyRunAsNonRoot(t *testing.T) {
},
} {
pod.Spec.Containers[0].SecurityContext = test.sc
err := verifyRunAsNonRoot(pod, &pod.Spec.Containers[0], test.uid, test.username)
err := verifyRunAsNonRoot(tCtx, pod, &pod.Spec.Containers[0], test.uid, test.username)
if test.fail {
assert.Error(t, err, test.desc)
} else {
@@ -17,6 +17,8 @@ limitations under the License.
package util

import (
"context"

v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
@@ -26,8 +28,10 @@ import (
// PodSandboxChanged checks whether the spec of the pod is changed and returns
// (changed, new attempt, original sandboxID if exist).
func PodSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) {
ctx := context.TODO() // This context will be passed as parameter in the future
logger := klog.FromContext(ctx)
if len(podStatus.SandboxStatuses) == 0 {
klog.V(2).InfoS("No sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
logger.V(2).Info("No sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
return true, 0, ""
}

@@ -41,23 +45,23 @@ func PodSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, u
// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
sandboxStatus := podStatus.SandboxStatuses[0]
if readySandboxCount > 1 {
klog.V(2).InfoS("Multiple sandboxes are ready for Pod. Need to reconcile them", "pod", klog.KObj(pod))
logger.V(2).Info("Multiple sandboxes are ready for Pod. Need to reconcile them", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
klog.V(2).InfoS("No ready sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
logger.V(2).Info("No ready sandbox for pod can be found. Need to start a new one", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}

// Needs to create a new sandbox when network namespace changed.
if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != NetworkNamespaceForPod(pod) {
klog.V(2).InfoS("Sandbox for pod has changed. Need to start a new one", "pod", klog.KObj(pod))
logger.V(2).Info("Sandbox for pod has changed. Need to start a new one", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, ""
}

// Needs to create a new sandbox when the sandbox does not have an IP address.
if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network != nil && sandboxStatus.Network.Ip == "" {
klog.V(2).InfoS("Sandbox for pod has no IP address. Need to start a new one", "pod", klog.KObj(pod))
logger.V(2).Info("Sandbox for pod has no IP address. Need to start a new one", "pod", klog.KObj(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
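PodSandboxChanged keeps its signature for now and uses a context.TODO() placeholder; since klog.FromContext falls back to the global logger when the context carries none, the logging behavior stays the same until callers thread a real context through. A minimal sketch of that interim pattern (placeholder function name, not the actual util code):

package example

import (
	"context"

	"k8s.io/klog/v2"
)

func checkSomething() {
	ctx := context.TODO() // TODO: accept ctx as a parameter once callers are migrated
	logger := klog.FromContext(ctx) // no logger in ctx, so this resolves to the global klog logger
	logger.V(2).Info("check ran")
}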