chore: migrate kubelet lifecycle to contextual logging.

zhangzhifei16
2025-07-22 10:14:42 +08:00
parent 405edb50ca
commit 911df655d3
6 changed files with 37 additions and 30 deletions
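The pattern applied throughout this change, shown here as a minimal self-contained sketch (runHook, its messages, and main are illustrative, not kubelet code): instead of logging through the global klog calls (klog.ErrorS, klog.InfoS), a function derives a logger from its context with klog.FromContext and logs through that, so callers decide which logger and key/value pairs the code uses.

package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

// runHook is a stand-in for a lifecycle handler; it logs only through the
// logger carried by ctx.
func runHook(ctx context.Context, name string) error {
	logger := klog.FromContext(ctx)
	if name == "" {
		err := fmt.Errorf("invalid handler: empty name")
		// Previously: klog.ErrorS(err, "Cannot run handler")
		logger.Error(err, "Cannot run handler")
		return err
	}
	logger.V(1).Info("Ran lifecycle hook", "handler", name)
	return nil
}

func main() {
	// The caller attaches a logger to the context once; runHook picks it up.
	ctx := klog.NewContext(context.Background(), klog.Background())
	_ = runHook(ctx, "post-start")
}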

View File

@@ -205,6 +205,7 @@ linters:
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/.*
contextual k8s.io/kubernetes/pkg/kubelet/lifecycle/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*

View File

@@ -219,6 +219,7 @@ linters:
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/.*
contextual k8s.io/kubernetes/pkg/kubelet/lifecycle/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*

View File

@@ -51,6 +51,7 @@ contextual k8s.io/kubernetes/pkg/scheduler/.*
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/.*
contextual k8s.io/kubernetes/pkg/kubelet/lifecycle/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*

View File

@@ -70,6 +70,7 @@ func NewHandlerRunner(httpDoer kubetypes.HTTPDoer, commandRunner kubecontainer.C
}
func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) {
logger := klog.FromContext(ctx)
switch {
case handler.Exec != nil:
var msg string
@@ -77,7 +78,7 @@ func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.Cont
output, err := hr.commandRunner.RunInContainer(ctx, containerID, handler.Exec.Command, 0)
if err != nil {
msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output))
klog.V(1).ErrorS(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output))
logger.V(1).Error(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output))
}
return msg, err
case handler.HTTPGet != nil:
@@ -85,7 +86,7 @@ func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.Cont
var msg string
if err != nil {
msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err)
klog.V(1).ErrorS(err, "HTTP lifecycle hook for Container in Pod failed", "path", handler.HTTPGet.Path, "containerName", container.Name, "pod", klog.KObj(pod))
logger.V(1).Error(err, "HTTP lifecycle hook for Container in Pod failed", "path", handler.HTTPGet.Path, "containerName", container.Name, "pod", klog.KObj(pod))
}
return msg, err
case handler.Sleep != nil:
@@ -93,13 +94,13 @@ func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.Cont
var msg string
if err != nil {
msg = fmt.Sprintf("Sleep lifecycle hook (%d) for Container %q in Pod %q failed - error: %v", handler.Sleep.Seconds, container.Name, format.Pod(pod), err)
klog.V(1).ErrorS(err, "Sleep lifecycle hook for Container in Pod failed", "sleepSeconds", handler.Sleep.Seconds, "containerName", container.Name, "pod", klog.KObj(pod))
logger.V(1).Error(err, "Sleep lifecycle hook for Container in Pod failed", "sleepSeconds", handler.Sleep.Seconds, "containerName", container.Name, "pod", klog.KObj(pod))
}
return msg, err
default:
err := fmt.Errorf("invalid handler: %v", handler)
msg := fmt.Sprintf("Cannot run handler: %v", err)
klog.ErrorS(err, "Cannot run handler")
logger.Error(err, "Cannot run handler")
return msg, err
}
}
@@ -143,12 +144,13 @@ func (hr *handlerRunner) runSleepHandler(ctx context.Context, seconds int64) err
}
func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error {
logger := klog.FromContext(ctx)
host := handler.HTTPGet.Host
podIP := host
if len(host) == 0 {
status, err := hr.containerManager.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
if err != nil {
klog.ErrorS(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod))
logger.Error(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod))
return err
}
if len(status.IPs) == 0 {
@@ -166,9 +168,9 @@ func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, contai
discardHTTPRespBody(resp)
if isHTTPResponseError(err) {
klog.V(1).ErrorS(err, "HTTPS request to lifecycle hook got HTTP response, retrying with HTTP.", "pod", klog.KObj(pod), "host", req.URL.Host)
logger.V(1).Error(err, "HTTPS request to lifecycle hook got HTTP response, retrying with HTTP.", "pod", klog.KObj(pod), "host", req.URL.Host)
req := req.Clone(context.Background())
req := req.Clone(ctx)
req.URL.Scheme = "http"
req.Header.Del("Authorization")
resp, httpErr := hr.httpDoer.Do(req)
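One detail in the hunk above besides the logger switch: the fallback request is now cloned from the caller's ctx instead of context.Background(), so the retry inherits the caller's deadline and cancellation. A simplified, self-contained sketch of that shape (names are illustrative, and unlike the real code it retries on any error rather than only when an HTTPS request gets an HTTP response):

package main

import (
	"context"
	"net/http"
	"time"
)

// getWithHTTPFallback issues a request and, on failure, retries it over plain
// HTTP, reusing the caller's context for both attempts.
func getWithHTTPFallback(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err == nil {
		return resp, nil
	}
	// Cloning from ctx (not context.Background()) keeps the retry bound to the
	// same deadline/cancellation as the original call.
	retry := req.Clone(ctx)
	retry.URL.Scheme = "http"
	retry.Header.Del("Authorization")
	return client.Do(retry)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if resp, err := getWithHTTPFallback(ctx, http.DefaultClient, "https://example.invalid/hook"); err == nil {
		resp.Body.Close()
	}
}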

View File

@@ -38,6 +38,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/test/utils/ktesting"
)
func TestResolvePort(t *testing.T) {
@@ -112,7 +113,7 @@ func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, na
}
func TestRunHandlerExec(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
fakeCommandRunner := fakeContainerCommandRunner{}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
@@ -134,7 +135,7 @@ func TestRunHandlerExec(t *testing.T) {
pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -158,7 +159,7 @@ func (f *fakeHTTP) Do(req *http.Request) (*http.Response, error) {
}
func TestRunHandlerHttp(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
fakeHTTPGetter := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
@@ -183,7 +184,7 @@ func TestRunHandlerHttp(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo"
pod.ObjectMeta.UID = "foo-bar-quux"
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -194,7 +195,7 @@ func TestRunHandlerHttp(t *testing.T) {
}
func TestRunHandlerHttpWithHeaders(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
fakeHTTPDoer := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
@@ -222,7 +223,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) {
pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -236,7 +237,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) {
}
func TestRunHandlerHttps(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
fakeHTTPDoer := fakeHTTP{}
fakePodStatusProvider := stubPodStatusProvider("127.0.0.1")
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
@@ -264,7 +265,7 @@ func TestRunHandlerHttps(t *testing.T) {
t.Run("consistent", func(t *testing.T) {
container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70")
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -318,13 +319,13 @@ func TestRunHandlerHTTPPort(t *testing.T) {
for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
fakeHTTPDoer := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
container.Lifecycle.PostStart.HTTPGet.Port = tt.Port
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if hasError := (err != nil); hasError != tt.ExpectError {
t.Errorf("unexpected error: %v", err)
@@ -589,7 +590,7 @@ func TestRunHTTPHandler(t *testing.T) {
for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
fakePodStatusProvider := stubPodStatusProvider(tt.PodIP)
container.Lifecycle.PostStart.HTTPGet = tt.HTTPGet
@@ -599,7 +600,7 @@ func TestRunHTTPHandler(t *testing.T) {
fakeHTTPDoer := fakeHTTP{}
handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil)
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Fatal(err)
}
@@ -620,7 +621,7 @@ func TestRunHTTPHandler(t *testing.T) {
}
func TestRunHandlerNil(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil)
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
podName := "podFoo"
@@ -637,14 +638,14 @@ func TestRunHandlerNil(t *testing.T) {
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Namespace = podNamespace
pod.Spec.Containers = []v1.Container{container}
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
_, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil {
t.Errorf("expect error, but got nil")
}
}
func TestRunHandlerExecFailure(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
expectedErr := fmt.Errorf("invalid command")
fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()}
handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil)
@@ -669,7 +670,7 @@ func TestRunHandlerExecFailure(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error())
msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
msg, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil {
t.Errorf("expected error: %v", expectedErr)
}
@@ -679,7 +680,7 @@ func TestRunHandlerExecFailure(t *testing.T) {
}
func TestRunHandlerHttpFailure(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
expectedErr := fmt.Errorf("fake http error")
expectedResp := http.Response{
Body: io.NopCloser(strings.NewReader(expectedErr.Error())),
@@ -709,7 +710,7 @@ func TestRunHandlerHttpFailure(t *testing.T) {
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
expectedErrMsg := fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", "bar", containerName, format.Pod(&pod), expectedErr)
msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
msg, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err == nil {
t.Errorf("expected error: %v", expectedErr)
}
@@ -722,7 +723,7 @@ func TestRunHandlerHttpFailure(t *testing.T) {
}
func TestRunHandlerHttpsFailureFallback(t *testing.T) {
ctx := context.Background()
_, tCtx := ktesting.NewTestContext(t)
// Since prometheus' gatherer is global, other tests may have updated metrics already, so
// we need to reset them prior running this test.
@@ -772,7 +773,7 @@ func TestRunHandlerHttpsFailureFallback(t *testing.T) {
pod.ObjectMeta.Name = "podFoo"
pod.ObjectMeta.Namespace = "nsFoo"
pod.Spec.Containers = []v1.Container{container}
msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart)
msg, err := handlerRunner.Run(tCtx, containerID, &pod, &container, container.Lifecycle.PostStart)
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -856,8 +857,9 @@ func TestRunSleepHandler(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, tCtx := ktesting.NewTestContext(t)
pod.Spec.Containers[0].Lifecycle.PreStop.Sleep = &v1.SleepAction{Seconds: tt.sleepSeconds}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(tt.terminationGracePeriodSeconds)*time.Second)
ctx, cancel := context.WithTimeout(tCtx, time.Duration(tt.terminationGracePeriodSeconds)*time.Second)
defer cancel()
_, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PreStop)
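On the test side, the pattern is to take a per-test logger and context from ktesting.NewTestContext and pass that context into the code under test, so contextual log output lands in the test log. A minimal sketch, assuming the code under test accepts a context (doSomething and the package name are illustrative):

package lifecycle_test

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

// doSomething stands in for the code under test; like the lifecycle handlers,
// it logs through the logger carried by its context.
func doSomething(ctx context.Context) error {
	klog.FromContext(ctx).V(1).Info("doing something")
	return nil
}

func TestDoSomething(t *testing.T) {
	// NewTestContext returns a per-test logger and a context that carries it.
	logger, tCtx := ktesting.NewTestContext(t)
	logger.Info("starting")
	if err := doSomething(tCtx); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}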

View File

@@ -121,7 +121,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
logger := klog.FromContext(ctx)
node, err := w.getNodeAnyWayFunc()
if err != nil {
klog.ErrorS(err, "Cannot get Node info")
logger.Error(err, "Cannot get Node info")
return PodAdmitResult{
Admit: false,
Reason: InvalidNodeInfo,
@@ -148,7 +148,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
if rejectPodAdmissionBasedOnSupplementalGroupsPolicy(admitPod, node) {
message := fmt.Sprintf("SupplementalGroupsPolicy=%s is not supported in this node", v1.SupplementalGroupsPolicyStrict)
klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
logger.Info("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
return PodAdmitResult{
Admit: false,
Reason: SupplementalGroupsPolicyNotSupported,