kubernetes/pkg/kubelet/images/image_manager.go
Sascha Grunert 0fc4b740f8 Use image pull error in message during back-off
The container status waiting reason toggles between `ImagePullBackOff`
and the actual pull error, resulting in a bad user experience for
consumers like kubectl. For example, the output of
`kubectl get pods` returns either:

```
NAME   READY   STATUS                      RESTARTS   AGE
pod    0/1     SignatureValidationFailed   0          10s
```

or

```
NAME   READY   STATUS             RESTARTS   AGE
pod    0/1     ImagePullBackOff   0          18s
```

depending on the state the image pull is in. We now improve that behavior
by preserving the actual pull error in the `message` of the `waiting`
state during back-off:

```json
{
  "waiting": {
    "message": "Back-off pulling image \"quay.io/crio/unsigned:latest\": SignatureValidationFailed: image pull failed for quay.io/crio/unsigned:latest because the signature validation failed: Source image rejected: A signature was required, but no signature exists",
    "reason": "ImagePullBackOff"
  }
}
```

where the `SignatureValidationFailed` value is inherited from the
previously known state:

```json
{
  "waiting": {
    "message": "image pull failed for quay.io/crio/unsigned:latest because the signature validation failed: Source image rejected: A signature was required, but no signature exists",
    "reason": "SignatureValidationFailed"
  }
}
```
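
The mechanism boils down to caching the last pull error per back-off key
and appending it to the generic back-off message. A minimal, self-contained
sketch of that pattern (names here are illustrative, not the kubelet's
actual types):

```go
package main

import (
	"fmt"
	"sync"
)

// prevPullErrMsg maps a back-off key (pod UID + image) to the last pull error.
var prevPullErrMsg sync.Map

// backOffMessage composes the waiting message while a pull is in back-off,
// appending the cached error from the previous pull attempt if present.
func backOffMessage(backOffKey, image string) string {
	msg := fmt.Sprintf("Back-off pulling image %q", image)
	if prev, ok := prevPullErrMsg.Load(backOffKey); ok {
		msg = fmt.Sprintf("%s: %s", msg, prev)
	}
	return msg
}

func main() {
	key := "<pod-uid>_quay.io/crio/unsigned:latest"
	prevPullErrMsg.Store(key, "SignatureValidationFailed: image pull failed ...")
	fmt.Println(backOffMessage(key, "quay.io/crio/unsigned:latest"))
}
```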

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
2024-10-18 08:47:37 +02:00

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/flowcontrol"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	crierrors "k8s.io/cri-api/pkg/errors"
	"k8s.io/klog/v2"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/metrics"
	"k8s.io/kubernetes/pkg/util/parsers"
)
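
// ImagePodPullingTimeRecorder records the start and finish times of image
// pulls for a pod, so that pod startup latency can be tracked.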
type ImagePodPullingTimeRecorder interface {
	RecordImageStartedPulling(podUID types.UID)
	RecordImageFinishedPulling(podUID types.UID)
}
// imageManager provides the functionalities for image pulling.
type imageManager struct {
	recorder     record.EventRecorder
	imageService kubecontainer.ImageService
	backOff      *flowcontrol.Backoff
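	// prevPullErrMsg caches the last image pull error message per back-off
	// key so that it can be surfaced in the pod status while the pull is in
	// back-off (see EnsureImageExists).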
	prevPullErrMsg sync.Map

	// It will check the presence of the image, and report the 'image pulling' and 'image pulled' events correspondingly.
	puller imagePuller

	podPullingTimeRecorder ImagePodPullingTimeRecorder
}

var _ ImageManager = &imageManager{}
// NewImageManager instantiates a new ImageManager object.
func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, maxParallelImagePulls *int32, qps float32, burst int, podPullingTimeRecorder ImagePodPullingTimeRecorder) ImageManager {
	imageService = throttleImagePulling(imageService, qps, burst)

	var puller imagePuller
	if serialized {
		puller = newSerialImagePuller(imageService)
	} else {
		puller = newParallelImagePuller(imageService, maxParallelImagePulls)
	}
	return &imageManager{
		recorder:               recorder,
		imageService:           imageService,
		backOff:                imageBackOff,
		puller:                 puller,
		podPullingTimeRecorder: podPullingTimeRecorder,
	}
}
// imagePullPrecheck inspects the pull policy and checks for image presence accordingly,
// returning (imageRef, error msg, err) and logging any errors.
func (m *imageManager) imagePullPrecheck(ctx context.Context, objRef *v1.ObjectReference, logPrefix string, pullPolicy v1.PullPolicy, spec *kubecontainer.ImageSpec, imgRef string) (imageRef string, msg string, err error) {
	switch pullPolicy {
	case v1.PullAlways:
		return "", msg, nil
	case v1.PullIfNotPresent:
		imageRef, err = m.imageService.GetImageRef(ctx, *spec)
		if err != nil {
			msg = fmt.Sprintf("Failed to inspect image %q: %v", imgRef, err)
			m.logIt(objRef, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
			return "", msg, ErrImageInspect
		}
		return imageRef, msg, nil
	case v1.PullNever:
		imageRef, err = m.imageService.GetImageRef(ctx, *spec)
		if err != nil {
			msg = fmt.Sprintf("Failed to inspect image %q: %v", imgRef, err)
			m.logIt(objRef, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
			return "", msg, ErrImageInspect
		}
		if imageRef == "" {
			msg = fmt.Sprintf("Container image %q is not present with pull policy of Never", imgRef)
			m.logIt(objRef, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, klog.Warning)
			return "", msg, ErrImageNeverPull
		}
		return imageRef, msg, nil
	}
	return
}
// logIt records an event against objRef if it is non-nil; otherwise it logs
// the message via logFn, prepending prefix.
func (m *imageManager) logIt(objRef *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
	if objRef != nil {
		m.recorder.Event(objRef, eventtype, event, msg)
	} else {
		logFn(fmt.Sprint(prefix, " ", msg))
	}
}
// EnsureImageExists pulls the image for the specified pod and imgRef, and returns
// (imageRef, error message, error).
func (m *imageManager) EnsureImageExists(ctx context.Context, objRef *v1.ObjectReference, pod *v1.Pod, imgRef string, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig, podRuntimeHandler string, pullPolicy v1.PullPolicy) (imageRef, message string, err error) {
	logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, imgRef)

	// If the image contains no tag or digest, a default tag should be applied.
	image, err := applyDefaultImageTag(imgRef)
	if err != nil {
		msg := fmt.Sprintf("Failed to apply default image tag %q: %v", imgRef, err)
		m.logIt(objRef, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning)
		return "", msg, ErrInvalidImageName
	}

	var podAnnotations []kubecontainer.Annotation
	for k, v := range pod.GetAnnotations() {
		podAnnotations = append(podAnnotations, kubecontainer.Annotation{
			Name:  k,
			Value: v,
		})
	}

	spec := kubecontainer.ImageSpec{
		Image:          image,
		Annotations:    podAnnotations,
		RuntimeHandler: podRuntimeHandler,
	}

	imageRef, message, err = m.imagePullPrecheck(ctx, objRef, logPrefix, pullPolicy, &spec, imgRef)
	if err != nil {
		return "", message, err
	}

	if imageRef != "" {
		msg := fmt.Sprintf("Container image %q already present on machine", imgRef)
		m.logIt(objRef, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, klog.Info)
		return imageRef, msg, nil
	}
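
	// The back-off key is scoped to the pod UID and image, so back-off for
	// one pod/image pair does not throttle pulls of the same image by other
	// pods.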
	backOffKey := fmt.Sprintf("%s_%s", pod.UID, imgRef)
	if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) {
		msg := fmt.Sprintf("Back-off pulling image %q", imgRef)
		m.logIt(objRef, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, klog.Info)

		// Wrap the error from the actual pull if available.
		// This information is populated to the pod's
		// .status.containerStatuses[*].state.waiting.message.
		prevPullErrMsg, ok := m.prevPullErrMsg.Load(backOffKey)
		if ok {
			msg = fmt.Sprintf("%s: %s", msg, prevPullErrMsg)
		}

		return "", msg, ErrImagePullBackOff
	}

	// Ensure that the map cannot grow indefinitely.
	m.prevPullErrMsg.Delete(backOffKey)

	m.podPullingTimeRecorder.RecordImageStartedPulling(pod.UID)
	m.logIt(objRef, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", imgRef), klog.Info)
	startTime := time.Now()
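	// The pull itself runs asynchronously in the configured serial or
	// parallel puller; wait for its result on pullChan.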
	pullChan := make(chan pullResult)
	m.puller.pullImage(ctx, spec, pullSecrets, pullChan, podSandboxConfig)
	imagePullResult := <-pullChan
	if imagePullResult.err != nil {
		m.logIt(objRef, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", imgRef, imagePullResult.err), klog.Warning)
		m.backOff.Next(backOffKey, m.backOff.Clock.Now())

		msg, err := evalCRIPullErr(imgRef, imagePullResult.err)

		// Store the actual pull error for providing that information during
		// the image pull back-off.
		m.prevPullErrMsg.Store(backOffKey, fmt.Sprintf("%s: %s", err, msg))

		return "", msg, err
	}
	m.podPullingTimeRecorder.RecordImageFinishedPulling(pod.UID)
	imagePullDuration := time.Since(startTime).Truncate(time.Millisecond)
	m.logIt(objRef, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q in %v (%v including waiting). Image size: %v bytes.",
		imgRef, imagePullResult.pullDuration.Truncate(time.Millisecond), imagePullDuration, imagePullResult.imageSize), klog.Info)
	metrics.ImagePullDuration.WithLabelValues(metrics.GetImageSizeBucket(imagePullResult.imageSize)).Observe(imagePullDuration.Seconds())
	m.backOff.GC()
	return imagePullResult.imageRef, "", nil
}
func evalCRIPullErr(imgRef string, err error) (errMsg string, errRes error) {
	// Error assertions via errors.Is are not supported by gRPC (remote runtime) errors right now.
	// See https://github.com/grpc/grpc-go/issues/3616
	if strings.HasPrefix(err.Error(), crierrors.ErrRegistryUnavailable.Error()) {
		errMsg = fmt.Sprintf(
			"image pull failed for %s because the registry is unavailable%s",
			imgRef,
			// Trim the error name from the message to convert errors like:
			// "RegistryUnavailable: a more detailed explanation" to:
			// "...because the registry is unavailable: a more detailed explanation"
			strings.TrimPrefix(err.Error(), crierrors.ErrRegistryUnavailable.Error()),
		)
		return errMsg, crierrors.ErrRegistryUnavailable
	}

	if strings.HasPrefix(err.Error(), crierrors.ErrSignatureValidationFailed.Error()) {
		errMsg = fmt.Sprintf(
			"image pull failed for %s because the signature validation failed%s",
			imgRef,
			// Trim the error name from the message to convert errors like:
			// "SignatureValidationFailed: a more detailed explanation" to:
			// "...because the signature validation failed: a more detailed explanation"
			strings.TrimPrefix(err.Error(), crierrors.ErrSignatureValidationFailed.Error()),
		)
		return errMsg, crierrors.ErrSignatureValidationFailed
	}

	// Fallback for no specific error
	return err.Error(), ErrImagePull
}
// applyDefaultImageTag parses a docker image string; if it doesn't contain
// any tag or digest, a default tag will be applied.
func applyDefaultImageTag(image string) (string, error) {
	_, tag, digest, err := parsers.ParseImageName(image)
	if err != nil {
		return "", err
	}
	if len(digest) == 0 && len(tag) > 0 && !strings.HasSuffix(image, ":"+tag) {
		// we just concatenate the image name with the default tag here instead
		// of using dockerref.WithTag(named, ...) because that would cause the
		// image to be fully qualified as docker.io/$name if it's a short name
		// (e.g. just busybox). We don't want that to happen to keep the CRI
		// agnostic wrt image names and default hostnames.
		image = image + ":" + tag
	}
	return image, nil
}
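
As a side note on `evalCRIPullErr`: because `errors.Is` does not survive the
gRPC boundary, the CRI error is matched by string prefix and the prefix is
trimmed off. A self-contained sketch of that technique (the error value and
function name here are stand-ins, not the CRI's actual API):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// errSignatureValidationFailed stands in for crierrors.ErrSignatureValidationFailed.
var errSignatureValidationFailed = errors.New("SignatureValidationFailed")

// evalPullErr mirrors the prefix matching above: match on the error string's
// prefix, then trim it so "SignatureValidationFailed: detail" becomes
// "...because the signature validation failed: detail".
func evalPullErr(image string, err error) (string, error) {
	prefix := errSignatureValidationFailed.Error()
	if strings.HasPrefix(err.Error(), prefix) {
		detail := strings.TrimPrefix(err.Error(), prefix)
		return fmt.Sprintf("image pull failed for %s because the signature validation failed%s", image, detail), errSignatureValidationFailed
	}
	return err.Error(), err
}

func main() {
	msg, res := evalPullErr("quay.io/crio/unsigned:latest",
		errors.New("SignatureValidationFailed: Source image rejected: A signature was required, but no signature exists"))
	// Prints the same "<reason>: <message>" shape the kubelet caches for back-off.
	fmt.Printf("%v: %s\n", res, msg)
}
```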