fix golint failures - /pkg/kubelet/images

@@ -211,12 +211,8 @@ pkg/kubelet/dockershim/network/hostport/testing
pkg/kubelet/dockershim/network/kubenet
pkg/kubelet/dockershim/network/testing
pkg/kubelet/events
pkg/kubelet/images
pkg/kubelet/kuberuntime
pkg/kubelet/leaky
pkg/kubelet/lifecycle
pkg/kubelet/metrics
pkg/kubelet/pleg
pkg/kubelet/pod
pkg/kubelet/pod/testing
pkg/kubelet/preemption

@@ -46,5 +46,5 @@ func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets
	if ts.limiter.TryAccept() {
		return ts.ImageService.PullImage(image, secrets)
	}
	return "", fmt.Errorf("pull QPS exceeded.")
	return "", fmt.Errorf("pull QPS exceeded")
}

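The one-character change above applies golint's error-string rule: error strings should not end with punctuation (or start with a capital letter), because callers usually wrap them into larger messages. A minimal, hypothetical sketch of the rule, not part of this commit:

package main

import (
	"errors"
	"fmt"
)

// pull stands in for the throttled PullImage call; the error string is
// lowercase and has no trailing period so it composes cleanly when wrapped.
func pull(allowed bool) (string, error) {
	if allowed {
		return "sha256:deadbeef", nil
	}
	return "", errors.New("pull QPS exceeded")
}

func main() {
	if _, err := pull(false); err != nil {
		// Wrapping shows why trailing punctuation is discouraged: the output
		// reads as one sentence, "pulling busybox: pull QPS exceeded".
		fmt.Println(fmt.Errorf("pulling busybox: %w", err))
	}
}
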
@@ -44,8 +44,7 @@ type StatsProvider interface {
	ImageFsStats() (*statsapi.FsStats, error)
}

// Manages lifecycle of all images.
//
// ImageGCManager is an interface for managing lifecycle of all images.
// Implementation is thread-safe.
type ImageGCManager interface {
	// Applies the garbage collection policy. Errors include being unable to free
@@ -61,7 +60,7 @@ type ImageGCManager interface {
	DeleteUnusedImages() error
}

// A policy for garbage collecting images. Policy defines an allowed band in
// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in
// which garbage collection will be run.
type ImageGCPolicy struct {
	// Any usage above this threshold will always trigger garbage collection.
@@ -144,6 +143,7 @@ type imageRecord struct {
	size int64
}

// NewImageGCManager instantiates a new ImageGCManager object.
func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string) (ImageGCManager, error) {
	// Validate policy.
	if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {

@@ -40,6 +40,7 @@ type imageManager struct {

var _ ImageManager = &imageManager{}

// NewImageManager instantiates a new ImageManager object.
func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, qps float32, burst int) ImageManager {
	imageService = throttleImagePulling(imageService, qps, burst)

@@ -112,11 +113,10 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p
			msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
			m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
			return imageRef, "", nil
		} else {
			msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
			m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
			return "", msg, ErrImageNeverPull
		}
		msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
		m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
		return "", msg, ErrImageNeverPull
	}

	backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image)
@@ -132,7 +132,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p
	if imagePullResult.err != nil {
		m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), glog.Warning)
		m.backOff.Next(backOffKey, m.backOff.Clock.Now())
		if imagePullResult.err == RegistryUnavailable {
		if imagePullResult.err == ErrRegistryUnavailable {
			msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)
			return "", msg, imagePullResult.err
		}

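The EnsureImageExists hunks above combine two routine golint fixes: dropping an else block when the if branch already returns (so the remaining code outdents one level), and comparing against the renamed ErrRegistryUnavailable sentinel. A short, hypothetical sketch of the same shape, with invented names:

package main

import (
	"errors"
	"fmt"
)

// ErrRegistryUnavailable mirrors the renamed sentinel above; golint wants
// exported error variables to carry an Err prefix.
var ErrRegistryUnavailable = errors.New("RegistryUnavailable")

// ensureImage returns early from the "already present" case, so the
// not-present path needs no else block and stays at the outer indentation.
func ensureImage(present bool) (string, error) {
	if present {
		return "image already present on machine", nil
	}
	return "", ErrRegistryUnavailable
}

func main() {
	if _, err := ensureImage(false); errors.Is(err, ErrRegistryUnavailable) {
		fmt.Println("registry unavailable, backing off")
	}
}
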
@@ -23,22 +23,22 @@ import (
)

var (
	// Container image pull failed, kubelet is backing off image pull
	// ErrImagePullBackOff - Container image pull failed, kubelet is backing off image pull
	ErrImagePullBackOff = errors.New("ImagePullBackOff")

	// Unable to inspect image
	// ErrImageInspect - Unable to inspect image
	ErrImageInspect = errors.New("ImageInspectError")

	// General image pull error
	// ErrImagePull - General image pull error
	ErrImagePull = errors.New("ErrImagePull")

	// Required Image is absent on host and PullPolicy is NeverPullImage
	// ErrImageNeverPull - Required Image is absent on host and PullPolicy is NeverPullImage
	ErrImageNeverPull = errors.New("ErrImageNeverPull")

	// Get http error when pulling image from registry
	RegistryUnavailable = errors.New("RegistryUnavailable")
	// ErrRegistryUnavailable - Get http error when pulling image from registry
	ErrRegistryUnavailable = errors.New("RegistryUnavailable")

	// Unable to parse the image name.
	// ErrInvalidImageName - Unable to parse the image name.
	ErrInvalidImageName = errors.New("InvalidImageName")
)

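The block above satisfies two golint checks at once: a comment on an exported identifier must start with that identifier's name, and exported error variables should be named ErrFoo (hence RegistryUnavailable becomes ErrRegistryUnavailable). A small, hypothetical example of the resulting doc-comment style:

// Package imagedemo is a hypothetical package illustrating golint-friendly
// doc comments on exported error variables.
package imagedemo

import "errors"

var (
	// ErrImagePullBackOff is returned while pulls are being backed off.
	ErrImagePullBackOff = errors.New("ImagePullBackOff")

	// ErrInvalidImageName is returned when the image name cannot be parsed.
	ErrInvalidImageName = errors.New("InvalidImageName")
)
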
@@ -69,7 +69,7 @@ func (f *fakePodStateProvider) IsPodTerminated(uid types.UID) bool {
	return !found
}

func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
	recorder := &record.FakeRecorder{}
	kubeRuntimeManager := &kubeGenericRuntimeManager{
		recorder:            recorder,
@@ -91,7 +91,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
		return nil, err
	}

	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager)
	kubeRuntimeManager.containerGC = newContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager)
	kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
	kubeRuntimeManager.imagePuller = images.NewImageManager(
		kubecontainer.FilterEventRecorder(recorder),

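NewFakeKubeRuntimeManager and NewContainerGC are only used inside their package, so the commit unexports them; golint's "exported function ... should have comment or be unexported" check then no longer applies. A tiny, hypothetical illustration of that trade-off:

// Package runtimedemo is a hypothetical package showing why a test-only
// constructor can simply be unexported instead of documented.
package runtimedemo

type fakeManager struct {
	name string
}

// newFakeManager is unexported, so golint does not demand a doc comment of
// the form "NewFakeManager ..." for it.
func newFakeManager(name string) *fakeManager {
	return &fakeManager{name: name}
}
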
@@ -38,7 +38,7 @@ func TestRecordOperation(t *testing.T) {
	assert.NoError(t, err)
	defer l.Close()

	prometheusUrl := "http://" + temporalServer + "/metrics"
	prometheusURL := "http://" + temporalServer + "/metrics"
	mux := http.NewServeMux()
	mux.Handle("/metrics", prometheus.Handler())
	server := &http.Server{
@@ -55,11 +55,11 @@ func TestRecordOperation(t *testing.T) {

	assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mux.ServeHTTP(w, r)
	}), "GET", prometheusUrl, nil, runtimeOperationsCounterExpected)
	}), "GET", prometheusURL, nil, runtimeOperationsCounterExpected)

	assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mux.ServeHTTP(w, r)
	}), "GET", prometheusUrl, nil, runtimeOperationsLatencyExpected)
	}), "GET", prometheusURL, nil, runtimeOperationsLatencyExpected)
}

func TestInstrumentedVersion(t *testing.T) {

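prometheusUrl → prometheusURL here, and imageId, containerId, fakeHttp, dockerConfigJson, dockerId in the hunks that follow, are all instances of golint's initialisms rule: well-known initialisms (URL, ID, HTTP, JSON, ...) keep a uniform case in identifiers. A hypothetical sketch:

package main

import "fmt"

// buildMetricsURL keeps URL and ID fully upper-cased; golint would flag
// buildMetricsUrl or nodeId as mixed-case initialisms.
func buildMetricsURL(host string, nodeID int) string {
	return fmt.Sprintf("http://%s/metrics?node=%d", host, nodeID)
}

func main() {
	prometheusURL := buildMetricsURL("127.0.0.1:9090", 42)
	fmt.Println(prometheusURL)
}
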
@@ -50,10 +50,14 @@ import (
)

var (
	// ErrCreateContainerConfig - failed to create container config
	ErrCreateContainerConfig = errors.New("CreateContainerConfigError")
	ErrCreateContainer       = errors.New("CreateContainerError")
	ErrPreStartHook          = errors.New("PreStartHookError")
	ErrPostStartHook         = errors.New("PostStartHookError")
	// ErrCreateContainer - failed to create container
	ErrCreateContainer = errors.New("CreateContainerError")
	// ErrPreStartHook - failed to execute PreStartHook
	ErrPreStartHook = errors.New("PreStartHookError")
	// ErrPostStartHook - failed to execute PostStartHook
	ErrPostStartHook = errors.New("PostStartHookError")
)

// recordContainerEvent should be used by the runtime manager for all container related events.

@@ -124,8 +124,8 @@ func TestGenerateContainerConfig(t *testing.T) {
	_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
	assert.Error(t, err)

	imageId, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil)
	image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageId})
	imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil)
	image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID})

	image.Uid = nil
	image.Username = "test"

@@ -56,17 +56,17 @@ func TestRemoveContainer(t *testing.T) {
	_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
	assert.Equal(t, len(fakeContainers), 1)

	containerId := fakeContainers[0].Id
	containerID := fakeContainers[0].Id
	fakeOS := m.osInterface.(*containertest.FakeOS)
	err = m.removeContainer(containerId)
	err = m.removeContainer(containerID)
	assert.NoError(t, err)
	// Verify container log is removed
	expectedContainerLogPath := filepath.Join(podLogsRootDirectory, "12345678", "foo", "0.log")
	expectedContainerLogSymlink := legacyLogSymlink(containerId, "foo", "bar", "new")
	expectedContainerLogSymlink := legacyLogSymlink(containerID, "foo", "bar", "new")
	assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
	// Verify container is removed
	assert.Contains(t, fakeRuntime.Called, "RemoveContainer")
	containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerId})
	containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerID})
	assert.NoError(t, err)
	assert.Empty(t, containers)
}
@@ -257,10 +257,10 @@ func TestLifeCycleHook(t *testing.T) {
	}

	fakeRunner := &containertest.FakeContainerCommandRunner{}
	fakeHttp := &fakeHTTP{}
	fakeHTTP := &fakeHTTP{}

	lcHanlder := lifecycle.NewHandlerRunner(
		fakeHttp,
		fakeHTTP,
		fakeRunner,
		nil)

@@ -277,11 +277,11 @@ func TestLifeCycleHook(t *testing.T) {

	// Configured and working HTTP hook
	t.Run("PreStop-HTTPGet", func(t *testing.T) {
		defer func() { fakeHttp.url = "" }()
		defer func() { fakeHTTP.url = "" }()
		testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
		m.killContainer(testPod, cID, "foo", "testKill", &gracePeriod)

		if !strings.Contains(fakeHttp.url, httpLifeCycle.PreStop.HTTPGet.Host) {
		if !strings.Contains(fakeHTTP.url, httpLifeCycle.PreStop.HTTPGet.Host) {
			t.Errorf("HTTP Prestop hook was not invoked")
		}
	})
@@ -295,7 +295,7 @@ func TestLifeCycleHook(t *testing.T) {

		m.killContainer(testPod, cID, "foo", "testKill", &gracePeriodLocal)

		if strings.Contains(fakeHttp.url, httpLifeCycle.PreStop.HTTPGet.Host) {
		if strings.Contains(fakeHTTP.url, httpLifeCycle.PreStop.HTTPGet.Host) {
			t.Errorf("HTTP Should not execute when gracePeriod is 0")
		}
	})

@@ -39,7 +39,7 @@ type containerGC struct {
}

// NewContainerGC creates a new containerGC.
func NewContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC {
func newContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC {
	return &containerGC{
		client:           client,
		manager:          manager,

@@ -109,8 +109,8 @@ func TestPullWithSecrets(t *testing.T) {
		t.Errorf("unexpected error: %v", err)
	}

	dockerConfigJson := map[string]map[string]map[string]string{"auths": dockerCfg}
	dockerConfigJsonContent, err := json.Marshal(dockerConfigJson)
	dockerConfigJSON := map[string]map[string]map[string]string{"auths": dockerCfg}
	dockerConfigJSONContent, err := json.Marshal(dockerConfigJSON)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
@@ -153,7 +153,7 @@ func TestPullWithSecrets(t *testing.T) {
		},
		"builtin keyring secrets, but use passed with new docker config": {
			"ubuntu",
			[]v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJsonContent}}},
			[]v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJSONContent}}},
			credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
				"index.docker.io/v1/": {Username: "built-in", Password: "password", Provider: nil},
			}),

@@ -118,6 +118,7 @@ type kubeGenericRuntimeManager struct {
	legacyLogProvider LegacyLogProvider
}

// KubeGenericRuntime is a interface contains interfaces for container runtime and command.
type KubeGenericRuntime interface {
	kubecontainer.Runtime
	kubecontainer.StreamingRuntime
@@ -205,7 +206,7 @@ func NewKubeGenericRuntimeManager(
		imagePullQPS,
		imagePullBurst)
	kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager)
	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podStateProvider, kubeRuntimeManager)
	kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager)

	kubeRuntimeManager.versionCache = cache.NewObjectCache(
		func() (interface{}, error) {
@@ -528,7 +529,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
			reason = "Container failed liveness probe."
		} else {
			// Keep the container.
			keepCount += 1
			keepCount++
			continue
		}

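Besides the doc comment and the newContainerGC rename, the last hunk above applies golint's increment rule: keepCount += 1 becomes keepCount++. A trivial, hypothetical illustration:

package main

import "fmt"

func main() {
	keepCount := 0
	for _, keep := range []bool{true, false, true} {
		if keep {
			// golint: "should replace keepCount += 1 with keepCount++"
			keepCount++
		}
	}
	fmt.Println("containers kept:", keepCount)
}
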
@@ -54,7 +54,7 @@ func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*
	// we may want to set memory capacity.
	machineInfo := &cadvisorapi.MachineInfo{}
	osInterface := &containertest.FakeOS{}
	manager, err := NewFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring)
	manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring)
	return fakeRuntimeService, fakeImageService, manager, err
}

@@ -44,9 +44,9 @@ func legacyLogSymlink(containerID string, containerName, podName, podNamespace s
		containerName, containerID)
}

func logSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
func logSymlink(containerLogsDir, podFullName, containerName, dockerID string) string {
	suffix := fmt.Sprintf(".%s", legacyLogSuffix)
	logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)
	logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID)
	// Length of a filename cannot exceed 255 characters in ext4 on Linux.
	if len(logPath) > ext4MaxFileNameLen-len(suffix) {
		logPath = logPath[:ext4MaxFileNameLen-len(suffix)]

@@ -40,10 +40,10 @@ func TestLogSymLink(t *testing.T) {
	containerLogsDir := "/foo/bar"
	podFullName := randStringBytes(128)
	containerName := randStringBytes(70)
	dockerId := randStringBytes(80)
	dockerID := randStringBytes(80)
	// The file name cannot exceed 255 characters. Since .log suffix is required, the prefix cannot exceed 251 characters.
	expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)[:251]+".log")
	as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerId))
	expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID)[:251]+".log")
	as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerID))
}

func TestLegacyLogSymLink(t *testing.T) {

@@ -19,7 +19,7 @@ limitations under the License.
package leaky

const (
	// This is used in a few places outside of Kubelet, such as indexing
	// PodInfraContainerName is used in a few places outside of Kubelet, such as indexing
	// into the container info.
	PodInfraContainerName = "POD"
)

@@ -106,6 +106,7 @@ type podRecord struct {

type podRecords map[types.UID]*podRecord

// NewGenericPLEG instantiates a new GenericPLEG object and return it.
func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
	relistPeriod time.Duration, cache kubecontainer.Cache, clock clock.Clock) PodLifecycleEventGenerator {
	return &GenericPLEG{
@@ -118,7 +119,7 @@ func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int,
	}
}

// Returns a channel from which the subscriber can receive PodLifecycleEvent
// Watch returns a channel from which the subscriber can receive PodLifecycleEvent
// events.
// TODO: support multiple subscribers.
func (g *GenericPLEG) Watch() chan *PodLifecycleEvent {
@@ -130,6 +131,8 @@ func (g *GenericPLEG) Start() {
	go wait.Until(g.relist, g.relistPeriod, wait.NeverStop)
}

// Healthy check if PLEG work properly.
// relistThreshold is the maximum interval between two relist.
func (g *GenericPLEG) Healthy() (bool, error) {
	relistTime := g.getRelistTime()
	elapsed := g.clock.Since(relistTime)

@@ -20,16 +20,20 @@ import (
	"k8s.io/apimachinery/pkg/types"
)

// PodLifeCycleEventType define the event type of pod life cycle events.
type PodLifeCycleEventType string

const (
	// ContainerStarted - event type when the new state of container is running.
	ContainerStarted PodLifeCycleEventType = "ContainerStarted"
	ContainerDied    PodLifeCycleEventType = "ContainerDied"
	// ContainerDied - event type when the new state of container is exited.
	ContainerDied PodLifeCycleEventType = "ContainerDied"
	// ContainerRemoved - event type when the old state of container is exited.
	ContainerRemoved PodLifeCycleEventType = "ContainerRemoved"
	// PodSync is used to trigger syncing of a pod when the observed change of
	// the state of the pod cannot be captured by any single event above.
	PodSync PodLifeCycleEventType = "PodSync"
	// Do not use the events below because they are disabled in GenericPLEG.
	// ContainerChanged - event type when the new state of container is unknown.
	ContainerChanged PodLifeCycleEventType = "ContainerChanged"
)

@@ -45,6 +49,7 @@ type PodLifecycleEvent struct {
	Data interface{}
}

// PodLifecycleEventGenerator contains functions for generating pod life cycle events.
type PodLifecycleEventGenerator interface {
	Start()
	Watch() chan *PodLifecycleEvent