mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-10-31 18:28:13 +00:00 
			
		
		
		
	Merge pull request #41202 from dashpole/revert-41095-deletion_pod_lifecycle
Revert "[Kubelet] Delay deletion of pod from the API server until volumes are deleted"
This commit is contained in:
		| @@ -182,7 +182,6 @@ go_test( | ||||
|         "//pkg/kubelet/server/remotecommand:go_default_library", | ||||
|         "//pkg/kubelet/server/stats:go_default_library", | ||||
|         "//pkg/kubelet/status:go_default_library", | ||||
|         "//pkg/kubelet/status/testing:go_default_library", | ||||
|         "//pkg/kubelet/types:go_default_library", | ||||
|         "//pkg/kubelet/util/queue:go_default_library", | ||||
|         "//pkg/kubelet/util/sliceutils:go_default_library", | ||||
|   | ||||
| @@ -690,7 +690,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub | ||||
| 	} | ||||
| 	klet.imageManager = imageManager | ||||
|  | ||||
| 	klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet) | ||||
| 	klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager) | ||||
|  | ||||
| 	klet.probeManager = prober.NewManager( | ||||
| 		klet.statusManager, | ||||
| @@ -715,7 +715,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub | ||||
| 		kubeCfg.EnableControllerAttachDetach, | ||||
| 		nodeName, | ||||
| 		klet.podManager, | ||||
| 		klet.statusManager, | ||||
| 		klet.kubeClient, | ||||
| 		klet.volumePluginMgr, | ||||
| 		klet.containerRuntime, | ||||
|   | ||||
| @@ -728,37 +728,6 @@ func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool { | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // Returns true if all required node-level resources that a pod was consuming have been reclaimed by the kubelet. | ||||
| // Reclaiming resources is a prerequisite to deleting a pod from the API server. | ||||
| func (kl *Kubelet) OkToDeletePod(pod *v1.Pod) bool { | ||||
| 	if pod.DeletionTimestamp == nil { | ||||
| 		// We shouldn't delete pods whose DeletionTimestamp is not set | ||||
| 		return false | ||||
| 	} | ||||
| 	if !notRunning(pod.Status.ContainerStatuses) { | ||||
| 		// We shouldn't delete pods that still have running containers | ||||
| 		glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) | ||||
| 		return false | ||||
| 	} | ||||
| 	if kl.podVolumesExist(pod.UID) && !kl.kubeletConfiguration.KeepTerminatedPodVolumes { | ||||
| 		// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes | ||||
| 		glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // notRunning returns true if every status is terminated or waiting, or the status list | ||||
| // is empty. | ||||
| func notRunning(statuses []v1.ContainerStatus) bool { | ||||
| 	for _, status := range statuses { | ||||
| 		if status.State.Terminated == nil && status.State.Waiting == nil { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // filterOutTerminatedPods returns the given pods which the status manager | ||||
| // does not consider failed or succeeded. | ||||
| func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod { | ||||
|   | ||||
| @@ -60,7 +60,6 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/secret" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/server/stats" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/util/queue" | ||||
| 	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager" | ||||
| @@ -181,7 +180,7 @@ func newTestKubeletWithImageList( | ||||
| 	} | ||||
| 	kubelet.secretManager = secretManager | ||||
| 	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager) | ||||
| 	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{}) | ||||
| 	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager) | ||||
| 	kubelet.containerRefManager = kubecontainer.NewRefManager() | ||||
| 	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{}) | ||||
| 	if err != nil { | ||||
| @@ -263,7 +262,6 @@ func newTestKubeletWithImageList( | ||||
| 		controllerAttachDetachEnabled, | ||||
| 		kubelet.nodeName, | ||||
| 		kubelet.podManager, | ||||
| 		kubelet.statusManager, | ||||
| 		fakeKubeClient, | ||||
| 		kubelet.volumePluginMgr, | ||||
| 		fakeRuntime, | ||||
|   | ||||
| @@ -56,7 +56,6 @@ go_test( | ||||
|         "//pkg/kubelet/pod:go_default_library", | ||||
|         "//pkg/kubelet/prober/results:go_default_library", | ||||
|         "//pkg/kubelet/status:go_default_library", | ||||
|         "//pkg/kubelet/status/testing:go_default_library", | ||||
|         "//pkg/probe:go_default_library", | ||||
|         "//pkg/util/exec:go_default_library", | ||||
|         "//vendor:github.com/golang/glog", | ||||
|   | ||||
| @@ -28,7 +28,6 @@ import ( | ||||
| 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/prober/results" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| 	"k8s.io/kubernetes/pkg/probe" | ||||
| 	"k8s.io/kubernetes/pkg/util/exec" | ||||
| ) | ||||
| @@ -103,7 +102,7 @@ func newTestManager() *manager { | ||||
| 	// Add test pod to pod manager, so that status manager can get the pod from pod manager if needed. | ||||
| 	podManager.AddPod(getTestPod()) | ||||
| 	m := NewManager( | ||||
| 		status.NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}), | ||||
| 		status.NewManager(&fake.Clientset{}, podManager), | ||||
| 		results.NewManager(), | ||||
| 		nil, // runner | ||||
| 		refManager, | ||||
|   | ||||
| @@ -31,7 +31,6 @@ import ( | ||||
| 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/prober/results" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| 	"k8s.io/kubernetes/pkg/probe" | ||||
| 	"k8s.io/kubernetes/pkg/util/exec" | ||||
| ) | ||||
| @@ -118,7 +117,7 @@ func TestDoProbe(t *testing.T) { | ||||
| 			} | ||||
|  | ||||
| 			// Clean up. | ||||
| 			m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil), &statustest.FakePodDeletionSafetyProvider{}) | ||||
| 			m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil)) | ||||
| 			resultsManager(m, probeType).Remove(testContainerID) | ||||
| 		} | ||||
| 	} | ||||
|   | ||||
| @@ -44,7 +44,6 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/secret" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/server/stats" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/volumemanager" | ||||
| 	"k8s.io/kubernetes/pkg/volume" | ||||
| 	volumetest "k8s.io/kubernetes/pkg/volume/testing" | ||||
| @@ -78,7 +77,7 @@ func TestRunOnce(t *testing.T) { | ||||
| 		cadvisor:            cadvisor, | ||||
| 		nodeLister:          testNodeLister{}, | ||||
| 		nodeInfo:            testNodeInfo{}, | ||||
| 		statusManager:       status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}), | ||||
| 		statusManager:       status.NewManager(nil, podManager), | ||||
| 		containerRefManager: kubecontainer.NewRefManager(), | ||||
| 		podManager:          podManager, | ||||
| 		os:                  &containertest.FakeOS{}, | ||||
| @@ -103,7 +102,6 @@ func TestRunOnce(t *testing.T) { | ||||
| 		true, | ||||
| 		kb.nodeName, | ||||
| 		kb.podManager, | ||||
| 		kb.statusManager, | ||||
| 		kb.kubeClient, | ||||
| 		kb.volumePluginMgr, | ||||
| 		fakeRuntime, | ||||
|   | ||||
| @@ -51,7 +51,6 @@ go_test( | ||||
|         "//pkg/kubelet/pod:go_default_library", | ||||
|         "//pkg/kubelet/pod/testing:go_default_library", | ||||
|         "//pkg/kubelet/secret:go_default_library", | ||||
|         "//pkg/kubelet/status/testing:go_default_library", | ||||
|         "//pkg/kubelet/types:go_default_library", | ||||
|         "//vendor:github.com/stretchr/testify/assert", | ||||
|         "//vendor:k8s.io/apimachinery/pkg/api/errors", | ||||
| @@ -71,9 +70,6 @@ filegroup( | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [ | ||||
|         ":package-srcs", | ||||
|         "//pkg/kubelet/status/testing:all-srcs", | ||||
|     ], | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
| ) | ||||
|   | ||||
| @@ -67,7 +67,6 @@ type manager struct { | ||||
| 	// Map from (mirror) pod UID to latest status version successfully sent to the API server. | ||||
| 	// apiStatusVersions must only be accessed from the sync thread. | ||||
| 	apiStatusVersions map[types.UID]uint64 | ||||
| 	podDeletionSafety PodDeletionSafetyProvider | ||||
| } | ||||
|  | ||||
| // PodStatusProvider knows how to provide status for a pod.  It's intended to be used by other components | ||||
| @@ -78,12 +77,6 @@ type PodStatusProvider interface { | ||||
| 	GetPodStatus(uid types.UID) (v1.PodStatus, bool) | ||||
| } | ||||
|  | ||||
| // An object which provides guarantees that a pod can be safely deleted. | ||||
| type PodDeletionSafetyProvider interface { | ||||
| 	// A function which returns true if the pod can safely be deleted | ||||
| 	OkToDeletePod(pod *v1.Pod) bool | ||||
| } | ||||
|  | ||||
| // Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with | ||||
| // the latest v1.PodStatus. It also syncs updates back to the API server. | ||||
| type Manager interface { | ||||
| @@ -110,14 +103,13 @@ type Manager interface { | ||||
|  | ||||
| const syncPeriod = 10 * time.Second | ||||
|  | ||||
| func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider) Manager { | ||||
| func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Manager { | ||||
| 	return &manager{ | ||||
| 		kubeClient:        kubeClient, | ||||
| 		podManager:        podManager, | ||||
| 		podStatuses:       make(map[types.UID]versionedPodStatus), | ||||
| 		podStatusChannel:  make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses | ||||
| 		apiStatusVersions: make(map[types.UID]uint64), | ||||
| 		podDeletionSafety: podDeletionSafety, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @@ -389,7 +381,7 @@ func (m *manager) syncBatch() { | ||||
| 				} | ||||
| 				syncedUID = mirrorUID | ||||
| 			} | ||||
| 			if m.needsUpdate(syncedUID, status) || m.couldBeDeleted(uid, status.status) { | ||||
| 			if m.needsUpdate(syncedUID, status) { | ||||
| 				updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status}) | ||||
| 			} else if m.needsReconcile(uid, status.status) { | ||||
| 				// Delete the apiStatusVersions here to force an update on the pod status | ||||
| @@ -442,7 +434,11 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { | ||||
| 				// We don't handle graceful deletion of mirror pods. | ||||
| 				return | ||||
| 			} | ||||
| 			if !m.podDeletionSafety.OkToDeletePod(pod) { | ||||
| 			if pod.DeletionTimestamp == nil { | ||||
| 				return | ||||
| 			} | ||||
| 			if !notRunning(pod.Status.ContainerStatuses) { | ||||
| 				glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) | ||||
| 				return | ||||
| 			} | ||||
| 			deleteOptions := metav1.NewDeleteOptions(0) | ||||
| @@ -467,15 +463,6 @@ func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool { | ||||
| 	return !ok || latest < status.version | ||||
| } | ||||
|  | ||||
| func (m *manager) couldBeDeleted(uid types.UID, status v1.PodStatus) bool { | ||||
| 	// The pod could be a static pod, so we should translate first. | ||||
| 	pod, ok := m.podManager.GetPodByUID(uid) | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	return !kubepod.IsMirrorPod(pod) && m.podDeletionSafety.OkToDeletePod(pod) | ||||
| } | ||||
|  | ||||
| // needsReconcile compares the given status with the status in the pod manager (which | ||||
| // in fact comes from apiserver), returns whether the status needs to be reconciled with | ||||
| // the apiserver. Now when pod status is inconsistent between apiserver and kubelet, | ||||
| @@ -576,6 +563,17 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus { | ||||
| 	return status | ||||
| } | ||||
|  | ||||
| // notRunning returns true if every status is terminated or waiting, or the status list | ||||
| // is empty. | ||||
| func notRunning(statuses []v1.ContainerStatus) bool { | ||||
| 	for _, status := range statuses { | ||||
| 		if status.State.Terminated == nil && status.State.Waiting == nil { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func copyStatus(source *v1.PodStatus) (v1.PodStatus, error) { | ||||
| 	clone, err := api.Scheme.DeepCopy(source) | ||||
| 	if err != nil { | ||||
|   | ||||
| @@ -39,7 +39,6 @@ import ( | ||||
| 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" | ||||
| 	kubesecret "k8s.io/kubernetes/pkg/kubelet/secret" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types" | ||||
| ) | ||||
|  | ||||
| @@ -75,7 +74,7 @@ func (m *manager) testSyncBatch() { | ||||
| func newTestManager(kubeClient clientset.Interface) *manager { | ||||
| 	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager()) | ||||
| 	podManager.AddPod(getTestPod()) | ||||
| 	return NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}).(*manager) | ||||
| 	return NewManager(kubeClient, podManager).(*manager) | ||||
| } | ||||
|  | ||||
| func generateRandomMessage() string { | ||||
|   | ||||
| @@ -1,31 +0,0 @@ | ||||
| package(default_visibility = ["//visibility:public"]) | ||||
|  | ||||
| licenses(["notice"]) | ||||
|  | ||||
| load( | ||||
|     "@io_bazel_rules_go//go:def.bzl", | ||||
|     "go_library", | ||||
| ) | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = ["fake_pod_deletion_safety.go"], | ||||
|     tags = ["automanaged"], | ||||
|     deps = [ | ||||
|         "//pkg/api/v1:go_default_library", | ||||
|         "//pkg/kubelet/pod:go_default_library", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
| ) | ||||
| @@ -1,28 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package testing | ||||
|  | ||||
| import ( | ||||
| 	"k8s.io/kubernetes/pkg/api/v1" | ||||
| 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| ) | ||||
|  | ||||
| type FakePodDeletionSafetyProvider struct{} | ||||
|  | ||||
| func (f *FakePodDeletionSafetyProvider) OkToDeletePod(pod *v1.Pod) bool { | ||||
| 	return !kubepod.IsMirrorPod(pod) && pod.DeletionTimestamp != nil | ||||
| } | ||||
| @@ -18,7 +18,6 @@ go_library( | ||||
|         "//pkg/kubelet/config:go_default_library", | ||||
|         "//pkg/kubelet/container:go_default_library", | ||||
|         "//pkg/kubelet/pod:go_default_library", | ||||
|         "//pkg/kubelet/status:go_default_library", | ||||
|         "//pkg/kubelet/util/format:go_default_library", | ||||
|         "//pkg/kubelet/volumemanager/cache:go_default_library", | ||||
|         "//pkg/kubelet/volumemanager/populator:go_default_library", | ||||
| @@ -51,8 +50,6 @@ go_test( | ||||
|         "//pkg/kubelet/pod:go_default_library", | ||||
|         "//pkg/kubelet/pod/testing:go_default_library", | ||||
|         "//pkg/kubelet/secret:go_default_library", | ||||
|         "//pkg/kubelet/status:go_default_library", | ||||
|         "//pkg/kubelet/status/testing:go_default_library", | ||||
|         "//pkg/util/mount:go_default_library", | ||||
|         "//pkg/volume:go_default_library", | ||||
|         "//pkg/volume/testing:go_default_library", | ||||
|   | ||||
| @@ -17,7 +17,6 @@ go_library( | ||||
|         "//pkg/client/clientset_generated/clientset:go_default_library", | ||||
|         "//pkg/kubelet/container:go_default_library", | ||||
|         "//pkg/kubelet/pod:go_default_library", | ||||
|         "//pkg/kubelet/status:go_default_library", | ||||
|         "//pkg/kubelet/util/format:go_default_library", | ||||
|         "//pkg/kubelet/volumemanager/cache:go_default_library", | ||||
|         "//pkg/volume:go_default_library", | ||||
|   | ||||
| @@ -35,7 +35,6 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset" | ||||
| 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/util/format" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" | ||||
| 	"k8s.io/kubernetes/pkg/volume" | ||||
| @@ -71,7 +70,6 @@ func NewDesiredStateOfWorldPopulator( | ||||
| 	loopSleepDuration time.Duration, | ||||
| 	getPodStatusRetryDuration time.Duration, | ||||
| 	podManager pod.Manager, | ||||
| 	podStatusProvider status.PodStatusProvider, | ||||
| 	desiredStateOfWorld cache.DesiredStateOfWorld, | ||||
| 	kubeContainerRuntime kubecontainer.Runtime, | ||||
| 	keepTerminatedPodVolumes bool) DesiredStateOfWorldPopulator { | ||||
| @@ -80,7 +78,6 @@ func NewDesiredStateOfWorldPopulator( | ||||
| 		loopSleepDuration:         loopSleepDuration, | ||||
| 		getPodStatusRetryDuration: getPodStatusRetryDuration, | ||||
| 		podManager:                podManager, | ||||
| 		podStatusProvider:         podStatusProvider, | ||||
| 		desiredStateOfWorld:       desiredStateOfWorld, | ||||
| 		pods: processedPods{ | ||||
| 			processedPods: make(map[volumetypes.UniquePodName]bool)}, | ||||
| @@ -94,7 +91,6 @@ type desiredStateOfWorldPopulator struct { | ||||
| 	loopSleepDuration         time.Duration | ||||
| 	getPodStatusRetryDuration time.Duration | ||||
| 	podManager                pod.Manager | ||||
| 	podStatusProvider         status.PodStatusProvider | ||||
| 	desiredStateOfWorld       cache.DesiredStateOfWorld | ||||
| 	pods                      processedPods | ||||
| 	kubeContainerRuntime      kubecontainer.Runtime | ||||
| @@ -138,30 +134,15 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool { | ||||
| 	podStatus, found := dswp.podStatusProvider.GetPodStatus(pod.UID) | ||||
| 	if !found { | ||||
| 		podStatus = pod.Status | ||||
| 	} | ||||
| 	return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses)) | ||||
| } | ||||
|  | ||||
| // notRunning returns true if every status is terminated or waiting, or the status list | ||||
| // is empty. | ||||
| func notRunning(statuses []v1.ContainerStatus) bool { | ||||
| 	for _, status := range statuses { | ||||
| 		if status.State.Terminated == nil && status.State.Waiting == nil { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| func isPodTerminated(pod *v1.Pod) bool { | ||||
| 	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded | ||||
| } | ||||
|  | ||||
| // Iterate through all pods and add to desired state of world if they don't | ||||
| // exist but should | ||||
| func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() { | ||||
| 	for _, pod := range dswp.podManager.GetPods() { | ||||
| 		if dswp.isPodTerminated(pod) { | ||||
| 		if isPodTerminated(pod) { | ||||
| 			// Do not (re)add volumes for terminated pods | ||||
| 			continue | ||||
| 		} | ||||
| @@ -179,7 +160,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { | ||||
| 		pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID) | ||||
| 		if podExists { | ||||
| 			// Skip running pods | ||||
| 			if !dswp.isPodTerminated(pod) { | ||||
| 			if !isPodTerminated(pod) { | ||||
| 				continue | ||||
| 			} | ||||
| 			if dswp.keepTerminatedPodVolumes { | ||||
|   | ||||
| @@ -33,7 +33,6 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/container" | ||||
| 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/util/format" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator" | ||||
| @@ -152,7 +151,6 @@ func NewVolumeManager( | ||||
| 	controllerAttachDetachEnabled bool, | ||||
| 	nodeName k8stypes.NodeName, | ||||
| 	podManager pod.Manager, | ||||
| 	podStatusProvider status.PodStatusProvider, | ||||
| 	kubeClient clientset.Interface, | ||||
| 	volumePluginMgr *volume.VolumePluginMgr, | ||||
| 	kubeContainerRuntime kubecontainer.Runtime, | ||||
| @@ -193,7 +191,6 @@ func NewVolumeManager( | ||||
| 		desiredStateOfWorldPopulatorLoopSleepPeriod, | ||||
| 		desiredStateOfWorldPopulatorGetPodStatusRetryDuration, | ||||
| 		podManager, | ||||
| 		podStatusProvider, | ||||
| 		vm.desiredStateOfWorld, | ||||
| 		kubeContainerRuntime, | ||||
| 		keepTerminatedPodVolumes) | ||||
|   | ||||
| @@ -36,8 +36,6 @@ import ( | ||||
| 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod" | ||||
| 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/secret" | ||||
| 	"k8s.io/kubernetes/pkg/kubelet/status" | ||||
| 	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" | ||||
| 	"k8s.io/kubernetes/pkg/util/mount" | ||||
| 	"k8s.io/kubernetes/pkg/volume" | ||||
| 	volumetest "k8s.io/kubernetes/pkg/volume/testing" | ||||
| @@ -189,13 +187,11 @@ func newTestVolumeManager( | ||||
| 	fakeRecorder := &record.FakeRecorder{} | ||||
| 	plugMgr := &volume.VolumePluginMgr{} | ||||
| 	plugMgr.InitPlugins([]volume.VolumePlugin{plug}, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil)) | ||||
| 	statusManager := status.NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}) | ||||
|  | ||||
| 	vm, err := NewVolumeManager( | ||||
| 		true, | ||||
| 		testHostname, | ||||
| 		podManager, | ||||
| 		statusManager, | ||||
| 		kubeClient, | ||||
| 		plugMgr, | ||||
| 		&containertest.FakeRuntime{}, | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Jess Frazelle
					Jess Frazelle