Merge pull request #22733 from resouer/flow-control

Automatic merge from submit-queue

Add flow control pkg

Minor fix; ref #15634.
Refactor package names in the backoff-related files.
k8s-merge-robot committed 2016-04-11 06:18:51 -07:00
19 changed files with 57 additions and 49 deletions
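For reference, the move from pkg/util to pkg/util/flowcontrol keeps the keyed exponential-backoff API intact; only the import path and package name change. A minimal sketch of how callers drive it (the key string, durations, and the pullImage stand-in are illustrative, not kubelet code):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

// pullImage stands in for the operation guarded by the backoff; always fails here.
func pullImage() error { return fmt.Errorf("pull failed") }

func main() {
	// Per-key exponential backoff: delays start at 1s and double up to a 5m cap.
	backOff := flowcontrol.NewBackOff(time.Second, 5*time.Minute)
	key := "mypod_mynamespace/myimage" // illustrative key

	now := time.Now()
	if backOff.IsInBackOffSince(key, now) {
		// Still inside this key's window: skip the attempt.
		fmt.Printf("in backoff, current delay %v\n", backOff.Get(key))
		return
	}
	if err := pullImage(); err != nil {
		backOff.Next(key, now) // failure: initialize or double this key's delay
	} else {
		backOff.Reset(key) // success: drop the entry so delays restart small
	}
}

Keying by string lets a single Backoff instance track independent delays for many containers or images, which is why the kubelet can share one imageBackOff across pods.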


@@ -25,7 +25,7 @@ import (
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/oom"
 	"k8s.io/kubernetes/pkg/util/procfs"
 )
@@ -43,7 +43,7 @@ func NewFakeDockerManager(
 	osInterface kubecontainer.OSInterface,
 	networkPlugin network.NetworkPlugin,
 	runtimeHelper kubecontainer.RuntimeHelper,
-	httpClient kubetypes.HttpGetter, imageBackOff *util.Backoff) *DockerManager {
+	httpClient kubetypes.HttpGetter, imageBackOff *flowcontrol.Backoff) *DockerManager {
 	fakeOOMAdjuster := oom.NewFakeOOMAdjuster()
 	fakeProcFs := procfs.NewFakeProcFS()


@@ -50,7 +50,7 @@ import (
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/oom"
 	"k8s.io/kubernetes/pkg/util/procfs"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -194,7 +194,7 @@ func NewDockerManager(
 	oomAdjuster *oom.OOMAdjuster,
 	procFs procfs.ProcFSInterface,
 	cpuCFSQuota bool,
-	imageBackOff *util.Backoff,
+	imageBackOff *flowcontrol.Backoff,
 	serializeImagePulls bool,
 	enableCustomMetrics bool,
 	hairpinMode bool,
@@ -1732,7 +1732,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
 }
 
 // Sync the running pod to match the specified desired pod.
-func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) (result kubecontainer.PodSyncResult) {
+func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
 	start := time.Now()
 	defer func() {
 		metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start))
@@ -1982,7 +1982,7 @@ func getUidFromUser(id string) string {
 // If all instances of a container are garbage collected, doBackOff will also return false, which means the container may be restarted before the
 // backoff deadline. However, because that won't cause error and the chance is really slim, we can just ignore it for now.
 // If a container is still in backoff, the function will return a brief backoff error and a detailed error message.
-func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *util.Backoff) (bool, error, string) {
+func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, error, string) {
 	var cStatus *kubecontainer.ContainerStatus
 	// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
 	// TODO(random-liu): Better define backoff start point; add unit and e2e test after we finalize this. (See github issue #22240)
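
The comment block above is the contract doBackOff implements; reduced to its flowcontrol calls, the gate looks roughly like the sketch below (the package, helper name, and error text are hypothetical; the real method also resolves the container's stable docker name and status first):

package kubeletbackoff // hypothetical package, for illustration only

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

// shouldBackOff is a condensed stand-in for the doBackOff gate: given the
// finished time of the latest exited container as the start point, it reports
// whether the restart must wait, returning a brief error per the contract above.
func shouldBackOff(key string, lastFinished time.Time, backOff *flowcontrol.Backoff) (bool, error) {
	if backOff.IsInBackOffSince(key, lastFinished) {
		// Inside the window: return a brief error; the caller logs the details.
		return true, fmt.Errorf("back-off restarting failed container")
	}
	// Outside the window: grow the delay for the next failure and allow the restart.
	backOff.Next(key, lastFinished)
	return false, nil
}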


@@ -45,6 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	uexec "k8s.io/kubernetes/pkg/util/exec"
+	"k8s.io/kubernetes/pkg/util/flowcontrol"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -106,7 +107,7 @@ func newTestDockerManagerWithHTTPClientWithVersion(fakeHTTPClient *fakeHTTP, ver
 		networkPlugin,
 		&fakeRuntimeHelper{},
 		fakeHTTPClient,
-		util.NewBackOff(time.Second, 300*time.Second))
+		flowcontrol.NewBackOff(time.Second, 300*time.Second))
 	return dockerManager, fakeDocker
 }
@@ -586,14 +587,14 @@ func generatePodInfraContainerHash(pod *api.Pod) uint64 {
 // runSyncPod is a helper function to retrieve the running pods from the fake
 // docker client and runs SyncPod for the given pod.
-func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod, backOff *util.Backoff, expectErr bool) kubecontainer.PodSyncResult {
+func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod, backOff *flowcontrol.Backoff, expectErr bool) kubecontainer.PodSyncResult {
 	podStatus, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	fakeDocker.ClearCalls()
 	if backOff == nil {
-		backOff = util.NewBackOff(time.Second, time.Minute)
+		backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
 	}
 	// api.PodStatus is not used in SyncPod now, pass in an empty one.
 	result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
@@ -1089,7 +1090,7 @@ func TestSyncPodBackoff(t *testing.T) {
 		{130, 1, 0, startCalls, false},
 	}
 
-	backOff := util.NewBackOff(time.Second, time.Minute)
+	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
 	backOff.Clock = fakeClock
 	for _, c := range tests {
 		fakeDocker.SetFakeContainers(dockerContainers)
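
The backOff.Clock = fakeClock injection above is what makes these table-driven timings deterministic. A minimal sketch of the same pattern, assuming the pkg/util fake clock of that era (the NewFakeClock constructor and Step method names are assumptions here, not confirmed by this diff):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Inject a controllable clock so backoff expiry does not depend on wall time.
	fakeClock := util.NewFakeClock(time.Now()) // assumed constructor
	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	backOff.Clock = fakeClock

	start := fakeClock.Now()
	backOff.Next("key", start) // record a failure: the delay for "key" becomes 1s

	fakeClock.Step(500 * time.Millisecond)
	fmt.Println(backOff.IsInBackOffSince("key", start)) // true: 0.5s into a 1s window

	fakeClock.Step(time.Second)
	fmt.Println(backOff.IsInBackOffSince("key", start)) // false: the window has elapsed
}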