Merge pull request #30838 from caesarxuchao/per-resource-orphan-behavior

Automatic merge from submit-queue

[GarbageCollector] Allow per-resource default garbage collection behavior

What's the bug:
When deleting an RC with `deleteOptions.OrphanDependents==nil`, the garbage collector is supposed to treat it as `deleteOptions.OrphanDependents==true` and orphan the pods created by the RC. But the apiserver is not doing that.
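
To make the intended semantics concrete, here is a minimal runnable sketch of that defaulting decision (the type and function names below are illustrative, not the PR's actual identifiers):

```go
package main

import "fmt"

// GCPolicy stands in for a per-resource default garbage collection
// behavior; the type and constant names here are illustrative only.
type GCPolicy string

const (
	OrphanPolicy  GCPolicy = "Orphan"
	CascadePolicy GCPolicy = "CascadingDeletion"
)

// DeleteOptions stands in for api.DeleteOptions; only the field relevant
// to this sketch is included.
type DeleteOptions struct {
	OrphanDependents *bool
}

// shouldOrphan captures the intended decision: an explicit client choice
// wins, and a nil OrphanDependents falls back to the resource's default.
func shouldOrphan(defaultPolicy GCPolicy, options *DeleteOptions) bool {
	if options != nil && options.OrphanDependents != nil {
		return *options.OrphanDependents
	}
	return defaultPolicy == OrphanPolicy
}

func main() {
	// Deleting an RC with OrphanDependents unset should orphan its pods.
	fmt.Println(shouldOrphan(OrphanPolicy, &DeleteOptions{})) // true
	// An explicit false overrides the default and cascades the deletion.
	cascade := false
	fmt.Println(shouldOrphan(OrphanPolicy, &DeleteOptions{OrphanDependents: &cascade})) // false
}
```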

What's in the PR:
Allow each resource to specify its default garbage collection behavior in the registry. For example, the RC registry's default GC behavior is Orphan, while the Pod registry's default GC behavior is CascadingDeletion.
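
A minimal sketch of what such a per-resource default could look like as a registry-level hook (assuming a strategy-style interface; the identifiers below are illustrative and may not match the PR's exact code):

```go
// Package registry is an illustrative stand-in for the apiserver's
// generic registry machinery.
package registry

// GCPolicy is the same illustrative type as in the sketch above.
type GCPolicy string

const (
	OrphanPolicy  GCPolicy = "Orphan"
	CascadePolicy GCPolicy = "CascadingDeletion"
)

// GarbageCollectionDeleteStrategy is a hypothetical hook a resource's REST
// strategy can implement to declare its default GC behavior; the generic
// registry would consult it when deleteOptions.OrphanDependents is nil.
type GarbageCollectionDeleteStrategy interface {
	DefaultGarbageCollectionPolicy() GCPolicy
}

// rcStrategy: replication controllers orphan their pods by default, so an
// unset OrphanDependents does not cascade-delete running workloads.
type rcStrategy struct{}

func (rcStrategy) DefaultGarbageCollectionPolicy() GCPolicy { return OrphanPolicy }

// podStrategy: pods default to cascading deletion.
type podStrategy struct{}

func (podStrategy) DefaultGarbageCollectionPolicy() GCPolicy { return CascadePolicy }
```

With a hook like this, the generic registry can consult the resource's strategy whenever the client leaves `deleteOptions.OrphanDependents` unset, instead of hard-coding one behavior for all resources.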
Authored by Kubernetes Submit Queue, committed by GitHub on 2016-08-23 08:46:32 -07:00.
7 changed files with 236 additions and 45 deletions.

@@ -168,7 +168,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		gatherMetrics(f)
 	})
-	It("[Feature:GarbageCollector] should orphan pods created by rc", func() {
+	It("[Feature:GarbageCollector] should orphan pods created by rc if delete options say so", func() {
 		clientSet := f.Clientset_1_3
 		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
 		podClient := clientSet.Core().Pods(f.Namespace.Name)
@@ -214,4 +214,51 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
 		}
 		gatherMetrics(f)
 	})
+	It("[Feature:GarbageCollector] should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
+		clientSet := f.Clientset_1_3
+		rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
+		podClient := clientSet.Core().Pods(f.Namespace.Name)
+		rcName := "simpletest.rc"
+		rc := newOwnerRC(f, rcName)
+		By("create the rc")
+		rc, err := rcClient.Create(rc)
+		if err != nil {
+			framework.Failf("Failed to create replication controller: %v", err)
+		}
+		// wait for rc to create some pods
+		if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
+			rc, err := rcClient.Get(rc.Name)
+			if err != nil {
+				return false, fmt.Errorf("Failed to get rc: %v", err)
+			}
+			if rc.Status.Replicas == *rc.Spec.Replicas {
+				return true, nil
+			} else {
+				return false, nil
+			}
+		}); err != nil {
+			framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
+		}
+		By("delete the rc")
+		deleteOptions := &api.DeleteOptions{}
+		deleteOptions.Preconditions = api.NewUIDPreconditions(string(rc.UID))
+		if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
+			framework.Failf("failed to delete the rc: %v", err)
+		}
+		By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
+		if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
+			pods, err := podClient.List(api.ListOptions{})
+			if err != nil {
+				return false, fmt.Errorf("Failed to list pods: %v", err)
+			}
+			if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
+				return false, fmt.Errorf("expect %d pods, got %d pods", e, a)
+			}
+			return false, nil
+		}); err != nil && err != wait.ErrWaitTimeout {
+			framework.Failf("%v", err)
+		}
+		gatherMetrics(f)
+	})
 })

@@ -209,7 +209,7 @@ func TestCascadingDeletion(t *testing.T) {
 	go gc.Run(5, stopCh)
 	defer close(stopCh)
 	// delete one of the replication controller
-	if err := rcClient.Delete(toBeDeletedRCName, nil); err != nil {
+	if err := rcClient.Delete(toBeDeletedRCName, getNonOrphanOptions()); err != nil {
 		t.Fatalf("failed to delete replication controller: %v", err)
 	}
@@ -374,7 +374,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	wg.Add(collections * 4)
 	rcUIDs := make(chan types.UID, collections*4)
 	for i := 0; i < collections; i++ {
-		// rc is created with empty finalizers, deleted with nil delete options, pods will be deleted
+		// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
		go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, nil, &wg, rcUIDs)
 		// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
 		go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{api.FinalizerOrphan}, nil, &wg, rcUIDs)
@@ -397,7 +397,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
 	if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
 		podsInEachCollection := 3
 		// see the comments on the calls to setupRCsPods for details
-		remainingGroups := 2
+		remainingGroups := 3
 		return verifyRemainingObjects(t, clientSet, ns.Name, 0, collections*podsInEachCollection*remainingGroups)
 	}); err != nil {
 		t.Fatal(err)
@@ -411,7 +411,7 @@ func TestStressingCascadingDeletion(t *testing.T) {
 		t.Fatal(err)
 	}
 	for _, pod := range pods.Items {
-		if !strings.Contains(pod.ObjectMeta.Name, "collection2-") && !strings.Contains(pod.ObjectMeta.Name, "collection4-") {
+		if !strings.Contains(pod.ObjectMeta.Name, "collection1-") && !strings.Contains(pod.ObjectMeta.Name, "collection2-") && !strings.Contains(pod.ObjectMeta.Name, "collection4-") {
 			t.Errorf("got unexpected remaining pod: %#v", pod)
 		}
 	}