mirror of
https://github.com/optim-enterprises-bv/kubernetes.git
synced 2025-11-01 18:58:18 +00:00
Enable garbage collection of custom resources
Enhance the garbage collector to periodically refresh the resources it monitors (via discovery) to enable custom resource definition GC. This implementation caches Unstructured structs for any kinds not covered by a shared informer. The existing meta-only codec only supports compiled types; an improved codec which supports arbitrary types could be introduced to optimize caching to store only metadata for all non-informer types.
This commit is contained in:
@@ -32,6 +32,7 @@ import (
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
@@ -73,6 +74,8 @@ type GarbageCollector struct {
|
||||
// GC caches the owners that do not exist according to the API server.
|
||||
absentOwnerCache *UIDCache
|
||||
sharedInformers informers.SharedInformerFactory
|
||||
|
||||
workerLock sync.RWMutex
|
||||
}
|
||||
|
||||
func NewGarbageCollector(
|
||||
@@ -108,14 +111,24 @@ func NewGarbageCollector(
|
||||
sharedInformers: sharedInformers,
|
||||
ignoredResources: ignoredResources,
|
||||
}
|
||||
if err := gb.monitorsForResources(deletableResources); err != nil {
|
||||
return nil, err
|
||||
if err := gb.syncMonitors(deletableResources); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to sync all monitors: %v", err))
|
||||
}
|
||||
gc.dependencyGraphBuilder = gb
|
||||
|
||||
return gc, nil
|
||||
}
|
||||
|
||||
// resyncMonitors starts or stops resource monitors as needed to ensure that all
|
||||
// (and only) those resources present in the map are monitored.
|
||||
func (gc *GarbageCollector) resyncMonitors(deletableResources map[schema.GroupVersionResource]struct{}) error {
|
||||
if err := gc.dependencyGraphBuilder.syncMonitors(deletableResources); err != nil {
|
||||
return err
|
||||
}
|
||||
gc.dependencyGraphBuilder.startMonitors()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer gc.attemptToDelete.ShutDown()
|
||||
@@ -125,9 +138,9 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
|
||||
glog.Infof("Starting garbage collector controller")
|
||||
defer glog.Infof("Shutting down garbage collector controller")
|
||||
|
||||
gc.dependencyGraphBuilder.Run(stopCh)
|
||||
go gc.dependencyGraphBuilder.Run(stopCh)
|
||||
|
||||
if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.HasSynced) {
|
||||
if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -142,8 +155,43 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
|
||||
<-stopCh
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) HasSynced() bool {
|
||||
return gc.dependencyGraphBuilder.HasSynced()
|
||||
// resettableRESTMapper is a RESTMapper which is capable of resetting itself
// from discovery.
type resettableRESTMapper interface {
	meta.RESTMapper
	// Reset discards any cached mappings so that subsequent lookups reflect
	// the latest discovery information.
	Reset()
}
|
||||
|
||||
// Sync periodically resyncs the garbage collector monitors with resources
// found via the discoveryClient. Sync blocks, continuing to sync until
// a message is received on stopCh.
//
// The discoveryClient should be the same client which underlies restMapper.
func (gc *GarbageCollector) Sync(restMapper resettableRESTMapper, discoveryClient discovery.DiscoveryInterface, period time.Duration, stopCh <-chan struct{}) {
	wait.Until(func() {
		// Ensure workers are paused to avoid processing events before informers
		// have resynced.
		gc.workerLock.Lock()
		defer gc.workerLock.Unlock()

		// Flush stale REST mappings so newly-added resource types (e.g. custom
		// resources) resolve against fresh discovery data.
		restMapper.Reset()
		deletableResources, err := GetDeletableResources(discoveryClient)
		if err != nil {
			utilruntime.HandleError(err)
			return
		}
		// Start/stop monitors to match the freshly discovered resource set.
		if err := gc.resyncMonitors(deletableResources); err != nil {
			utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err))
			return
		}
		// NOTE(review): on sync timeout we only log and fall through, releasing
		// workerLock so workers resume against a possibly-unsynced graph —
		// confirm this best-effort behavior is intentional.
		if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
			utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync"))
		}
	}, period, stopCh)
}
|
||||
|
||||
// IsSynced reports whether the underlying dependency graph builder has
// synced; used as a cache-sync check before workers process items.
func (gc *GarbageCollector) IsSynced() bool {
	return gc.dependencyGraphBuilder.IsSynced()
}
|
||||
|
||||
func (gc *GarbageCollector) runAttemptToDeleteWorker() {
|
||||
@@ -153,6 +201,8 @@ func (gc *GarbageCollector) runAttemptToDeleteWorker() {
|
||||
|
||||
func (gc *GarbageCollector) attemptToDeleteWorker() bool {
|
||||
item, quit := gc.attemptToDelete.Get()
|
||||
gc.workerLock.RLock()
|
||||
defer gc.workerLock.RUnlock()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
@@ -164,13 +214,18 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
|
||||
}
|
||||
err := gc.attemptToDeleteItem(n)
|
||||
if err != nil {
|
||||
// TODO: remove this block when gc starts using dynamic RESTMapper.
|
||||
if restMappingError, ok := err.(*restMappingError); ok {
|
||||
utilruntime.HandleError(fmt.Errorf("Ignore syncing item %#v: %s", n, restMappingError.Message()))
|
||||
// The RESTMapper is static, so no need to retry, otherwise we'll get the same error.
|
||||
return true
|
||||
if _, ok := err.(*restMappingError); ok {
|
||||
// There are at least two ways this can happen:
|
||||
// 1. The reference is to an object of a custom type that has not yet been
|
||||
// recognized by gc.restMapper (this is a transient error).
|
||||
// 2. The reference is to an invalid group/version. We don't currently
|
||||
// have a way to distinguish this from a valid type we will recognize
|
||||
// after the next discovery sync.
|
||||
// For now, record the error and retry.
|
||||
glog.V(5).Infof("error syncing item %s: %v", n, err)
|
||||
} else {
|
||||
utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
|
||||
}
|
||||
utilruntime.HandleError(fmt.Errorf("Error syncing item %#v: %v", n, err))
|
||||
// retry if garbage collection of an object failed.
|
||||
gc.attemptToDelete.AddRateLimited(item)
|
||||
}
|
||||
@@ -454,6 +509,8 @@ func (gc *GarbageCollector) runAttemptToOrphanWorker() {
|
||||
// these steps fail.
|
||||
func (gc *GarbageCollector) attemptToOrphanWorker() bool {
|
||||
item, quit := gc.attemptToOrphan.Get()
|
||||
gc.workerLock.RLock()
|
||||
defer gc.workerLock.RUnlock()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
@@ -498,3 +555,19 @@ func (gc *GarbageCollector) GraphHasUID(UIDs []types.UID) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetDeletableResources returns all resources from discoveryClient that the
|
||||
// garbage collector should recognize and work with. More specifically, all
|
||||
// preferred resources which support the 'delete' verb.
|
||||
func GetDeletableResources(discoveryClient discovery.DiscoveryInterface) (map[schema.GroupVersionResource]struct{}, error) {
|
||||
preferredResources, err := discoveryClient.ServerPreferredResources()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get supported resources from server: %v", err)
|
||||
}
|
||||
deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
|
||||
deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse resources from server: %v", err)
|
||||
}
|
||||
return deletableGroupVersionResources, nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user