Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones. Follow-up commits will switch the use of one-off informers to shared informers.
This commit is contained in:
Andy Goldstein
2017-02-06 13:35:50 -05:00
parent cb758738f9
commit 70c6087600
55 changed files with 936 additions and 823 deletions
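For orientation, here is a minimal sketch of how a caller might wire generated shared informers into a controller like this one. It is not part of this commit: it uses the present-day k8s.io/client-go informer factory and apps/v1 DaemonSets rather than the in-tree pkg/client/informers/informers_generated packages and extensions/v1beta1 types this diff targets, and the kubeconfig path is hypothetical.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; in-cluster config would work as well.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory hands out shared informers; asking twice for the same
	// resource returns the same underlying informer, so controllers share
	// a single watch and cache per resource.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	dsInformer := factory.Apps().V1().DaemonSets()
	podInformer := factory.Core().V1().Pods()
	nodeInformer := factory.Core().V1().Nodes()

	// A controller keeps only the Lister() and Informer().HasSynced handles,
	// mirroring the struct fields in the diff below.
	_, _, _ = dsInformer.Lister(), podInformer.Lister(), nodeInformer.Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}

The diff below then makes the controller consume those handles: the struct fields move from the legacy Store types to the generated lister interfaces, and lookups go through the listers' Get/List methods instead of raw store access.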


@@ -23,6 +23,7 @@ import (
"sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -38,9 +39,11 @@ import (
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/legacylisters"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -74,18 +77,18 @@ type DaemonSetsController struct {
syncHandler func(dsKey string) error
// A TTLCache of pod creates/deletes each ds expects to see
expectations controller.ControllerExpectationsInterface
// A store of daemon sets
dsStore *listers.StoreToDaemonSetLister
// A store of pods
podStore *listers.StoreToPodLister
// A store of nodes
nodeStore *listers.StoreToNodeLister
// dsLister can list/get daemonsets from the shared informer's store
dsLister extensionslisters.DaemonSetLister
// dsStoreSynced returns true if the daemonset store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
dsStoreSynced cache.InformerSynced
// podLister can list/get pods from the shared informer's store
podLister corelisters.PodLister
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced cache.InformerSynced
// nodeLister can list/get nodes from the shared informer's store
nodeLister corelisters.NodeLister
// nodeStoreSynced returns true if the node store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
nodeStoreSynced cache.InformerSynced
@@ -96,7 +99,7 @@ type DaemonSetsController struct {
queue workqueue.RateLimitingInterface
}
func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podInformer informers.PodInformer, nodeInformer informers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every client has moved to use the clientset.
@@ -146,7 +149,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
},
DeleteFunc: dsc.deleteDaemonset,
})
dsc.dsStore = daemonSetInformer.Lister()
dsc.dsLister = daemonSetInformer.Lister()
dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced
// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
@@ -156,7 +159,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
UpdateFunc: dsc.updatePod,
DeleteFunc: dsc.deletePod,
})
dsc.podStore = podInformer.Lister()
dsc.podLister = podInformer.Lister()
dsc.podStoreSynced = podInformer.Informer().HasSynced
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -165,7 +168,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
},
)
dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced
dsc.nodeStore = nodeInformer.Lister()
dsc.nodeLister = nodeInformer.Lister()
dsc.syncHandler = dsc.syncDaemonSet
dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
@@ -198,6 +201,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
glog.Infof("Starting Daemon Sets controller manager")
if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.dsStoreSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
return
}
@@ -258,7 +262,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon
return ds
}
}
sets, err := dsc.dsStore.GetPodDaemonSets(pod)
sets, err := dsc.dsLister.GetPodDaemonSets(pod)
if err != nil {
glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name)
return nil
@@ -272,16 +276,16 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon
}
// update lookup cache
dsc.lookupCache.Update(pod, &sets[0])
dsc.lookupCache.Update(pod, sets[0])
return &sets[0]
return sets[0]
}
// isCacheValid checks if the cache is valid
func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
_, exists, err := dsc.dsStore.Get(cachedDS)
_, err := dsc.dsLister.DaemonSets(cachedDS.Namespace).Get(cachedDS.Name)
// ds has been deleted or updated, cache is invalid
if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
if err != nil || !isDaemonSetMatch(pod, cachedDS) {
return false
}
return true
@@ -380,14 +384,14 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
func (dsc *DaemonSetsController) addNode(obj interface{}) {
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
dsList, err := dsc.dsStore.List()
dsList, err := dsc.dsLister.List(labels.Everything())
if err != nil {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
node := obj.(*v1.Node)
for i := range dsList.Items {
ds := &dsList.Items[i]
for i := range dsList {
ds := dsList[i]
_, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
continue
@@ -405,14 +409,14 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
// If node labels didn't change, we can ignore this update.
return
}
dsList, err := dsc.dsStore.List()
dsList, err := dsc.dsLister.List(labels.Everything())
if err != nil {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
for i := range dsList.Items {
ds := &dsList.Items[i]
for i := range dsList {
ds := dsList[i]
_, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
if err != nil {
continue
@@ -434,7 +438,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
if err != nil {
return nil, err
}
daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(selector)
daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
if err != nil {
return nodeToDaemonPods, err
}
@@ -456,14 +460,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
nodeList, err := dsc.nodeStore.List()
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
}
var nodesNeedingDaemonPods, podsToDelete []string
var failedPodsObserved int
for _, node := range nodeList.Items {
_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(&node, ds)
for i := range nodeList {
node := nodeList[i]
_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
continue
}
@@ -615,14 +620,15 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
}
nodeList, err := dsc.nodeStore.List()
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
}
var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
for _, node := range nodeList.Items {
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(&node, ds)
for i := range nodeList {
node := nodeList[i]
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
return err
}
@@ -661,16 +667,19 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := dsc.dsStore.Store.GetByKey(key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
return err
}
if !exists {
ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(3).Infof("daemon set has been deleted %v", key)
dsc.expectations.DeleteExpectations(key)
return nil
}
ds := obj.(*extensions.DaemonSet)
if err != nil {
return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
}
everything := metav1.LabelSelector{}
if reflect.DeepEqual(ds.Spec.Selector, &everything) {
@@ -733,8 +742,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
pods := []*v1.Pod{}
for _, m := range dsc.podStore.Indexer.List() {
pod := m.(*v1.Pod)
podList, err := dsc.podLister.List(labels.Everything())
if err != nil {
return false, false, false, err
}
for i := range podList {
pod := podList[i]
if pod.Spec.NodeName != node.Name {
continue
}
@@ -807,7 +820,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
}
// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []extensions.DaemonSet
type byCreationTimestamp []*extensions.DaemonSet
func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }