Merge pull request #83501 from yastij/remove-node-cond-dep

remove Get/Set node condition dependency for the ccm controllers
Kubernetes Prow Robot authored 2019-10-04 16:31:26 -07:00, committed by GitHub
9 changed files with 82 additions and 30 deletions
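The change in a sentence: the cloud controller manager (ccm) controllers previously pulled GetNodeCondition from k8s.io/kubernetes/pkg/controller/util/node and SetNodeCondition from k8s.io/kubernetes/pkg/util/node; they now use the equivalent helpers in the staging repo k8s.io/cloud-provider/node/helpers, so these controllers no longer depend on those in-tree packages. The call signatures are unchanged. A minimal usage sketch against a fake clientset (the setup below is illustrative, not taken from the PR):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
)

func main() {
	// Illustrative setup: a fake clientset seeded with one node.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node0"}}
	client := fake.NewSimpleClientset(node)

	// SetNodeCondition patches the node's status with the given condition
	// (same call shape the controllers use in the hunks below).
	if err := cloudnodeutil.SetNodeCondition(client, types.NodeName("node0"), v1.NodeCondition{
		Type:               v1.NodeNetworkUnavailable,
		Status:             v1.ConditionTrue,
		Reason:             "NoRouteCreated",
		LastTransitionTime: metav1.Now(),
	}); err != nil {
		panic(err)
	}

	// GetNodeCondition reads a condition back out of a NodeStatus.
	updated, err := client.CoreV1().Nodes().Get("node0", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if _, cond := cloudnodeutil.GetNodeCondition(&updated.Status, v1.NodeNetworkUnavailable); cond != nil {
		fmt.Printf("condition set: %s=%s (%s)\n", cond.Type, cond.Status, cond.Reason)
	}
}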

pkg/controller/cloud/BUILD

@@ -15,7 +15,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/controller/cloud",
     deps = [
         "//pkg/controller:go_default_library",
-        "//pkg/controller/util/node:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/scheduler/api:go_default_library",
         "//pkg/util/node:go_default_library",
@@ -34,6 +33,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -46,7 +46,6 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/controller:go_default_library",
         "//pkg/controller/testutil:go_default_library",
         "//pkg/kubelet/apis:go_default_library",
         "//pkg/scheduler/api:go_default_library",

pkg/controller/cloud/node_controller.go

@@ -35,6 +35,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	clientretry "k8s.io/client-go/util/retry"
 	cloudprovider "k8s.io/cloud-provider"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	"k8s.io/klog"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -234,7 +235,7 @@ func (cnc *CloudNodeController) initializeNode(node *v1.Node) {
 	// Since there are node taints, do we still need this?
 	// This condition marks the node as unusable until routes are initialized in the cloud provider
 	if cnc.cloud.ProviderName() == "gce" {
-		if err := nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+		if err := cloudnodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 			Type:   v1.NodeNetworkUnavailable,
 			Status: v1.ConditionTrue,
 			Reason: "NoRouteCreated",

pkg/controller/cloud/node_controller_test.go

@@ -21,7 +21,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -31,7 +31,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	cloudprovider "k8s.io/cloud-provider"
 	fakecloud "k8s.io/cloud-provider/fake"
-	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/testutil"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -197,7 +196,7 @@ func TestNodeInitialized(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -262,7 +261,7 @@ func TestNodeIgnored(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -334,7 +333,7 @@ func TestGCECondition(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -419,7 +418,7 @@ func TestZoneInitialized(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -509,7 +508,7 @@ func TestNodeAddresses(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
@@ -622,7 +621,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{
@@ -837,7 +836,7 @@ func TestNodeAddressesNotUpdate(t *testing.T) {
 		},
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
@@ -912,7 +911,7 @@ func TestNodeProviderID(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
@@ -995,7 +994,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) {
 		DeleteWaitChan: make(chan struct{}),
 	}

-	factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())
+	factory := informers.NewSharedInformerFactory(fnh, 0)

 	fakeCloud := &fakecloud.Cloud{
 		InstanceTypes: map[types.NodeName]string{},
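
(A note on the factory changes above: they are behavior-preserving. NoResyncPeriodFunc in k8s.io/kubernetes/pkg/controller simply returns a zero resync period, so passing the literal 0 is equivalent and removes the tests' only reason to import that package. The route controller tests below get the same substitution.)

// The helper the tests no longer import, paraphrased from pkg/controller:
func NoResyncPeriodFunc() time.Duration {
	return 0
}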

pkg/controller/cloud/node_lifecycle_controller.go

@@ -34,9 +34,9 @@ import (
 	v1lister "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/record"
 	cloudprovider "k8s.io/cloud-provider"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
@@ -133,7 +133,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes() {
 	for _, node := range nodes {
 		// Default NodeReady status to v1.ConditionUnknown
 		status := v1.ConditionUnknown
-		if _, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
+		if _, c := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeReady); c != nil {
 			status = c.Status
 		}
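
The adopted helper is functionally identical to the one removed. A sketch of GetNodeCondition, modeled on the k8s.io/cloud-provider/node/helpers implementation (simplified, not a verbatim copy):

package helpers

import v1 "k8s.io/api/core/v1"

// GetNodeCondition returns the index of and a pointer to the condition of the
// requested type, or (-1, nil) when the status carries no such condition.
func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}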

pkg/controller/route/BUILD

@@ -14,9 +14,7 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/controller/route",
     deps = [
-        "//pkg/controller/util/node:go_default_library",
         "//pkg/util/metrics:go_default_library",
-        "//pkg/util/node:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -32,6 +30,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -41,8 +40,6 @@ go_test(
     srcs = ["route_controller_test.go"],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/controller:go_default_library",
-        "//pkg/controller/util/node:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -51,6 +48,7 @@ go_test(
         "//staging/src/k8s.io/client-go/testing:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//staging/src/k8s.io/cloud-provider/fake:go_default_library",
+        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
     ],
 )

pkg/controller/route/route_controller.go

@@ -40,9 +40,8 @@ import (
 	"k8s.io/client-go/tools/record"
 	clientretry "k8s.io/client-go/util/retry"
 	cloudprovider "k8s.io/cloud-provider"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 	"k8s.io/kubernetes/pkg/util/metrics"
-	utilnode "k8s.io/kubernetes/pkg/util/node"
 )

 const (
@@ -290,7 +289,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 }

 func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreated bool) error {
-	_, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
+	_, condition := cloudnodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable)
 	if routesCreated && condition != nil && condition.Status == v1.ConditionFalse {
 		klog.V(2).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name)
 		return nil
@@ -311,7 +310,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreate
 		// patch in the retry loop.
 		currentTime := metav1.Now()
 		if routesCreated {
-			err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+			err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 				Type:   v1.NodeNetworkUnavailable,
 				Status: v1.ConditionFalse,
 				Reason: "RouteCreated",
@@ -319,7 +318,7 @@ func (rc *RouteController) updateNetworkingCondition(node *v1.Node, routesCreate
 				LastTransitionTime: currentTime,
 			})
 		} else {
-			err = utilnode.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
+			err = cloudnodeutil.SetNodeCondition(rc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{
 				Type:   v1.NodeNetworkUnavailable,
 				Status: v1.ConditionTrue,
 				Reason: "NoRouteCreated",
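
Both branches above now go through the same staging helper. A sketch of SetNodeCondition, modeled on the k8s.io/cloud-provider/node/helpers implementation (simplified; the real error handling differs slightly): it refreshes the heartbeat timestamp and patches only status.conditions rather than updating the whole node object, which is what the "patch in the retry loop" comment above refers to.

package helpers

import (
	"encoding/json"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
)

// SetNodeCondition updates one condition on a node's status via a
// strategic-merge patch against status.conditions.
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
	condition.LastHeartbeatTime = metav1.NewTime(time.Now())
	raw, err := json.Marshal(&[]v1.NodeCondition{condition})
	if err != nil {
		return err
	}
	patch := []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw))
	_, err = c.CoreV1().Nodes().PatchStatus(string(node), patch)
	return err
}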

pkg/controller/route/route_controller_test.go

@@ -30,8 +30,7 @@ import (
 	core "k8s.io/client-go/testing"
 	cloudprovider "k8s.io/cloud-provider"
 	fakecloud "k8s.io/cloud-provider/fake"
-	"k8s.io/kubernetes/pkg/controller"
-	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
+	cloudnodeutil "k8s.io/cloud-provider/node/helpers"
 )

 func alwaysReady() bool { return true }
@@ -66,7 +65,7 @@ func TestIsResponsibleForRoute(t *testing.T) {
 			t.Errorf("%d. Error in test case: unparsable cidr %q", i, testCase.clusterCIDR)
 		}
 		client := fake.NewSimpleClientset()
-		informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+		informerFactory := informers.NewSharedInformerFactory(client, 0)
 		rc := New(nil, nil, informerFactory.Core().V1().Nodes(), myClusterName, []*net.IPNet{cidr})
 		rc.nodeListerSynced = alwaysReady
 		route := &cloudprovider.Route{
@@ -367,7 +366,7 @@ func TestReconcile(t *testing.T) {
 				cidrs = append(cidrs, cidrv6)
 			}

-			informerFactory := informers.NewSharedInformerFactory(testCase.clientset, controller.NoResyncPeriodFunc())
+			informerFactory := informers.NewSharedInformerFactory(testCase.clientset, 0)
 			rc := New(routes, testCase.clientset, informerFactory.Core().V1().Nodes(), cluster, cidrs)
 			rc.nodeListerSynced = alwaysReady
 			if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
@@ -376,7 +375,7 @@ func TestReconcile(t *testing.T) {
 			for _, action := range testCase.clientset.Actions() {
 				if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
 					node := action.(core.UpdateAction).GetObject().(*v1.Node)
-					_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
+					_, condition := cloudnodeutil.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
 					if condition == nil {
 						t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
 					} else {