Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog has explicit InitFlags(), so we add them as necessary
- Update the other repositories that we vendor that made a similar change from glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
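Note on the InitFlags() point above: unlike glog, klog does not install its flags on the global flag set as an import side effect, so callers (including test binaries) register them explicitly. Below is a minimal sketch of that pattern; the package name, test name, and flag values are illustrative assumptions, not taken from this commit.

package allocator_test // hypothetical package, for illustration only

import (
	"flag"
	"testing"

	"k8s.io/klog"
)

// init registers klog's flags (-v, -logtostderr, ...) on the default
// flag set and picks illustrative defaults, mirroring the "explicit
// InitFlags in init()" fix described in the commit message.
func init() {
	klog.InitFlags(nil) // nil means: register on flag.CommandLine
	flag.Set("logtostderr", "true")
	flag.Set("v", "2") // assumed verbosity, adjust as needed
}

func TestKlogIsUsable(t *testing.T) {
	klog.V(2).Info("klog flags are initialized; V(2) output is visible")
	klog.Flush()
}

If a test binary parses its own flags, InitFlags can be pointed at a dedicated *flag.FlagSet instead of nil.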
@@ -44,7 +44,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
 "//staging/src/k8s.io/client-go/tools/record:go_default_library",
 "//staging/src/k8s.io/cloud-provider:go_default_library",
-"//vendor/github.com/golang/glog:go_default_library",
+"//vendor/k8s.io/klog:go_default_library",
 ],
 )

@@ -69,7 +69,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/record:go_default_library",
 "//staging/src/k8s.io/cloud-provider:go_default_library",
 "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme:go_default_library",
-"//vendor/github.com/golang/glog:go_default_library",
+"//vendor/k8s.io/klog:go_default_library",
 ],
 )

@@ -21,7 +21,7 @@ import (
 "encoding/json"
 "net"

-"github.com/golang/glog"
+"k8s.io/klog"

 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -48,9 +48,9 @@ func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter {
 }

 broadcaster := record.NewBroadcaster()
-broadcaster.StartLogging(glog.Infof)
+broadcaster.StartLogging(klog.Infof)
 ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"})
-glog.V(0).Infof("Sending events to api server.")
+klog.V(0).Infof("Sending events to api server.")
 broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
 Interface: k8s.CoreV1().Events(""),
 })
@@ -70,7 +70,7 @@ func (a *adapter) Alias(ctx context.Context, nodeName string) (*net.IPNet, error
 case 1:
 break
 default:
-glog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
+klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs)
 }

 _, cidrRange, err := net.ParseCIDR(cidrs[0])

@@ -21,7 +21,7 @@ import (
 "net"
 "time"

-"github.com/golang/glog"
+"k8s.io/klog"

 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -121,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
 LabelSelector: labels.Everything().String(),
 })
 if err != nil {
-glog.Errorf("Failed to list all nodes: %v", err)
+klog.Errorf("Failed to list all nodes: %v", err)
 return false, nil
 }
 return true, nil

@@ -10,7 +10,7 @@ go_test(
 name = "go_default_test",
 srcs = ["cidr_set_test.go"],
 embed = [":go_default_library"],
-deps = ["//vendor/github.com/golang/glog:go_default_library"],
+deps = ["//vendor/k8s.io/klog:go_default_library"],
 )

 go_library(

@@ -22,7 +22,7 @@ import (
 "reflect"
 "testing"

-"github.com/golang/glog"
+"k8s.io/klog"
 )

 func TestCIDRSetFullyAllocated(t *testing.T) {
@@ -478,17 +478,17 @@ func TestGetBitforCIDR(t *testing.T) {

 got, err := cs.getIndexForCIDR(subnetCIDR)
 if err == nil && tc.expectErr {
-glog.Errorf("expected error but got null for %v", tc.description)
+klog.Errorf("expected error but got null for %v", tc.description)
 continue
 }

 if err != nil && !tc.expectErr {
-glog.Errorf("unexpected error: %v for %v", err, tc.description)
+klog.Errorf("unexpected error: %v for %v", err, tc.description)
 continue
 }

 if got != tc.expectedBit {
-glog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
+klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
 }
 }
 }

@@ -23,7 +23,7 @@ import (
 "sync"
 "time"

-"github.com/golang/glog"
+"k8s.io/klog"

 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -84,13 +84,13 @@ var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)
 // NewCloudCIDRAllocator creates a new cloud CIDR allocator.
 func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
 if client == nil {
-glog.Fatalf("kubeClient is nil when starting NodeController")
+klog.Fatalf("kubeClient is nil when starting NodeController")
 }

 eventBroadcaster := record.NewBroadcaster()
 recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
-eventBroadcaster.StartLogging(glog.Infof)
-glog.V(0).Infof("Sending events to api server.")
+eventBroadcaster.StartLogging(klog.Infof)
+klog.V(0).Infof("Sending events to api server.")
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

 gceCloud, ok := cloud.(*gce.Cloud)
@@ -127,15 +127,15 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
 DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
 })

-glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
+klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
 return ca, nil
 }

 func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) {
 defer utilruntime.HandleCrash()

-glog.Infof("Starting cloud CIDR allocator")
-defer glog.Infof("Shutting down cloud CIDR allocator")
+klog.Infof("Starting cloud CIDR allocator")
+defer klog.Infof("Shutting down cloud CIDR allocator")

 if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) {
 return
@@ -153,22 +153,22 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
 select {
 case workItem, ok := <-ca.nodeUpdateChannel:
 if !ok {
-glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
+klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
 return
 }
 if err := ca.updateCIDRAllocation(workItem); err == nil {
-glog.V(3).Infof("Updated CIDR for %q", workItem)
+klog.V(3).Infof("Updated CIDR for %q", workItem)
 } else {
-glog.Errorf("Error updating CIDR for %q: %v", workItem, err)
+klog.Errorf("Error updating CIDR for %q: %v", workItem, err)
 if canRetry, timeout := ca.retryParams(workItem); canRetry {
-glog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
+klog.V(2).Infof("Retrying update for %q after %v", workItem, timeout)
 time.AfterFunc(timeout, func() {
 // Requeue the failed node for update again.
 ca.nodeUpdateChannel <- workItem
 })
 continue
 }
-glog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
+klog.Errorf("Exceeded retry count for %q, dropping from queue", workItem)
 }
 ca.removeNodeFromProcessing(workItem)
 case <-stopChan:
@@ -193,7 +193,7 @@ func (ca *cloudCIDRAllocator) retryParams(nodeName string) (bool, time.Duration)

 entry, ok := ca.nodesInProcessing[nodeName]
 if !ok {
-glog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
+klog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName)
 return false, 0
 }

@@ -231,11 +231,11 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 return nil
 }
 if !ca.insertNodeToProcessing(node.Name) {
-glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
+klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
 return nil
 }

-glog.V(4).Infof("Putting node %s into the work queue", node.Name)
+klog.V(4).Infof("Putting node %s into the work queue", node.Name)
 ca.nodeUpdateChannel <- node.Name
 return nil
 }
@@ -247,7 +247,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 if errors.IsNotFound(err) {
 return nil // node no longer available, skip processing
 }
-glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
+klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
 return err
 }

@@ -267,11 +267,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 podCIDR := cidr.String()

 if node.Spec.PodCIDR == podCIDR {
-glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
+klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
 // We don't return here, in order to set the NetworkUnavailable condition later below.
 } else {
 if node.Spec.PodCIDR != "" {
-glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
+klog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
 // We fall through and set the CIDR despite this error. This
 // implements the same logic as implemented in the
 // rangeAllocator.
@@ -280,14 +280,14 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 }
 for i := 0; i < cidrUpdateRetries; i++ {
 if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
-glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
+klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
 break
 }
 }
 }
 if err != nil {
 nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
-glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
+klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
 return err
 }

@@ -299,13 +299,13 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 LastTransitionTime: metav1.Now(),
 })
 if err != nil {
-glog.Errorf("Error setting route status for node %v: %v", node.Name, err)
+klog.Errorf("Error setting route status for node %v: %v", node.Name, err)
 }
 return err
 }

 func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
-glog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
+klog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)",
 node.Name, node.Spec.PodCIDR)
 return nil
 }

@@ -22,7 +22,7 @@ import (
 "sync"
 "time"

-"github.com/golang/glog"
+"k8s.io/klog"

 "k8s.io/api/core/v1"
 informers "k8s.io/client-go/informers/core/v1"
@@ -99,7 +99,7 @@ func NewController(
 // registers the informers for node changes. This will start synchronization
 // of the node and cloud CIDR range allocations.
 func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
-glog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
+klog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)

 nodes, err := listNodes(c.adapter.k8s)
 if err != nil {
@@ -110,9 +110,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
 _, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR)
 if err == nil {
 c.set.Occupy(cidrRange)
-glog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
+klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
 } else {
-glog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
+klog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
 }
 }

@@ -180,7 +180,7 @@ func (c *Controller) onAdd(node *v1.Node) error {
 c.syncers[node.Name] = syncer
 go syncer.Loop(nil)
 } else {
-glog.Warningf("Add for node %q that already exists", node.Name)
+klog.Warningf("Add for node %q that already exists", node.Name)
 }
 syncer.Update(node)

@@ -194,7 +194,7 @@ func (c *Controller) onUpdate(_, node *v1.Node) error {
 if sync, ok := c.syncers[node.Name]; ok {
 sync.Update(node)
 } else {
-glog.Errorf("Received update for non-existent node %q", node.Name)
+klog.Errorf("Received update for non-existent node %q", node.Name)
 return fmt.Errorf("unknown node %q", node.Name)
 }

@@ -209,7 +209,7 @@ func (c *Controller) onDelete(node *v1.Node) error {
 syncer.Delete(node)
 delete(c.syncers, node.Name)
 } else {
-glog.Warningf("Node %q was already deleted", node.Name)
+klog.Warningf("Node %q was already deleted", node.Name)
 }

 return nil

@@ -21,7 +21,7 @@ import (
 "net"
 "sync"

-"github.com/golang/glog"
+"k8s.io/klog"

 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -69,13 +69,13 @@ type rangeAllocator struct {
 // can initialize its CIDR map. NodeList is only nil in testing.
 func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
 if client == nil {
-glog.Fatalf("kubeClient is nil when starting NodeController")
+klog.Fatalf("kubeClient is nil when starting NodeController")
 }

 eventBroadcaster := record.NewBroadcaster()
 recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
-eventBroadcaster.StartLogging(glog.Infof)
-glog.V(0).Infof("Sending events to api server.")
+eventBroadcaster.StartLogging(klog.Infof)
+klog.V(0).Infof("Sending events to api server.")
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

 set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize)
@@ -96,16 +96,16 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 if serviceCIDR != nil {
 ra.filterOutServiceRange(serviceCIDR)
 } else {
-glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
+klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.")
 }

 if nodeList != nil {
 for _, node := range nodeList.Items {
 if node.Spec.PodCIDR == "" {
-glog.Infof("Node %v has no CIDR, ignoring", node.Name)
+klog.Infof("Node %v has no CIDR, ignoring", node.Name)
 continue
 } else {
-glog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
+klog.Infof("Node %v has CIDR %s, occupying it in CIDR map",
 node.Name, node.Spec.PodCIDR)
 }
 if err := ra.occupyCIDR(&node); err != nil {
@@ -154,8 +154,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
 func (r *rangeAllocator) Run(stopCh <-chan struct{}) {
 defer utilruntime.HandleCrash()

-glog.Infof("Starting range CIDR allocator")
-defer glog.Infof("Shutting down range CIDR allocator")
+klog.Infof("Starting range CIDR allocator")
+defer klog.Infof("Shutting down range CIDR allocator")

 if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) {
 return
@@ -173,7 +173,7 @@ func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
 select {
 case workItem, ok := <-r.nodeCIDRUpdateChannel:
 if !ok {
-glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
+klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed")
 return
 }
 if err := r.updateCIDRAllocation(workItem); err != nil {
@@ -225,7 +225,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 return nil
 }
 if !r.insertNodeToProcessing(node.Name) {
-glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
+klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
 return nil
 }
 if node.Spec.PodCIDR != "" {
@@ -238,7 +238,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 return fmt.Errorf("failed to allocate cidr: %v", err)
 }

-glog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
+klog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR)
 r.nodeCIDRUpdateChannel <- nodeAndCIDR{
 nodeName: node.Name,
 cidr: podCIDR,
@@ -255,7 +255,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
 return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err)
 }

-glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
+klog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR)
 if err = r.cidrs.Release(podCIDR); err != nil {
 return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err)
 }
@@ -275,7 +275,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
 }

 if err := r.cidrs.Occupy(serviceCIDR); err != nil {
-glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
+klog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err)
 }
 }

@@ -289,37 +289,37 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {

 node, err = r.nodeLister.Get(data.nodeName)
 if err != nil {
-glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
+klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
 return err
 }

 if node.Spec.PodCIDR == podCIDR {
-glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
+klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
 return nil
 }
 if node.Spec.PodCIDR != "" {
-glog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
+klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
 if err := r.cidrs.Release(data.cidr); err != nil {
-glog.Errorf("Error when releasing CIDR %v", podCIDR)
+klog.Errorf("Error when releasing CIDR %v", podCIDR)
 }
 return nil
 }
 // If we reached here, it means that the node has no CIDR currently assigned. So we set it.
 for i := 0; i < cidrUpdateRetries; i++ {
 if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
-glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
+klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
 return nil
 }
 }
-glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
+klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
 nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
 // We accept the fact that we may leak CIDRs here. This is safer than releasing
 // them in case when we don't know if request went through.
 // NodeController restart will return all falsely allocated CIDRs to the pool.
 if !apierrors.IsServerTimeout(err) {
-glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
+klog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
 if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
-glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
+klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
 }
 }
 return err

@@ -8,7 +8,7 @@ go_library(
 deps = [
 "//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
-"//vendor/github.com/golang/glog:go_default_library",
+"//vendor/k8s.io/klog:go_default_library",
 ],
 )

@@ -21,7 +21,7 @@ go_test(
 "//pkg/controller/nodeipam/ipam/test:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-"//vendor/github.com/golang/glog:go_default_library",
+"//vendor/k8s.io/klog:go_default_library",
 ],
 )

@@ -22,7 +22,7 @@ import (
 "net"
 "time"

-"github.com/golang/glog"
+"k8s.io/klog"

 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
@@ -120,7 +120,7 @@ func New(c controller, cloudAlias cloudAlias, kubeAPI kubeAPI, mode NodeSyncMode
 // Loop runs the sync loop for a given node. done is an optional channel that
 // is closed when the Loop() returns.
 func (sync *NodeSync) Loop(done chan struct{}) {
-glog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)
+klog.V(2).Infof("Starting sync loop for node %q", sync.nodeName)

 defer func() {
 if done != nil {
@@ -130,13 +130,13 @@ func (sync *NodeSync) Loop(done chan struct{}) {

 timeout := sync.c.ResyncTimeout()
 delayTimer := time.NewTimer(timeout)
-glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
+klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)

 for {
 select {
 case op, more := <-sync.opChan:
 if !more {
-glog.V(2).Infof("Stopping sync loop")
+klog.V(2).Infof("Stopping sync loop")
 return
 }
 sync.c.ReportResult(op.run(sync))
@@ -144,13 +144,13 @@ func (sync *NodeSync) Loop(done chan struct{}) {
 <-delayTimer.C
 }
 case <-delayTimer.C:
-glog.V(4).Infof("Running resync for node %q", sync.nodeName)
+klog.V(4).Infof("Running resync for node %q", sync.nodeName)
 sync.c.ReportResult((&updateOp{}).run(sync))
 }

 timeout := sync.c.ResyncTimeout()
 delayTimer.Reset(timeout)
-glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
+klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout)
 }
 }

@@ -190,15 +190,15 @@ func (op *updateOp) String() string {
 }

 func (op *updateOp) run(sync *NodeSync) error {
-glog.V(3).Infof("Running updateOp %+v", op)
+klog.V(3).Infof("Running updateOp %+v", op)

 ctx := context.Background()

 if op.node == nil {
-glog.V(3).Infof("Getting node spec for %q", sync.nodeName)
+klog.V(3).Infof("Getting node spec for %q", sync.nodeName)
 node, err := sync.kubeAPI.Node(ctx, sync.nodeName)
 if err != nil {
-glog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
+klog.Errorf("Error getting node %q spec: %v", sync.nodeName, err)
 return err
 }
 op.node = node
@@ -206,7 +206,7 @@ func (op *updateOp) run(sync *NodeSync) error {

 aliasRange, err := sync.cloudAlias.Alias(ctx, sync.nodeName)
 if err != nil {
-glog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
+klog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err)
 return err
 }

@@ -228,14 +228,14 @@ func (op *updateOp) run(sync *NodeSync) error {
 // match.
 func (op *updateOp) validateRange(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error {
 if node.Spec.PodCIDR != aliasRange.String() {
-glog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
+klog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)",
 node.Spec.PodCIDR, aliasRange)
 sync.kubeAPI.EmitNodeWarningEvent(node.Name, MismatchEvent,
 "Node.Spec.PodCIDR != cloud alias (%v != %v)", node.Spec.PodCIDR, aliasRange)
 // User intervention is required in this case, as this is most likely due
 // to the user mucking around with their VM aliases on the side.
 } else {
-glog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
+klog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR)
 }
 return nil
 }
@@ -249,26 +249,26 @@ func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, nod
 return fmt.Errorf("cannot sync from cloud in mode %q", sync.mode)
 }

-glog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)
+klog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange)

 if err := sync.set.Occupy(aliasRange); err != nil {
-glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
+klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
 return err
 }

 if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, aliasRange); err != nil {
-glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
+klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err)
 return err
 }

-glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)
+klog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange)

 if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
-glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
+klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
 return err
 }

-glog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)
+klog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange)

 return nil
 }
@@ -283,27 +283,27 @@ func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, nod

 _, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR)
 if err != nil {
-glog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
+klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v",
 node.Spec.PodCIDR, node.Name, err)
 return err
 }

 if err := sync.set.Occupy(aliasRange); err != nil {
-glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
+klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName)
 return err
 }

 if err := sync.cloudAlias.AddAlias(ctx, node.Name, aliasRange); err != nil {
-glog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
+klog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err)
 return err
 }

 if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
-glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
+klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
 return err
 }

-glog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
+klog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v",
 node.Name, node.Spec.PodCIDR)

 return nil
@@ -326,21 +326,21 @@ func (op *updateOp) allocateRange(ctx context.Context, sync *NodeSync, node *v1.
 // is no durable record of the range. The missing space will be
 // recovered on the next restart of the controller.
 if err := sync.cloudAlias.AddAlias(ctx, node.Name, cidrRange); err != nil {
-glog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
+klog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err)
 return err
 }

 if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, cidrRange); err != nil {
-glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
+klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err)
 return err
 }

 if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil {
-glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
+klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err)
 return err
 }

-glog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)
+klog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name)

 return nil
 }
@@ -358,15 +358,15 @@ func (op *deleteOp) String() string {
 }

 func (op *deleteOp) run(sync *NodeSync) error {
-glog.V(3).Infof("Running deleteOp %+v", op)
+klog.V(3).Infof("Running deleteOp %+v", op)
 if op.node.Spec.PodCIDR == "" {
-glog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
+klog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name)
 return nil
 }

 _, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR)
 if err != nil {
-glog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
+klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v",
 op.node.Name, op.node.Spec.PodCIDR, err)
 sync.kubeAPI.EmitNodeWarningEvent(op.node.Name, InvalidPodCIDR,
 "Node %q has an invalid PodCIDR: %q", op.node.Name, op.node.Spec.PodCIDR)
@@ -374,7 +374,7 @@ func (op *deleteOp) run(sync *NodeSync) error {
 }

 sync.set.Release(cidrRange)
-glog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
+klog.V(2).Infof("Node %q was deleted, releasing CIDR range %v",
 op.node.Name, op.node.Spec.PodCIDR)

 return nil

@@ -24,8 +24,8 @@ import (
 "testing"
 "time"

-"github.com/golang/glog"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/klog"
 "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
 "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"

@@ -88,7 +88,7 @@ func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args ..
 }

 func (f *fakeAPIs) ReportResult(err error) {
-glog.V(2).Infof("ReportResult %v", err)
+klog.V(2).Infof("ReportResult %v", err)
 f.results = append(f.results, err)
 if f.reportChan != nil {
 f.reportChan <- struct{}{}
@@ -104,7 +104,7 @@ func (f *fakeAPIs) ResyncTimeout() time.Duration {

 func (f *fakeAPIs) dumpTrace() {
 for i, x := range f.calls {
-glog.Infof("trace %v: %v", i, x)
+klog.Infof("trace %v: %v", i, x)
 }
 }

@@ -20,7 +20,7 @@ import (
 "net"
 "time"

-"github.com/golang/glog"
+"k8s.io/klog"

 utilruntime "k8s.io/apimachinery/pkg/util/runtime"

@@ -90,13 +90,13 @@ func NewNodeIpamController(
 allocatorType ipam.CIDRAllocatorType) (*Controller, error) {

 if kubeClient == nil {
-glog.Fatalf("kubeClient is nil when starting Controller")
+klog.Fatalf("kubeClient is nil when starting Controller")
 }

 eventBroadcaster := record.NewBroadcaster()
-eventBroadcaster.StartLogging(glog.Infof)
+eventBroadcaster.StartLogging(klog.Infof)

-glog.Infof("Sending events to api server.")
+klog.Infof("Sending events to api server.")
 eventBroadcaster.StartRecordingToSink(
 &v1core.EventSinkImpl{
 Interface: kubeClient.CoreV1().Events(""),
@@ -107,13 +107,13 @@ func NewNodeIpamController(
 }

 if clusterCIDR == nil {
-glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
+klog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set")
 }
 mask := clusterCIDR.Mask
 if allocatorType != ipam.CloudAllocatorType {
 // Cloud CIDR allocator does not rely on clusterCIDR or nodeCIDRMaskSize for allocation.
 if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
-glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
+klog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size")
 }
 }

@@ -141,10 +141,10 @@ func NewNodeIpamController(
 }
 ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
 if err != nil {
-glog.Fatalf("Error creating ipam controller: %v", err)
+klog.Fatalf("Error creating ipam controller: %v", err)
 }
 if err := ipamc.Start(nodeInformer); err != nil {
-glog.Fatalf("Error trying to Init(): %v", err)
+klog.Fatalf("Error trying to Init(): %v", err)
 }
 } else {
 var err error
@@ -165,8 +165,8 @@ func NewNodeIpamController(
 func (nc *Controller) Run(stopCh <-chan struct{}) {
 defer utilruntime.HandleCrash()

-glog.Infof("Starting ipam controller")
-defer glog.Infof("Shutting down ipam controller")
+klog.Infof("Starting ipam controller")
+defer klog.Infof("Shutting down ipam controller")

 if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced) {
 return