Delete all pods based on condition transition time.

@@ -39,8 +39,18 @@ const (
 	// sync node status in this case, but will monitor node status updated from kubelet. If
 	// it doesn't receive update for this amount of time, it will start posting node NotReady
 	// condition. The amount of time when NodeController start evicting pods is controlled
-	// via flag 'pod_eviction_timeout'. Note: be cautious when changing nodeMonitorGracePeriod,
-	// it must work with kubelet.nodeStatusUpdateFrequency.
+	// via flag 'pod_eviction_timeout'.
+	// Note: be cautious when changing the constant, it must work with nodeStatusUpdateFrequency
+	// in kubelet. There are several constraints:
+	// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
+	//    N means number of retries allowed for kubelet to post node status. It is pointless
+	//    to make nodeMonitorGracePeriod be less than nodeStatusUpdateFrequency, since there
+	//    will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
+	// 2. nodeMonitorGracePeriod can't be too large for user experience - larger value takes
+	//    longer for user to see up-to-date node status.
+	// 3. nodeStatusUpdateFrequency needs to be large enough for Kubelet to generate node
+	//    status. Kubelet may fail to update node status reliablly if the value is too small,
+	//    as it takes time to gather all necessary node information.
 	nodeMonitorGracePeriod = 8 * time.Second
 	// The constant is used if sync_nodes_status=False, and for node startup. When node
 	// is just created, e.g. cluster bootstrap or node creation, we give a longer grace period.
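
The arithmetic behind constraint 1 is easy to check with the constants this commit settles on. A minimal, self-contained Go sketch (the constant values come from the diff; the main function is illustrative, not part of the commit):

package main

import (
	"fmt"
	"time"
)

const (
	// Values as set in this commit.
	nodeStatusUpdateFrequency = 2 * time.Second // kubelet posting interval
	nodeMonitorGracePeriod    = 8 * time.Second // node controller patience window
)

func main() {
	// N from the comment above: how many kubelet posting intervals fit inside
	// the controller's grace period, i.e. how many consecutive posts the kubelet
	// may miss before NodeReady is flipped to Unknown.
	n := int(nodeMonitorGracePeriod / nodeStatusUpdateFrequency)
	fmt.Printf("kubelet may miss up to %d consecutive status posts\n", n)
}
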
@@ -94,15 +104,15 @@ func NewNodeController(
 }
 
 // Run creates initial node list and start syncing instances from cloudprovider, if any.
-// It also starts syncing cluster node status.
+// It also starts syncing or monitoring cluster node status.
 // 1. RegisterNodes() is called only once to register all initial nodes (from cloudprovider
 //    or from command line flag). To make cluster bootstrap faster, node controller populates
 //    node addresses.
 // 2. SyncCloudNodes() is called periodically (if enabled) to sync instances from cloudprovider.
 //    Node created here will only have specs.
-// 3. SyncNodeStatus() is called periodically (if enabled) to sync node status for nodes in
-//    k8s cluster. If not enabled, MonitorNodeStatus() is called otherwise to monitor node
-//    status posted from kubelet.
+// 3. Depending on how k8s is configured, there are two ways of syncing the node status:
+//   3.1 SyncProbedNodeStatus() is called periodically to sync node status for nodes in k8s cluster.
+//   3.2 MonitorNodeStatus() is called periodically to monitor node status posted from kubelet.
 func (s *NodeController) Run(period time.Duration, syncNodeList, syncNodeStatus bool) {
 	// Register intial set of nodes with their status set.
 	var nodes *api.NodeList
@@ -139,7 +149,7 @@ func (s *NodeController) Run(period time.Duration, syncNodeList, syncNodeStatus
 	// Start syncing or monitoring node status.
 	if syncNodeStatus {
 		go util.Forever(func() {
-			if err = s.SyncNodeStatus(); err != nil {
+			if err = s.SyncProbedNodeStatus(); err != nil {
 				glog.Errorf("Error syncing status: %v", err)
 			}
 		}, period)
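
Both branches of this dispatch rely on util.Forever to turn a one-shot sync into a periodic loop. A rough stand-in for what that helper does, assuming the simple run-then-sleep behavior of the util package at this point in the tree (the name forever below is illustrative):

package main

import (
	"fmt"
	"time"
)

// forever runs f, sleeps for period, and repeats, sketching the util.Forever
// contract that Run above depends on.
func forever(f func(), period time.Duration) {
	for {
		f()
		time.Sleep(period)
	}
}

func main() {
	go forever(func() { fmt.Println("sync tick") }, time.Second)
	time.Sleep(3 * time.Second) // let a few ticks fire, then exit
}
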
@@ -227,8 +237,8 @@ func (s *NodeController) SyncCloudNodes() error {
 	return nil
 }
 
-// SyncNodeStatus synchronizes cluster nodes status to master server.
-func (s *NodeController) SyncNodeStatus() error {
+// SyncProbedNodeStatus synchronizes cluster nodes status to master server.
+func (s *NodeController) SyncProbedNodeStatus() error {
 	nodes, err := s.kubeClient.Nodes().List()
 	if err != nil {
 		return err
@@ -415,46 +425,71 @@ func (s *NodeController) MonitorNodeStatus() error {
 		return err
 	}
 	for i := range nodes.Items {
-		node := &nodes.Items[i]
-		// Precompute all condition times to avoid deep copy of node status (We'll modify node for
-		// updating, and NodeStatus.Conditions is an array, which makes assignment copy not useful).
-		latestConditionTime := s.latestConditionTime(node, api.NodeReady)
 		var gracePeriod time.Duration
-		if latestConditionTime == node.CreationTimestamp {
-			gracePeriod = nodeStartupGracePeriod
-		} else {
-			gracePeriod = nodeMonitorGracePeriod
-		}
-		latestFullConditionTime := s.latestConditionTimeWithStatus(node, api.NodeReady, api.ConditionFull)
-		// Grace period has passed, post node NotReady condition to master, without contacting kubelet.
-		if util.Now().After(latestConditionTime.Add(gracePeriod)) {
+		var lastReadyCondition api.NodeCondition
+		node := &nodes.Items[i]
 		readyCondition := s.getCondition(node, api.NodeReady)
 		if readyCondition == nil {
+			// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
+			// A fake ready condition is created, where LastProbeTime and LastTransitionTime is set
+			// to node.CreationTimestamp to avoid handle the corner case.
+			lastReadyCondition = api.NodeCondition{
+				Type:               api.NodeReady,
+				Status:             api.ConditionUnknown,
+				LastProbeTime:      node.CreationTimestamp,
+				LastTransitionTime: node.CreationTimestamp,
+			}
+			gracePeriod = nodeStartupGracePeriod
+		} else {
+			// If ready condition is not nil, make a copy of it, since we may modify it in place later.
+			lastReadyCondition = *readyCondition
+			gracePeriod = nodeMonitorGracePeriod
+		}
+
+		// Check last time when NodeReady was updated.
+		if util.Now().After(lastReadyCondition.LastProbeTime.Add(gracePeriod)) {
+			// NodeReady condition was last set longer ago than gracePeriod, so update it to Unknown
+			// (regardless of its current value) in the master, without contacting kubelet.
+			if readyCondition == nil {
+				glog.V(2).Infof("node %v is never updated by kubelet")
 				node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
 					Type:               api.NodeReady,
-					Status:             api.ConditionNone,
+					Status:             api.ConditionUnknown,
 					Reason:             fmt.Sprintf("Kubelet never posted node status"),
 					LastProbeTime:      util.Now(),
 					LastTransitionTime: util.Now(),
 				})
 			} else {
-				readyCondition.Status = api.ConditionNone
-				readyCondition.Reason = fmt.Sprintf("Kubelet stop posting node status")
+				// Note here the out-dated condition can be the one posted by nodecontroller
+				// itself before. We keep posting the status to keep LastProbeTime fresh.
+				glog.V(2).Infof("node %v hasn't been updated for a while, last ready condition is %+v", node.Name, readyCondition)
+				readyCondition.Status = api.ConditionUnknown
+				readyCondition.Reason = fmt.Sprintf("Kubelet stopped posting node status")
 				readyCondition.LastProbeTime = util.Now()
-				if readyCondition.Status == api.ConditionFull {
+				if lastReadyCondition.Status != api.ConditionUnknown {
 					readyCondition.LastTransitionTime = util.Now()
 				}
 			}
-			glog.V(2).Infof("updating node %v, whose status hasn't been updated by kubelet for a long time", node.Name)
 			_, err = s.kubeClient.Nodes().Update(node)
 			if err != nil {
 				glog.Errorf("error updating node %s: %v", node.Name, err)
 			}
 		}
-		// Eviction timeout! Evict all pods on the unhealthy node.
-		if util.Now().After(latestFullConditionTime.Add(s.podEvictionTimeout)) {
+
+		if readyCondition != nil {
+			// Check eviction timeout.
+			if lastReadyCondition.Status == api.ConditionNone &&
+				util.Now().After(lastReadyCondition.LastTransitionTime.Add(s.podEvictionTimeout)) {
+				// Node stays in not ready for at least 'podEvictionTimeout' - evict all pods on the unhealthy node.
 				s.deletePods(node.Name)
 			}
+			if lastReadyCondition.Status == api.ConditionUnknown &&
+				util.Now().After(lastReadyCondition.LastTransitionTime.Add(s.podEvictionTimeout-gracePeriod)) {
+				// Same as above. Note however, since condition unknown is posted by node controller, which means we
+				// need to substract monitoring grace period in order to get the real 'podEvictionTimeout'.
+				s.deletePods(node.Name)
+			}
+		}
 	}
 	return nil
 }
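
The eviction rules at the bottom of this hunk are the heart of the commit: a node whose last ready condition is ConditionNone is evicted podEvictionTimeout after its LastTransitionTime, while a node marked ConditionUnknown gets podEvictionTimeout minus gracePeriod, because the Unknown condition was posted by the node controller itself, one grace period after the kubelet went quiet. A self-contained sketch of that decision (conditionStatus, shouldEvict, and main are illustrative stand-ins, not code from the commit):

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-ins for api.ConditionNone / api.ConditionUnknown.
type conditionStatus string

const (
	conditionNone    conditionStatus = "None"
	conditionUnknown conditionStatus = "Unknown"
)

// shouldEvict mirrors the timing rule in MonitorNodeStatus above: for a NotReady
// (None) node the clock starts at the kubelet-reported transition time; for an
// Unknown node the controller posted the condition one grace period late, so that
// period is subtracted to recover the real eviction timeout.
func shouldEvict(status conditionStatus, lastTransition, now time.Time, podEvictionTimeout, gracePeriod time.Duration) bool {
	switch status {
	case conditionNone:
		return now.After(lastTransition.Add(podEvictionTimeout))
	case conditionUnknown:
		return now.After(lastTransition.Add(podEvictionTimeout - gracePeriod))
	}
	return false
}

func main() {
	now := time.Now()
	silentSince := now.Add(-6 * time.Minute) // condition last transitioned 6 minutes ago
	// With a 5 minute eviction timeout and 8 second grace period, both paths evict.
	fmt.Println(shouldEvict(conditionNone, silentSince, now, 5*time.Minute, 8*time.Second))    // true
	fmt.Println(shouldEvict(conditionUnknown, silentSince, now, 5*time.Minute, 8*time.Second)) // true
}
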
@@ -555,30 +590,3 @@ func (s *NodeController) getCondition(node *api.Node, conditionType api.NodeCond
 	}
 	return nil
 }
-
-// latestConditionTime returns the latest condition timestamp for the node, regardless of condition status.
-// If nothing matches, the node creation timestamp will be returned.
-func (s *NodeController) latestConditionTime(node *api.Node, conditionType api.NodeConditionType) util.Time {
-	readyTime := node.ObjectMeta.CreationTimestamp
-	for _, condition := range node.Status.Conditions {
-		if condition.Type == conditionType &&
-			condition.LastProbeTime.After(readyTime.Time) {
-			readyTime = condition.LastProbeTime
-		}
-	}
-	return readyTime
-}
-
-// latestConditionTimeWithStatus returns the latest condition timestamp for the node, with given condition status.
-// If nothing matches, the node creation timestamp will be returned.
-func (s *NodeController) latestConditionTimeWithStatus(node *api.Node, conditionType api.NodeConditionType, conditionStatus api.ConditionStatus) util.Time {
-	readyTime := node.ObjectMeta.CreationTimestamp
-	for _, condition := range node.Status.Conditions {
-		if condition.Type == conditionType &&
-			condition.Status == conditionStatus &&
-			condition.LastProbeTime.After(readyTime.Time) {
-			readyTime = condition.LastProbeTime
-		}
-	}
-	return readyTime
-}

@@ -688,7 +688,7 @@ func TestPopulateNodeAddresses(t *testing.T) {
 	}
 }
 
-func TestSyncNodeStatus(t *testing.T) {
+func TestSyncProbedNodeStatus(t *testing.T) {
 	table := []struct {
 		fakeNodeHandler      *FakeNodeHandler
 		fakeKubeletClient    *FakeKubeletClient
@@ -755,7 +755,7 @@ func TestSyncNodeStatus(t *testing.T) {
 
 	for _, item := range table {
 		nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, item.fakeNodeHandler, item.fakeKubeletClient, 10, time.Minute)
-		if err := nodeController.SyncNodeStatus(); err != nil {
+		if err := nodeController.SyncProbedNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
 		if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
@@ -779,7 +779,7 @@ func TestSyncNodeStatus(t *testing.T) {
 		}
 		// Second sync will also update the node.
 		item.fakeNodeHandler.RequestCount = 0
-		if err := nodeController.SyncNodeStatus(); err != nil {
+		if err := nodeController.SyncProbedNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
 		if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
@@ -788,7 +788,7 @@ func TestSyncNodeStatus(t *testing.T) {
 	}
 }
 
-func TestSyncNodeStatusTransitionTime(t *testing.T) {
+func TestSyncProbedNodeStatusTransitionTime(t *testing.T) {
 	table := []struct {
 		fakeNodeHandler              *FakeNodeHandler
 		fakeKubeletClient            *FakeKubeletClient
@@ -870,7 +870,7 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
 		nodeController.lookupIP = func(host string) ([]net.IP, error) {
 			return nil, fmt.Errorf("lookup %v: no such host", host)
 		}
-		if err := nodeController.SyncNodeStatus(); err != nil {
+		if err := nodeController.SyncProbedNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
 		if item.expectedRequestCount != item.fakeNodeHandler.RequestCount {
@@ -894,7 +894,7 @@ func TestSyncNodeStatusTransitionTime(t *testing.T) {
 	}
 }
 
-func TestSyncNodeStatusEvictPods(t *testing.T) {
+func TestSyncProbedNodeStatusEvictPods(t *testing.T) {
 	table := []struct {
 		fakeNodeHandler      *FakeNodeHandler
 		fakeKubeletClient    *FakeKubeletClient
@@ -1032,7 +1032,7 @@ func TestSyncNodeStatusEvictPods(t *testing.T) {
 		nodeController.lookupIP = func(host string) ([]net.IP, error) {
 			return nil, fmt.Errorf("lookup %v: no such host", host)
 		}
-		if err := nodeController.SyncNodeStatus(); err != nil {
+		if err := nodeController.SyncProbedNodeStatus(); err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
 		if item.expectedRequestCount != item.fakeNodeHandler.RequestCount {
@@ -1044,11 +1044,159 @@ func TestSyncNodeStatusEvictPods(t *testing.T) {
 	}
 }
 
-func TestMonitorNodeStatus(t *testing.T) {
+func TestMonitorNodeStatusEvictPods(t *testing.T) {
+	table := []struct {
+		fakeNodeHandler   *FakeNodeHandler
+		expectedEvictPods bool
+	}{
+		// Node created recently, with no status (happens only at cluster startup).
+		{
+			fakeNodeHandler: &FakeNodeHandler{
+				Existing: []*api.Node{
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: util.Now(),
+						},
+					},
+				},
+				Fake: client.Fake{
+					PodsList: api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}},
+				},
+			},
+			expectedEvictPods: false,
+		},
+		// Node created long time ago, with not ready status updated by kubelet for a short time.
+		{
+			fakeNodeHandler: &FakeNodeHandler{
+				Existing: []*api.Node{
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						},
+						Status: api.NodeStatus{
+							Conditions: []api.NodeCondition{
+								{
+									Type:               api.NodeReady,
+									Status:             api.ConditionNone,
+									LastProbeTime:      util.Now(),
+									LastTransitionTime: util.Now(),
+								},
+							},
+						},
+					},
+				},
+				Fake: client.Fake{
+					PodsList: api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}},
+				},
+			},
+			expectedEvictPods: false,
+		},
+		// Node created long time ago, with not ready status updated by kubelet for a long time.
+		{
+			fakeNodeHandler: &FakeNodeHandler{
+				Existing: []*api.Node{
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						},
+						Status: api.NodeStatus{
+							Conditions: []api.NodeCondition{
+								{
+									Type:               api.NodeReady,
+									Status:             api.ConditionUnknown,
+									LastProbeTime:      util.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC),
+									LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+								},
+							},
+						},
+					},
+				},
+				Fake: client.Fake{
+					PodsList: api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}},
+				},
+			},
+			expectedEvictPods: true,
+		},
+		// Node created long time ago, with unknown status updated by node controller for a short time.
+		{
+			fakeNodeHandler: &FakeNodeHandler{
+				Existing: []*api.Node{
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						},
+						Status: api.NodeStatus{
+							Conditions: []api.NodeCondition{
+								{
+									Type:               api.NodeReady,
+									Status:             api.ConditionUnknown,
+									LastProbeTime:      util.Now(),
+									LastTransitionTime: util.Now(),
+								},
+							},
+						},
+					},
+				},
+				Fake: client.Fake{
+					PodsList: api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}},
+				},
+			},
+			expectedEvictPods: false,
+		},
+		// Node created long time ago, with unknown status updated by node controller for a long time.
+		{
+			fakeNodeHandler: &FakeNodeHandler{
+				Existing: []*api.Node{
+					{
+						ObjectMeta: api.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						},
+						Status: api.NodeStatus{
+							Conditions: []api.NodeCondition{
+								{
+									Type:               api.NodeReady,
+									Status:             api.ConditionUnknown,
+									LastProbeTime:      util.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC),
+									LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+								},
+							},
+						},
+					},
+				},
+				Fake: client.Fake{
+					PodsList: api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}},
+				},
+			},
+			expectedEvictPods: true,
+		},
+	}
+
+	for _, item := range table {
+		nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, nil, 10, 5*time.Minute)
+		if err := nodeController.MonitorNodeStatus(); err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		podEvicted := false
+		for _, action := range item.fakeNodeHandler.Actions {
+			if action.Action == "delete-pod" {
+				podEvicted = true
+			}
+		}
+		if item.expectedEvictPods != podEvicted {
+			t.Errorf("expected pod eviction: %+v, got %+v", item.expectedEvictPods, podEvicted)
+		}
+	}
+}
+
+func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
 	table := []struct {
 		fakeNodeHandler      *FakeNodeHandler
 		expectedRequestCount int
-		expectedEvictPods    bool
 		expectedNodes        []*api.Node
 	}{
 		// Node created long time ago, with no status.
@@ -1067,7 +1215,6 @@ func TestMonitorNodeStatus(t *testing.T) {
 				},
 			},
 			expectedRequestCount: 2, // List+Update
-			expectedEvictPods:    true,
 			expectedNodes: []*api.Node{
 				{
 					ObjectMeta: api.ObjectMeta{
@@ -1078,7 +1225,7 @@ func TestMonitorNodeStatus(t *testing.T) {
 					Conditions: []api.NodeCondition{
 						{
 							Type:               api.NodeReady,
-							Status:             api.ConditionNone,
+							Status:             api.ConditionUnknown,
 							Reason:             fmt.Sprintf("Kubelet never posted node status"),
 							LastProbeTime:      util.Time{},
 							LastTransitionTime: util.Time{},
@@ -1104,7 +1251,6 @@ func TestMonitorNodeStatus(t *testing.T) {
 				},
 			},
 			expectedRequestCount: 1, // List
-			expectedEvictPods:    false,
 			expectedNodes:        nil,
 		},
 		// Node created long time ago, with status updated long time ago.
@@ -1133,7 +1279,6 @@ func TestMonitorNodeStatus(t *testing.T) {
 				},
 			},
 			expectedRequestCount: 2, // List+Update
-			expectedEvictPods:    true,
 			expectedNodes: []*api.Node{
 				{
 					ObjectMeta: api.ObjectMeta{
@@ -1144,8 +1289,8 @@ func TestMonitorNodeStatus(t *testing.T) {
 					Conditions: []api.NodeCondition{
 						{
 							Type:               api.NodeReady,
-							Status:             api.ConditionNone,
-							Reason:             fmt.Sprintf("Kubelet stop posting node status"),
+							Status:             api.ConditionUnknown,
+							Reason:             fmt.Sprintf("Kubelet stopped posting node status"),
 							LastProbeTime:      util.Time{},
 							LastTransitionTime: util.Time{},
 						},
@@ -1180,7 +1325,6 @@ func TestMonitorNodeStatus(t *testing.T) {
 				},
 			},
 			expectedRequestCount: 1, // List
-			expectedEvictPods:    false,
 			expectedNodes:        nil,
 		},
 	}
@@ -1209,15 +1353,6 @@ func TestMonitorNodeStatus(t *testing.T) {
 		if !reflect.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
 			t.Errorf("expected nodes %+v, got %+v", item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0])
 		}
-		podEvicted := false
-		for _, action := range item.fakeNodeHandler.Actions {
-			if action.Action == "delete-pod" {
-				podEvicted = true
-			}
-		}
-		if item.expectedEvictPods != podEvicted {
-			t.Errorf("expected pod eviction: %+v, got %+v", item.expectedEvictPods, podEvicted)
-		}
 	}
 }
 

@@ -75,8 +75,18 @@ const (
 	initialNodeStatusUpdateFrequency = 100 * time.Millisecond
 	nodeStatusUpdateFrequencyInc     = 500 * time.Millisecond
 
-	// Node status update frequency and retry count. Note: be cautious when changing nodeStatusUpdateFrequency,
-	// it must work with nodecontroller.nodeMonitorGracePeriod.
+	// Node status update frequency and retry count.
+	// Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
+	// in nodecontroller. There are several constraints:
+	// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
+	//    N means number of retries allowed for kubelet to post node status. It is pointless
+	//    to make nodeMonitorGracePeriod be less than nodeStatusUpdateFrequency, since there
+	//    will only be fresh values from Kubelet at an interval of nodeStatusUpdateFrequency.
+	// 2. nodeMonitorGracePeriod can't be too large for user experience - larger value takes
+	//    longer for user to see up-to-date node status.
+	// 3. nodeStatusUpdateFrequency needs to be large enough for Kubelet to generate node
+	//    status. Kubelet may fail to update node status reliablly if the value is too small,
+	//    as it takes time to gather all necessary node information.
 	nodeStatusUpdateFrequency = 2 * time.Second
 	nodeStatusUpdateRetry     = 5
 )
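
nodeStatusUpdateRetry bounds how many attempts a single posting cycle makes before giving up. A minimal sketch of that retry pattern (updateNodeStatus and tryUpdate here are illustrative stand-ins for the kubelet's wrapper around tryUpdateNodeStatus):

package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // as in the const block above

// updateNodeStatus retries a single status post a bounded number of times.
func updateNodeStatus(tryUpdate func() error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdate(); err == nil {
			return nil
		}
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	calls := 0
	err := updateNodeStatus(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient apiserver error")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
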
@@ -1837,20 +1847,23 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 		node.Spec.Capacity = CapacityFromMachineInfo(info)
 	}
 
+	currentTime := util.Now()
 	newCondition := api.NodeCondition{
 		Type:          api.NodeReady,
 		Status:        api.ConditionFull,
 		Reason:        fmt.Sprintf("kubelet is posting ready status"),
-		LastProbeTime: util.Now(),
+		LastProbeTime: currentTime,
 	}
 	updated := false
 	for i := range node.Status.Conditions {
 		if node.Status.Conditions[i].Type == api.NodeReady {
+			newCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
 			node.Status.Conditions[i] = newCondition
 			updated = true
 		}
 	}
 	if !updated {
+		newCondition.LastTransitionTime = currentTime
 		node.Status.Conditions = append(node.Status.Conditions, newCondition)
 	}
 
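
The three added lines in this kubelet hunk implement one rule: LastProbeTime always advances, while LastTransitionTime is carried over from the previous ready condition and only initialized when the condition is first created. A standalone sketch of the same bookkeeping (nodeCondition and setReadyCondition are simplified stand-ins for api.NodeCondition and tryUpdateNodeStatus, and the slice is assumed to hold only ready conditions):

package main

import (
	"fmt"
	"time"
)

// Simplified stand-in for api.NodeCondition.
type nodeCondition struct {
	Status             string
	LastProbeTime      time.Time
	LastTransitionTime time.Time
}

// setReadyCondition refreshes the ready condition: the probe time always moves
// to now, the transition time is inherited from the existing condition, and only
// a brand-new condition gets now as its transition time.
func setReadyCondition(conditions []nodeCondition, now time.Time) []nodeCondition {
	newCond := nodeCondition{Status: "Full", LastProbeTime: now}
	for i := range conditions {
		newCond.LastTransitionTime = conditions[i].LastTransitionTime
		conditions[i] = newCond
		return conditions
	}
	newCond.LastTransitionTime = now
	return append(conditions, newCond)
}

func main() {
	t0 := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
	conds := setReadyCondition(nil, t0)
	conds = setReadyCondition(conds, t0.Add(2*time.Second))
	// Probe time moved forward, transition time stayed at t0.
	fmt.Println(conds[0].LastProbeTime, conds[0].LastTransitionTime)
}
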
@@ -3106,6 +3106,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 					Status:             api.ConditionFull,
 					Reason:             fmt.Sprintf("kubelet is posting ready status"),
 					LastProbeTime:      util.Time{},
+					LastTransitionTime: util.Time{},
 				},
 			},
 			NodeInfo: api.NodeSystemInfo{
@@ -3128,7 +3129,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	if updatedNode.Status.Conditions[0].LastProbeTime.IsZero() {
 		t.Errorf("unexpected zero last probe timestamp")
 	}
+	if updatedNode.Status.Conditions[0].LastTransitionTime.IsZero() {
+		t.Errorf("unexpected zero last transition timestamp")
+	}
 	updatedNode.Status.Conditions[0].LastProbeTime = util.Time{}
+	updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
 	if !reflect.DeepEqual(expectedNode, updatedNode) {
 		t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
 	}
@@ -3155,6 +3160,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 						Status:             api.ConditionFull,
 						Reason:             fmt.Sprintf("kubelet is posting ready status"),
 						LastProbeTime:      util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+						LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 					},
 				},
 			},
@@ -3177,6 +3183,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 					Status:             api.ConditionFull,
 					Reason:             fmt.Sprintf("kubelet is posting ready status"),
 					LastProbeTime:      util.Time{}, // placeholder
+					LastTransitionTime: util.Time{}, // placeholder
 				},
 			},
 			NodeInfo: api.NodeSystemInfo{
@@ -3196,11 +3203,16 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	if !ok {
 		t.Errorf("unexpected object type")
 	}
+	// Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same.
 	if reflect.DeepEqual(updatedNode.Status.Conditions[0].LastProbeTime, util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)) {
-		t.Errorf("expected \n%v\n, got \n%v", updatedNode.Status.Conditions[0].LastProbeTime,
+		t.Errorf("expected \n%v\n, got \n%v", util.Now(), util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
+	}
+	if !reflect.DeepEqual(updatedNode.Status.Conditions[0].LastTransitionTime, util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)) {
+		t.Errorf("expected \n%v\n, got \n%v", updatedNode.Status.Conditions[0].LastTransitionTime,
 			util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
 	}
 	updatedNode.Status.Conditions[0].LastProbeTime = util.Time{}
+	updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
 	if !reflect.DeepEqual(expectedNode, updatedNode) {
 		t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
 	}