Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-04 04:08:16 +00:00

	Merge pull request #59727 from wgliang/master.time
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

should use time.Since instead of time.Now().Sub

**What this PR does / why we need it**: should use time.Since instead of time.Now().Sub

**Special notes for your reviewer**:
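For context, `time.Since(t)` in the Go standard library is effectively defined as `time.Now().Sub(t)`, so this is a pure readability cleanup with no behavioral change. A minimal illustrative sketch (not part of this PR):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)

	// Both expressions measure the elapsed time since start;
	// they differ only by the nanoseconds between the two calls.
	verbose := time.Now().Sub(start)
	concise := time.Since(start) // preferred form

	fmt.Println(verbose, concise)
}
```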
@@ -45,7 +45,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
 	return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
 		start := time.Now()
 		gce.operationPollRateLimiter.Accept()
-		duration := time.Now().Sub(start)
+		duration := time.Since(start)
 		if duration > 5*time.Second {
 			glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
 		}
@@ -57,7 +57,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
 
 		done := opIsDone(pollOp)
 		if done {
-			duration := time.Now().Sub(opStart)
+			duration := time.Since(opStart)
 			if duration > 1*time.Minute {
 				// Log the JSON. It's cleaner than the %v structure.
 				enc, err := pollOp.MarshalJSON()
@@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool {
 func (tc *TokenCleaner) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -169,7 +169,7 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
 func (cc *CertificateController) syncFunc(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
 	}()
 	csr, err := cc.csrLister.Get(key)
 	if errors.IsNotFound(err) {
@@ -1135,7 +1135,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
 func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
 func (dc *DisruptionController) sync(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -385,7 +385,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 func (e *EndpointController) syncService(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -434,7 +434,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 func (jm *JobController) syncJob(key string) (bool, error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
 	}()
 
 	ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -163,7 +163,7 @@ func (nm *NamespaceController) worker() {
 func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, err := nm.lister.Get(key)
@@ -297,7 +297,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
 func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -173,13 +173,13 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 					// Ensure that we don't have more than maxConcurrentRouteCreations
 					// CreateRoute calls in flight.
 					rateLimiter <- struct{}{}
-					glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
+					glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
 					err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
 					<-rateLimiter
 
 					rc.updateNetworkingCondition(nodeName, err == nil)
 					if err != nil {
-						msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
+						msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
 						if rc.recorder != nil {
 							rc.recorder.Eventf(
 								&v1.ObjectReference{
@@ -218,9 +218,9 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
 				defer wg.Done()
 				glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
 				if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
-					glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime), err)
+					glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
 				} else {
-					glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime))
+					glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
 				}
 			}(route, time.Now())
 		}
@@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {
 func (c *ServiceAccountsController) syncNamespace(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
 	}()
 
 	ns, err := c.nsLister.Get(key)
@@ -415,7 +415,7 @@ func (ssc *StatefulSetController) worker() {
 func (ssc *StatefulSetController) sync(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -145,7 +145,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
 	glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
 	}()
 
 	pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
@@ -123,7 +123,7 @@ func (c *Controller) processPV(pvName string) error {
 	glog.V(4).Infof("Processing PV %s", pvName)
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Now().Sub(startTime))
+		glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
 	}()
 
 	pv, err := c.pvLister.Get(pvName)
@@ -337,7 +337,7 @@ func (p *progressReporter) start() {
 			case <-ticker.C:
 				progress, timestamp := p.progress.get()
 				// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
-				if time.Now().Sub(timestamp) > p.imagePullProgressDeadline {
+				if time.Since(timestamp) > p.imagePullProgressDeadline {
 					glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
 					p.cancel()
 					return
@@ -503,7 +503,8 @@ func translateTimestamp(timestamp metav1.Time) string {
 	if timestamp.IsZero() {
 		return "<unknown>"
 	}
-	return duration.ShortHumanDuration(time.Now().Sub(timestamp.Time))
+
+	return duration.ShortHumanDuration(time.Since(timestamp.Time))
 }
 
 var (
@@ -159,7 +159,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 		}
 		if !sessionAffinityReset {
 			sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
-			if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+			if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
 				// Affinity wins.
 				endpoint := sessionAffinity.endpoint
 				sessionAffinity.lastUsed = time.Now()
@@ -378,7 +378,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
 		return
 	}
 	for ip, affinity := range state.affinity.affinityMap {
-		if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+		if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
 			glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
 			delete(state.affinity.affinityMap, ip)
 		}
@@ -149,7 +149,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
 		}
 		if !sessionAffinityReset {
 			sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
-			if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+			if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
 				// Affinity wins.
 				endpoint := sessionAffinity.endpoint
 				sessionAffinity.lastUsed = time.Now()
@@ -366,7 +366,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
 		return
 	}
 	for ip, affinity := range state.affinity.affinityMap {
-		if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+		if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
 			glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
 			delete(state.affinity.affinityMap, ip)
 		}
@@ -399,7 +399,7 @@ func (l *SSHTunnelList) Dial(net, addr string) (net.Conn, error) {
 	id := mathrand.Int63() // So you can match begins/ends in the log.
 	glog.Infof("[%x: %v] Dialing...", id, addr)
 	defer func() {
-		glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Now().Sub(start))
+		glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start))
 	}()
 	tunnel, err := l.pickTunnel(strings.Split(addr, ":")[0])
 	if err != nil {
@@ -215,7 +215,7 @@ func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc re
 
 		routeFunc(request, response)
 
-		MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+		MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
 	})
 }
 
@@ -237,7 +237,7 @@ func InstrumentHandlerFunc(verb, resource, subresource, scope string, handler ht
 
 		handler(w, req)
 
-		MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+		MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
 	}
 }
 
@@ -95,7 +95,7 @@ func (c *cachedGetter) Token() (string, error) {
 
 	var err error
 	// no token or exceeds the TTL
-	if c.token == "" || time.Now().Sub(c.born) > c.ttl {
+	if c.token == "" || time.Since(c.born) > c.ttl {
 		c.token, err = c.tokenGetter.Token()
 		if err != nil {
 			return "", fmt.Errorf("failed to get token: %s", err)
@@ -1141,7 +1141,7 @@ func deleteNS(c clientset.Interface, dynamicClient dynamic.DynamicInterface, nam
 		// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
 		return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
 	}
-	Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
+	Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime))
 	return nil
 }
 
@@ -3053,13 +3053,13 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
 	if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil {
 		return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
 	}
-	deleteTime := time.Now().Sub(startTime)
+	deleteTime := time.Since(startTime)
 	Logf("Deleting %v %s took: %v", kind, name, deleteTime)
 	err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
-	terminatePodTime := time.Now().Sub(startTime) - deleteTime
+	terminatePodTime := time.Since(startTime) - deleteTime
 	Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
 	// this is to relieve namespace controller's pressure when deleting the
 	// namespace after a test.
@@ -3067,7 +3067,7 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 	}
-	gcPodTime := time.Now().Sub(startTime) - terminatePodTime
+	gcPodTime := time.Since(startTime) - terminatePodTime
 	Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
 	return nil
 }
@@ -3105,7 +3105,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
 		return err
 	}
-	deleteTime := time.Now().Sub(startTime)
+	deleteTime := time.Since(startTime)
 	Logf("Deleting %v %s took: %v", kind, name, deleteTime)
 
 	var interval, timeout time.Duration
@@ -3129,7 +3129,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 	if err != nil {
 		return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
 	}
-	terminatePodTime := time.Now().Sub(startTime) - deleteTime
+	terminatePodTime := time.Since(startTime) - deleteTime
 	Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
 
 	err = waitForPodsGone(ps, interval, 10*time.Minute)
@@ -276,7 +276,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
 	logStopCh := make(chan struct{})
 	go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
 	wg.Wait()
-	startupTime := time.Now().Sub(startTime)
+	startupTime := time.Since(startTime)
 	close(logStopCh)
 	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
 	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
@@ -29,11 +29,11 @@ func main() {
 	started := time.Now()
 	http.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(200)
-		data := (time.Now().Sub(started)).String()
+		data := (time.Since(started)).String()
 		w.Write([]byte(data))
 	})
 	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
-		duration := time.Now().Sub(started)
+		duration := time.Since(started)
 		if duration.Seconds() > 10 {
 			w.WriteHeader(500)
 			w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
@@ -45,7 +45,7 @@ func main() {
 	duration := time.Duration(*durationSec) * time.Second
 	start := time.Now()
 	first := systemstat.GetProcCPUSample()
-	for time.Now().Sub(start) < duration {
+	for time.Since(start) < duration {
 		cpu := systemstat.GetProcCPUAverage(first, systemstat.GetProcCPUSample(), systemstat.GetUptime().Uptime)
 		if cpu.TotalPct < millicoresPct {
 			doSomething()