Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-04 12:18:16 +00:00
	kubernetes: fix printf format errors
These are all flagged by Go 1.11's more accurate printf checking in go vet, which runs as part of go test. Lubomir I. Ivanov <neolit123@gmail.com> applied an amend for: pkg/cloudprovider/providers/vsphere/nodemanager.go
This commit is contained in:

committed by Lubomir I. Ivanov

parent 3b269e182d
commit 2bd91dda64
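For illustration, a minimal sketch of the error classes this commit fixes, using stdlib fmt rather than the vendored glog (the names below are hypothetical, not from the commit); go vet's printf check flags each of these statically:

package main

import "fmt"

func main() {
	name, mounted := "node-1", true

	// Wrong verb for the argument type: %s applied to a bool.
	// vet reports a wrong-type arg; the fix is %v (or %t).
	fmt.Printf("mounted: %v\n", mounted) // was: %s

	// Format directives in a non-formatting call: Print/Info-style
	// functions substitute nothing, so a "%q" would appear literally
	// in the output. vet flags the stray directive; the fix is the *f
	// variant (Printf here, Infof/Warningf for glog in the hunks below).
	fmt.Printf("hostname-override set to %q\n", name) // was: fmt.Print

	// Verb/argument count mismatch: a missing arg surfaces at runtime
	// as "%!q(MISSING)"; vet catches it at build time.
	fmt.Println(fmt.Errorf("volume %q on node %q failed", "vol-1", name))
}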
				
@@ -97,7 +97,7 @@ func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string {
 	// Make sure the node name we're passed will work with Kubelet
 	if opts.nodeRegOpts.Name != "" && opts.nodeRegOpts.Name != opts.defaultHostname {
-		glog.V(1).Info("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
+		glog.V(1).Infof("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name)
 		kubeletFlags["hostname-override"] = opts.nodeRegOpts.Name
 	}

@@ -2358,7 +2358,7 @@ func (c *Cloud) GetVolumeLabels(volumeName KubernetesVolumeID) (map[string]strin
 	labels := make(map[string]string)
 	az := aws.StringValue(info.AvailabilityZone)
 	if az == "" {
-		return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
+		return nil, fmt.Errorf("volume did not have AZ information: %q", aws.StringValue(info.VolumeId))
 	}

 	labels[kubeletapis.LabelZoneFailureDomain] = az

@@ -119,7 +119,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type
 	info, err := disk.describeVolume()

 	if err != nil {
-		glog.Warning("Error describing volume %s with %v", diskName, err)
+		glog.Warningf("Error describing volume %s with %v", diskName, err)
 		awsDiskInfo.volumeState = "unknown"
 		return awsDiskInfo, false, err
 	}

@@ -845,9 +845,9 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro
 		return
 	}

-	glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): start", resourceGroupName)
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName)
 	defer func() {
-		glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q,%q): end", resourceGroupName)
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName)
 	}()

 	mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID)

@@ -876,9 +876,9 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context,
 		return
 	}

-	glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs)
 	defer func() {
-		glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%q): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
+		glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs)
 	}()

 	mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID)

@@ -262,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
 func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) {
 	isInternal := requiresInternalLoadBalancer(service)
 	serviceName := getServiceName(service)
-	glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal)
+	glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
 	vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
 	if err != nil {
 		glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)

@@ -842,7 +842,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 // This entails adding required, missing SecurityRules and removing stale rules.
 func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) {
 	serviceName := getServiceName(service)
-	glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q lbName=%q", serviceName, clusterName)
+	glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName)

 	ports := service.Spec.Ports
 	if ports == nil {

@@ -475,7 +475,7 @@ func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineSca
 			glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(4).Info("backoff: success for scale set %q", name)
+		glog.V(4).Infof("backoff: success for scale set %q", name)

 		if cached != nil {
 			exists = true

@@ -845,7 +845,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd

 				ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID)
 				if err != nil {
-					glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it")
+					glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID)
 					continue
 				}

@@ -99,7 +99,7 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {

 			for _, vm := range vms {
 				if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
-					glog.Warningf("failed to get computerName for vmssVM (%q)", vm.Name)
+					glog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
 					continue
 				}

@@ -169,7 +169,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) {
 	if am.isManagedAddress(addr) {
 		// The address with this name is checked at the beginning of 'HoldAddress()', but for some reason
 		// it was re-created by this point. May be possible that two controllers are running.
-		glog.Warning("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
+		glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP)
 	} else {
 		// If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it.
 		glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description)

@@ -176,13 +176,13 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
 				defer cancel()
 				vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID)
 				if err != nil {
-					glog.V(4).Infof("Error %q while looking for vm=%+v in vc=%s and datacenter=%s",
-						err, node.Name, vm, res.vc, res.datacenter.Name())
+					glog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v",
+						vm, res.vc, res.datacenter.Name(), err)
 					if err != vclib.ErrNoVMFound {
 						setGlobalErr(err)
 					} else {
 						glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s",
-							node.Name, res.vc, res.datacenter.Name(), err)
+							node.Name, res.vc, res.datacenter.Name())
 					}
 					continue
 				}

@@ -309,7 +309,7 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
 		if err != nil {
 			return nil, err
 		}
-		glog.V(4).Infof("Updated NodeInfo %q for node %q.", nodeInfo, nodeName)
+		glog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName)
 		nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID})
 	}
 	return nodeDetails, nil

@@ -102,7 +102,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
 	dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName)
 	if err != nil {
 		// Create a dummy VM
-		glog.V(1).Info("Creating Dummy VM: %q", dummyVMFullName)
+		glog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName)
 		dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName)
 		if err != nil {
 			glog.Errorf("Failed to create Dummy VM. err: %v", err)

@@ -132,7 +132,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
 		fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err)
 		if fileAlreadyExist {
 			//Skip error and continue to detach the disk as the disk was already created on the datastore.
-			glog.V(vclib.LogLevel).Info("File: %v already exists", vmdisk.diskPath)
+			glog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath)
 		} else {
 			glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err)
 			return "", err

@@ -143,7 +143,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto
 	if err != nil {
 		if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist {
 			// Skip error if disk was already detached from the dummy VM but still present on the datastore.
-			glog.V(vclib.LogLevel).Info("File: %v is already detached", vmdisk.diskPath)
+			glog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath)
 		} else {
 			glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err)
 			return "", err

@@ -846,7 +846,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
 			if err == nil {
 				glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
 				diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
-				glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", convertToString(nodeName), diskUUID, err)
+				glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err)
 			}
 		}
 	}

@@ -963,7 +963,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
 				volPath,
 				vSphereInstance)
 		}
-		glog.V(4).Infof("DiskIsAttached result: %q and error: %q, for volume: %q", attached, err, volPath)
+		glog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath)
 		return attached, err
 	}
 	requestTime := time.Now()

@@ -1054,7 +1054,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
 			return nodesToRetry, nil
 		}

-		glog.V(4).Info("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
+		glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes)
 		// Create context
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()

@@ -479,7 +479,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
 			return nodesToRetry, err
 		}
 		nodeUUID = strings.ToLower(nodeUUID)
-		glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %s", nodeName, nodeUUID, vmMoMap)
+		glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
 		vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
 	}
 	return nodesToRetry, nil

@@ -460,7 +460,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {

 	switch {
 	case len(solid) != 0:
-		glog.V(2).Infof("object %s has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
+		glog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
 		if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
 			return nil
 		}

@@ -505,7 +505,7 @@ func (adc *attachDetachController) processVolumesInUse(
 		err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
 		if err != nil {
 			glog.Warningf(
-				"SetVolumeMountedByNode(%q, %q, %q) returned an error: %v",
+				"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
 				attachedVolume.VolumeName, nodeName, mounted, err)
 		}
 	}

@@ -68,7 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli

 		glog.V(10).Infof(
 			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-			volumeSpec.Name,
+			volumeSpec.Name(),
 			pvName,
 			podNamespace,
 			pvcSource.ClaimName,

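The volumeSpec.Name change above (and its twin in the later createVolumeSpec hunk) is a different vet class: passing a method value where a formatted operand is expected. A minimal sketch with a hypothetical stand-in type, not the Kubernetes one:

package main

import "fmt"

// spec stands in for volume.Spec; only the method shape matters here.
type spec struct{ name string }

func (s spec) Name() string { return s.name }

func main() {
	v := spec{name: "pv-1"}
	// Without the parentheses, v.Name is a func value and fmt formats
	// it like a pointer (an address, not the name); vet reports that
	// the arg is a func value, not called. The commit adds the "()".
	fmt.Printf("volumeSpec (%v)\n", v.Name()) // was: v.Name
}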
@@ -163,7 +163,7 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
 	pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
 	_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
 	if err != nil {
-		glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name)
+		glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
 		return err
 	}
 	glog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)

@@ -277,7 +277,7 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
 	case *mountMode == v1.MountPropagationNone:
 		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
 	default:
-		return 0, fmt.Errorf("invalid MountPropagation mode: %q", mountMode)
+		return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
 	}
 }

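The translateMountPropagation hunk above dereferences a pointer before formatting: %q on a *MountPropagationMode is a wrong-type arg under vet. A sketch with a stand-in string type (Mode is hypothetical):

package main

import "fmt"

// Mode stands in for v1.MountPropagationMode, a string-based type.
type Mode string

func describe(mode *Mode) error {
	// %q on the pointer itself is flagged by vet and renders roughly
	// as "%!q(*main.Mode=0x...)"; dereferencing yields the
	// string-based value %q expects.
	return fmt.Errorf("invalid MountPropagation mode: %q", *mode) // was: mode
}

func main() {
	m := Mode("Bidirectional")
	fmt.Println(describe(&m))
}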
@@ -513,7 +513,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(

 		glog.V(5).Infof(
 			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
-			volumeSpec.Name,
+			volumeSpec.Name(),
 			pvName,
 			podNamespace,
 			pvcSource.ClaimName,

@@ -581,7 +581,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
 		err = ipset.DestroySet(set.name)
 		if err != nil {
 			if !utilipset.IsNotFoundError(err) {
-				glog.Errorf("Error removing ipset %s, error: %v", set, err)
+				glog.Errorf("Error removing ipset %s, error: %v", set.name, err)
 				encounteredError = true
 			}
 		}

@@ -552,7 +552,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
 	}

 	if len(pttype) > 0 {
-		glog.V(4).Infof("Disk %s detected partition table type: %s", pttype)
+		glog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype)
 		// Returns a special non-empty string as filesystem type, then kubelet
 		// will not format it.
 		return "unknown data, probably partitions", nil

@@ -610,7 +610,7 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID
 		return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID")
 	}

-	glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)
+	glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret)

 	return expected.isAttached, expected.instanceID, expected.ret
 }

			|||||||
@@ -365,7 +365,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri
 | 
				
			|||||||
		if err.Error() != volumepathhandler.ErrDeviceNotFound {
 | 
							if err.Error() != volumepathhandler.ErrDeviceNotFound {
 | 
				
			||||||
			return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
 | 
								return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		glog.Warning("fc: loopback for destination path: %s not found", dstPath)
 | 
							glog.Warningf("fc: loopback for destination path: %s not found", dstPath)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Detach volume from kubelet node
 | 
						// Detach volume from kubelet node
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -516,7 +516,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string)
 | 
				
			|||||||
		if err.Error() != volumepathhandler.ErrDeviceNotFound {
 | 
							if err.Error() != volumepathhandler.ErrDeviceNotFound {
 | 
				
			||||||
			return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
 | 
								return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		glog.Warning("iscsi: loopback for device: %s not found", device)
 | 
							glog.Warningf("iscsi: loopback for device: %s not found", device)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	// Detach a volume from kubelet node
 | 
						// Detach a volume from kubelet node
 | 
				
			||||||
	err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
 | 
						err = util.detachISCSIDisk(c.exec, portals, iqn, iface, volName, initiatorName, found)
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -211,7 +211,7 @@ func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource
 | 
				
			|||||||
	newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB)
 | 
						newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB)
 | 
				
			||||||
	if vol.Spec.Size >= newSizeInBytes {
 | 
						if vol.Spec.Size >= newSizeInBytes {
 | 
				
			||||||
		glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
 | 
							glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
 | 
				
			||||||
			"requested size: %d. Skipping resize.", vol.Spec.Size, newSizeInBytes)
 | 
								"requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes)
 | 
				
			||||||
		return nil
 | 
							return nil
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -140,7 +140,7 @@ func getDefaultClass(lister storagelisters.StorageClassLister) (*storage.Storage
 | 
				
			|||||||
		return nil, nil
 | 
							return nil, nil
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if len(defaultClasses) > 1 {
 | 
						if len(defaultClasses) > 1 {
 | 
				
			||||||
		glog.V(4).Infof("getDefaultClass %s defaults found", len(defaultClasses))
 | 
							glog.V(4).Infof("getDefaultClass %d defaults found", len(defaultClasses))
 | 
				
			||||||
		return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
 | 
							return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	return defaultClasses[0], nil
 | 
						return defaultClasses[0], nil
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -166,5 +166,5 @@ func NewInternalError(reason string) InternalError {
 | 
				
			|||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
func NewInternalErrorf(format string, a ...interface{}) InternalError {
 | 
					func NewInternalErrorf(format string, a ...interface{}) InternalError {
 | 
				
			||||||
	return InternalError{fmt.Sprintf(format, a)}
 | 
						return InternalError{fmt.Sprintf(format, a...)}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 
 | 
				
			|||||||
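The final NewInternalErrorf hunk is yet another vet class: forwarding a []interface{} to a printf-like function without the ellipsis, so the whole slice binds to the first verb. A sketch of the before/after behavior (wrapf is a hypothetical helper):

package main

import "fmt"

func wrapf(format string, a ...interface{}) error {
	// Without "...", the slice is passed as a single operand: the
	// output reads roughly "disk [vol-1 node-1] on node %!s(MISSING)"
	// and vet reports a missing ... in args forwarded to a
	// printf-like function.
	return fmt.Errorf(format, a...) // was: fmt.Errorf(format, a)
}

func main() {
	fmt.Println(wrapf("disk %s on node %s", "vol-1", "node-1"))
}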