mirror of https://github.com/optim-enterprises-bv/kubernetes.git
synced 2025-11-04 04:08:16 +00:00
Merge pull request #105839 from nilo19/bug/cherry-pick-856

fix: remove VMSS and VMSS instances from SLB backend pool only when necessary
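In brief, as I read the diff below (the commit carries no further description): VMSet.EnsureBackendPoolDeleted gains a deleteFromVMSet bool parameter, so the VMSS model is detached from the SLB backend pool only when the whole pool is being torn down; and Cloud.ShouldNodeExcludedFromLoadBalancer now takes a node name and answers from the informer-maintained excludeLoadBalancerNodes cache instead of reading labels off a *v1.Node. A sketch of the two call shapes (variable names are illustrative; the real call sites appear in the hunks below):

	// Decoupling a single unwanted node: other nodes of the same VMSS stay
	// in the pool, so the VMSS itself must remain attached.
	err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)

	// Tearing the backend pool down entirely: also remove the pool
	// reference from the VMSS model.
	err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)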
@@ -278,6 +278,8 @@ type Cloud struct {
 	nodeResourceGroups map[string]string
 	// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
 	unmanagedNodes sets.String
+	// excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
+	excludeLoadBalancerNodes sets.String
 	// nodeInformerSynced is for determining if the informer has synced.
 	nodeInformerSynced cache.InformerSynced
@@ -343,6 +345,7 @@ func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) {
 		nodeZones:                map[string]sets.String{},
 		nodeResourceGroups:       map[string]string{},
 		unmanagedNodes:           sets.NewString(),
+		excludeLoadBalancerNodes: sets.NewString(),
 		routeCIDRs:               map[string]string{},
 	}
@@ -746,10 +749,6 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
 		UpdateFunc: func(prev, obj interface{}) {
 			prevNode := prev.(*v1.Node)
 			newNode := obj.(*v1.Node)
-			if newNode.Labels[v1.LabelFailureDomainBetaZone] ==
-				prevNode.Labels[v1.LabelFailureDomainBetaZone] {
-				return
-			}
 			if newNode.Labels[v1.LabelTopologyZone] ==
 				prevNode.Labels[v1.LabelTopologyZone] {
 				return
@@ -817,6 +816,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
 		managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
 		if ok && managed == "false" {
 			az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
+			az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
 		}
+
+		// Remove from excludeLoadBalancerNodes cache.
+		if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
+			az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
+		}
 	}
 
@@ -843,6 +848,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
 		managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
 		if ok && managed == "false" {
 			az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
+			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
 		}
+
+		// Add to excludeLoadBalancerNodes cache.
+		if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
+			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
+		}
 	}
 }
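The two updateNodeCaches hunks above keep the new cache in step with node churn: a deleted node is dropped from excludeLoadBalancerNodes, and an added node is inserted when it is marked unmanaged or carries the exclude-balancers label. Restated as a standalone predicate (my paraphrase of the insert-side logic, not code from the PR):

	// excludedFromLB mirrors the conditions under which updateNodeCaches
	// inserts a node into az.excludeLoadBalancerNodes.
	func excludedFromLB(node *v1.Node) bool {
		// Nodes explicitly marked as not managed by the Azure provider.
		if managed, ok := node.ObjectMeta.Labels[managedByAzureLabel]; ok && managed == "false" {
			return true
		}
		// Nodes carrying node.kubernetes.io/exclude-from-external-load-balancers.
		_, hasExcludeBalancerLabel := node.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]
		return hasExcludeBalancerLabel
	}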
@@ -948,16 +959,23 @@ func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
 	return sets.NewString(az.unmanagedNodes.List()...), nil
 }
 
-// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
-func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
-	labels := node.ObjectMeta.Labels
-	if rg, ok := labels[externalResourceGroupLabel]; ok && !strings.EqualFold(rg, az.ResourceGroup) {
-		return true
+// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
+func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
+	// Kubelet won't set az.nodeInformerSynced, always return nil.
+	if az.nodeInformerSynced == nil {
+		return false, nil
 	}
 
-	if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
-		return true
+	az.nodeCachesLock.RLock()
+	defer az.nodeCachesLock.RUnlock()
+	if !az.nodeInformerSynced() {
+		return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
 	}
 
-	return false
+	// Return true if the node is in external resource group.
+	if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
+		return true, nil
+	}
+
+	return az.excludeLoadBalancerNodes.Has(nodeName), nil
 }
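Note the signature change: the method now keys off a node name and the informer-backed caches, and it can fail when the informer has not synced, which is why every caller below grows an error branch. A usage sketch against the test fake from the next hunk (illustrative, not a test from the PR):

	az := GetTestCloud(ctrl)
	az.excludeLoadBalancerNodes = sets.NewString("vmss-vm-000000")

	excluded, err := az.ShouldNodeExcludedFromLoadBalancer("vmss-vm-000000")
	// excluded == true, err == nil: the node sits in the exclusion cache and
	// the fake's nodeInformerSynced always reports true.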
 
@@ -70,6 +70,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
 		nodeInformerSynced:       func() bool { return true },
 		nodeResourceGroups:       map[string]string{},
 		unmanagedNodes:           sets.NewString(),
+		excludeLoadBalancerNodes: sets.NewString(),
 		routeCIDRs:               map[string]string{},
 		eventRecorder:            &record.FakeRecorder{},
 	}
 
@@ -335,7 +335,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer,
 			},
 		}
 		// decouple the backendPool from the node
-		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
+		err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
 		if err != nil {
 			return nil, err
 		}
@@ -1075,15 +1075,6 @@ func (az *Cloud) findFrontendIPConfigOfService(
 	return nil, false, nil
 }
 
-func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
-	for _, node := range nodes {
-		if strings.EqualFold(nodeName, node.Name) {
-			return true
-		}
-	}
-	return false
-}
-
 // reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
 // This also reconciles the Service's Ports  with the LoadBalancer config.
 // This entails adding rules/probes for expected Ports and removing stale rules/ports.
@@ -1139,7 +1130,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 					// would not be in the `nodes` slice. We need to check the nodes that
 					// have been added to the LB's backendpool, find the unwanted ones and
 					// delete them from the pool.
-					if !nodeNameInNodes(nodeName, nodes) {
+					shouldExcludeLoadBalancer, err := az.ShouldNodeExcludedFromLoadBalancer(nodeName)
+					if err != nil {
+						klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+						return nil, err
+					}
+					if shouldExcludeLoadBalancer {
 						klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName)
 						// construct a backendPool that only contains the IP config of the node to be deleted
 						backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)})
@@ -1156,7 +1152,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 					}
 					vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
 					// decouple the backendPool from the node
-					err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
+					err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
 					if err != nil {
 						return nil, err
 					}
@@ -1440,7 +1436,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
 				// do nothing for availability set
 				lb.BackendAddressPools = nil
 			}
-			err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
+			err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
 			if err != nil {
 				klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
 				return nil, err
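Taken together, the three load-balancer call sites above pick the flag by scope: cleanBackendpoolForPrimarySLB and the full teardown path pass true because the pool itself is going away, while the per-node decouple inside reconcileLoadBalancer passes false so the VMSS stays attached for its remaining instances. That mapping is, as I read it, the "only when necessary" of the commit message.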
 
@@ -915,7 +915,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
 			continue
 		}
 
-		if as.ShouldNodeExcludedFromLoadBalancer(node) {
+		shouldExcludeLoadBalancer, err := as.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
 			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
 			continue
 		}
@@ -940,7 +945,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
 }
 
 // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
 	// Returns nil if backend address pools already deleted.
 	if backendAddressPools == nil {
 		return nil
 
@@ -34,6 +34,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
 	"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
@@ -1414,6 +1415,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 		name           string
 		service        *v1.Service
 		nodes          []*v1.Node
+		excludeLBNodes []string
 		nodeName       string
 		backendPoolID  string
 		nicName        string
@@ -1442,6 +1444,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 			name:           "EnsureHostsInPool should skip if node is master node",
 			service:        &v1.Service{},
 			nodeName:       "vm2",
+			excludeLBNodes: []string{"vm2"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: meta.ObjectMeta{
@@ -1458,6 +1461,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 			name:           "EnsureHostsInPool should skip if node is in external resource group",
 			service:        &v1.Service{},
 			nodeName:       "vm3",
+			excludeLBNodes: []string{"vm3"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: meta.ObjectMeta{
@@ -1474,6 +1478,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 			name:           "EnsureHostsInPool should skip if node is unmanaged",
 			service:        &v1.Service{},
 			nodeName:       "vm4",
+			excludeLBNodes: []string{"vm4"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: meta.ObjectMeta{
@@ -1518,6 +1523,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) {
 	for _, test := range testCases {
 		cloud.Config.LoadBalancerSku = loadBalancerSkuStandard
 		cloud.Config.ExcludeMasterFromStandardLB = to.BoolPtr(true)
+		cloud.excludeLoadBalancerNodes = sets.NewString(test.excludeLBNodes...)
 
 		testVM := buildDefaultTestVirtualMachine(availabilitySetID, []string{test.nicID})
 		testNIC := buildDefaultTestInterface(false, []string{backendAddressPoolID})
@@ -1744,7 +1750,7 @@ func TestStandardEnsureBackendPoolDeleted(t *testing.T) {
 		mockNICClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
 		cloud.InterfacesClient = mockNICClient
 
-		err := cloud.VMSet.EnsureBackendPoolDeleted(&service, backendPoolID, vmSetName, test.backendAddressPools)
+		err := cloud.VMSet.EnsureBackendPoolDeleted(&service, backendPoolID, vmSetName, test.backendAddressPools, true)
 		assert.NoError(t, err, test.desc)
 	}
 }
 
@@ -3070,17 +3070,6 @@ func TestCanCombineSharedAndPrivateRulesInSameGroup(t *testing.T) {
 	}
 }
 
-// TODO: sanity check if the same IP address incorrectly gets put in twice?
-// (shouldn't happen but...)
-
-// func TestIfServiceIsEditedFromOwnRuleToSharedRuleThenOwnRuleIsDeletedAndSharedRuleIsCreated(t *testing.T) {
-// 	t.Error()
-// }
-
-// func TestIfServiceIsEditedFromSharedRuleToOwnRuleThenItIsRemovedFromSharedRuleAndOwnRuleIsCreated(t *testing.T) {
-// 	t.Error()
-// }
-
 func TestGetResourceGroupFromDiskURI(t *testing.T) {
 	tests := []struct {
 		diskURL        string
@@ -3245,6 +3234,7 @@ func TestUpdateNodeCaches(t *testing.T) {
 	az.nodeZones = map[string]sets.String{zone: nodesInZone}
 	az.nodeResourceGroups = map[string]string{"prevNode": "rg"}
 	az.unmanagedNodes = sets.NewString("prevNode")
+	az.excludeLoadBalancerNodes = sets.NewString("prevNode")
 	az.nodeNames = sets.NewString("prevNode")
 
 	prevNode := v1.Node{
@@ -3263,6 +3253,7 @@ func TestUpdateNodeCaches(t *testing.T) {
 	assert.Equal(t, 0, len(az.nodeZones[zone]))
 	assert.Equal(t, 0, len(az.nodeResourceGroups))
 	assert.Equal(t, 0, len(az.unmanagedNodes))
+	assert.Equal(t, 0, len(az.excludeLoadBalancerNodes))
 	assert.Equal(t, 0, len(az.nodeNames))
 
 	newNode := v1.Node{
@@ -3271,6 +3262,7 @@ func TestUpdateNodeCaches(t *testing.T) {
 				v1.LabelTopologyZone:         zone,
 				externalResourceGroupLabel:   "true",
 				managedByAzureLabel:          "false",
+				v1.LabelNodeExcludeBalancers: "true",
 			},
 			Name: "newNode",
 		},
@@ -3280,6 +3272,7 @@ func TestUpdateNodeCaches(t *testing.T) {
 	assert.Equal(t, 1, len(az.nodeZones[zone]))
 	assert.Equal(t, 1, len(az.nodeResourceGroups))
 	assert.Equal(t, 1, len(az.unmanagedNodes))
+	assert.Equal(t, 1, len(az.excludeLoadBalancerNodes))
 	assert.Equal(t, 1, len(az.nodeNames))
 }
 
@@ -65,7 +65,7 @@ type VMSet interface {
 	// participating in the specified LoadBalancer Backend Pool.
 	EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
 	// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-	EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
+	EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
 
 	// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
 	AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error
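The interface change ripples through every VMSet implementation: availabilitySet and scaleSet below, plus the generated mock at the end of the diff. A conforming no-op stub would now look like this (hypothetical type, for illustration only):

	type noopVMSet struct{ VMSet }

	// EnsureBackendPoolDeleted satisfies the widened interface; deleteFromVMSet
	// tells the implementation whether to update the scale set model itself in
	// addition to detaching individual instances.
	func (n *noopVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
		return nil
	}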
 
@@ -745,11 +745,16 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
 			continue
 		}
 
-		if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
+		nodeName := nodes[nx].Name
+		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(nodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+			return nil, err
+		}
+		if shouldExcludeLoadBalancer {
 			continue
 		}
 
-		nodeName := nodes[nx].Name
 		ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
 		if err != nil {
 			return nil, err
@@ -1106,7 +1111,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
 			continue
 		}
 
-		if ss.ShouldNodeExcludedFromLoadBalancer(node) {
+		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(node.Name)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
 			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
 			continue
 		}
@@ -1249,7 +1259,12 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
 			continue
 		}
 
-		if ss.ShouldNodeExcludedFromLoadBalancer(node) {
+		shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+		if err != nil {
+			klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+			return err
+		}
+		if shouldExcludeLoadBalancer {
 			klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
 			continue
 		}
@@ -1569,7 +1584,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
 }
 
 // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
 	// Returns nil if backend address pools already deleted.
 	if backendAddressPools == nil {
 		return nil
@@ -1681,10 +1696,12 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
 	}
 
 	// Ensure the backendPoolID is also deleted on VMSS itself.
-	err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
-	if err != nil {
-		return err
+	if deleteFromVMSet {
+		err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
+		if err != nil {
+			return err
+		}
 	}
 
 	isOperationSucceeded = true
 	return nil
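This hunk is, as I read it, the heart of the fix: ensureBackendPoolDeletedFromVMSS, which detaches the backend pool from the scale set model itself, is now gated on deleteFromVMSet. Per-node decoupling (deleteFromVMSet=false) no longer removes the whole VMSS from the SLB backend pool, which could otherwise drop the scale set's remaining instances out of the pool.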
 
@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
 	cloudprovider "k8s.io/cloud-provider"
 	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
@@ -1406,12 +1407,14 @@ func TestGetAgentPoolScaleSets(t *testing.T) {
 
 	testCases := []struct {
 		description       string
+		excludeLBNodes    []string
 		nodes             []*v1.Node
 		expectedVMSSNames *[]string
 		expectedErr       error
 	}{
 		{
 			description:    "getAgentPoolScaleSets should return the correct vmss names",
+			excludeLBNodes: []string{"vmss-vm-000000", "vmss-vm-000001"},
 			nodes: []*v1.Node{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -1433,11 +1436,30 @@ func TestGetAgentPoolScaleSets(t *testing.T) {
 			},
 			expectedVMSSNames: &[]string{"vmss"},
 		},
+		{
+			description:    "getAgentPoolScaleSets should return the correct vmss names",
+			excludeLBNodes: []string{"vmss-vm-000001"},
+			nodes: []*v1.Node{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   "vmss-vm-000001",
+						Labels: map[string]string{v1.LabelNodeExcludeBalancers: "true"},
+					},
+				},
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "vmss-vm-000002",
+					},
+				},
+			},
+			expectedVMSSNames: &[]string{"vmss"},
+		},
 	}
 
 	for _, test := range testCases {
 		ss, err := newTestScaleSet(ctrl)
 		assert.NoError(t, err, "unexpected error when creating test VMSS")
+		ss.excludeLoadBalancerNodes = sets.NewString(test.excludeLBNodes...)
 
 		expectedVMSS := buildTestVMSS(testVMSSName, "vmss-vm-")
 		mockVMSSClient := ss.cloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
@@ -2471,7 +2493,7 @@ func TestEnsureBackendPoolDeleted(t *testing.T) {
 		mockVMSSVMClient.EXPECT().List(gomock.Any(), ss.ResourceGroup, testVMSSName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
 		mockVMSSVMClient.EXPECT().UpdateVMs(gomock.Any(), ss.ResourceGroup, testVMSSName, gomock.Any(), gomock.Any()).Return(test.vmClientErr).Times(test.expectedVMSSVMPutTimes)
 
-		err = ss.EnsureBackendPoolDeleted(&v1.Service{}, test.backendpoolID, testVMSSName, test.backendAddressPools)
+		err = ss.EnsureBackendPoolDeleted(&v1.Service{}, test.backendpoolID, testVMSSName, test.backendAddressPools, true)
 		assert.Equal(t, test.expectedErr, err != nil, test.description+", but an error occurs")
 	}
 }
@@ -2551,7 +2573,7 @@ func TestEnsureBackendPoolDeletedConcurrently(t *testing.T) {
 		i := i
 		id := id
 		testFunc = append(testFunc, func() error {
-			return ss.EnsureBackendPoolDeleted(&v1.Service{}, id, testVMSSNames[i], backendAddressPools)
+			return ss.EnsureBackendPoolDeleted(&v1.Service{}, id, testVMSSNames[i], backendAddressPools, true)
 		})
 	}
 	errs := utilerrors.AggregateGoroutines(testFunc...)
 
@@ -210,17 +210,17 @@ func (mr *MockVMSetMockRecorder) EnsureHostInPool(service, nodeName, backendPool
 }
 
 // EnsureBackendPoolDeleted mocks base method
-func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (m *MockVMSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools)
+	ret := m.ctrl.Call(m, "EnsureBackendPoolDeleted", service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
 // EnsureBackendPoolDeleted indicates an expected call of EnsureBackendPoolDeleted
-func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools interface{}) *gomock.Call {
+func (mr *MockVMSetMockRecorder) EnsureBackendPoolDeleted(service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureBackendPoolDeleted", reflect.TypeOf((*MockVMSet)(nil).EnsureBackendPoolDeleted), service, backendPoolID, vmSetName, backendAddressPools, deleteFromVMSet)
 }
 
 // AttachDisk mocks base method
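Because the generated recorder now takes five arguments, expectations written against the old four-argument form no longer compile. An updated expectation would look like this (mockVMSet is a hypothetical *MockVMSet; in gomock, a plain value such as true acts as an exact-match matcher):

	mockVMSet.EXPECT().
		EnsureBackendPoolDeleted(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), true).
		Return(nil)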
 