mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-10-31 18:28:13 +00:00 
			
		
		
		
	Merge pull request #45523 from colemickens/cmpr-cpfix3
Automatic merge from submit-queue

azure: load balancer: support UDP, fix multiple loadBalancerSourceRanges support, respect sessionAffinity

**What this PR does / why we need it**:
1. Adds support for UDP ports
2. Fixes support for multiple `loadBalancerSourceRanges`
3. Adds support for the Service spec's `sessionAffinity`
4. Removes dead code from the Instances file

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #43683

**Special notes for your reviewer**: n/a

**Release note**:
```release-note
azure: add support for UDP ports
azure: fix support for multiple `loadBalancerSourceRanges`
azure: support the Service spec's `sessionAffinity`
```
This commit is contained in:
		| @@ -54,7 +54,6 @@ go_test( | ||||
|     deps = [ | ||||
|         "//pkg/api/v1:go_default_library", | ||||
|         "//pkg/api/v1/service:go_default_library", | ||||
|         "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", | ||||
|         "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", | ||||
|         "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", | ||||
|         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", | ||||
|   | ||||
| @@ -19,7 +19,6 @@ package azure | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"regexp" | ||||
|  | ||||
| 	"k8s.io/kubernetes/pkg/api/v1" | ||||
| 	"k8s.io/kubernetes/pkg/cloudprovider" | ||||
| @@ -123,24 +122,6 @@ func (az *Cloud) listAllNodesInResourceGroup() ([]compute.VirtualMachine, error) | ||||
|  | ||||
| } | ||||
|  | ||||
| func filterNodes(nodes []compute.VirtualMachine, filter string) ([]compute.VirtualMachine, error) { | ||||
| 	filteredNodes := []compute.VirtualMachine{} | ||||
|  | ||||
| 	re, err := regexp.Compile(filter) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	for _, node := range nodes { | ||||
| 		// search tags | ||||
| 		if re.MatchString(*node.Name) { | ||||
| 			filteredNodes = append(filteredNodes, node) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return filteredNodes, nil | ||||
| } | ||||
|  | ||||
| // mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name | ||||
| // This is a simple string cast. | ||||
| func mapNodeNameToVMName(nodeName types.NodeName) string { | ||||
|   | ||||
| @@ -503,10 +503,11 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration | ||||
| 	} else { | ||||
| 		ports = []v1.ServicePort{} | ||||
| 	} | ||||
| 	expectedProbes := make([]network.Probe, len(ports)) | ||||
| 	expectedRules := make([]network.LoadBalancingRule, len(ports)) | ||||
| 	for i, port := range ports { | ||||
| 		lbRuleName := getRuleName(service, port) | ||||
|  | ||||
| 	var expectedProbes []network.Probe | ||||
| 	var expectedRules []network.LoadBalancingRule | ||||
| 	for _, port := range ports { | ||||
| 		lbRuleName := getLoadBalancerRuleName(service, port) | ||||
|  | ||||
| 		transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol) | ||||
| 		if err != nil { | ||||
| @@ -514,9 +515,16 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration | ||||
| 		} | ||||
|  | ||||
| 		if serviceapi.NeedsHealthCheck(service) { | ||||
| 			if port.Protocol == v1.ProtocolUDP { | ||||
| 				// ERROR: this isn't supported | ||||
| 				// health check (aka source ip preservation) is not | ||||
| 				// compatible with UDP (it uses an HTTP check) | ||||
| 				return lb, false, fmt.Errorf("services requiring health checks are incompatible with UDP ports") | ||||
| 			} | ||||
|  | ||||
| 			podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service) | ||||
|  | ||||
| 			expectedProbes[i] = network.Probe{ | ||||
| 			expectedProbes = append(expectedProbes, network.Probe{ | ||||
| 				Name: &lbRuleName, | ||||
| 				ProbePropertiesFormat: &network.ProbePropertiesFormat{ | ||||
| 					RequestPath:       to.StringPtr(podPresencePath), | ||||
| @@ -525,37 +533,49 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration | ||||
| 					IntervalInSeconds: to.Int32Ptr(5), | ||||
| 					NumberOfProbes:    to.Int32Ptr(2), | ||||
| 				}, | ||||
| 			} | ||||
| 		} else { | ||||
| 			expectedProbes[i] = network.Probe{ | ||||
| 			}) | ||||
| 		} else if port.Protocol != v1.ProtocolUDP { | ||||
| 			// we only add the expected probe if we're doing TCP | ||||
| 			expectedProbes = append(expectedProbes, network.Probe{ | ||||
| 				Name: &lbRuleName, | ||||
| 				ProbePropertiesFormat: &network.ProbePropertiesFormat{ | ||||
| 					Protocol:          probeProto, | ||||
| 					Protocol:          *probeProto, | ||||
| 					Port:              to.Int32Ptr(port.NodePort), | ||||
| 					IntervalInSeconds: to.Int32Ptr(5), | ||||
| 					NumberOfProbes:    to.Int32Ptr(2), | ||||
| 				}, | ||||
| 			} | ||||
| 			}) | ||||
| 		} | ||||
|  | ||||
| 		expectedRules[i] = network.LoadBalancingRule{ | ||||
| 		loadDistribution := network.Default | ||||
| 		if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { | ||||
| 			loadDistribution = network.SourceIP | ||||
| 		} | ||||
| 		expectedRule := network.LoadBalancingRule{ | ||||
| 			Name: &lbRuleName, | ||||
| 			LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ | ||||
| 				Protocol: transportProto, | ||||
| 				Protocol: *transportProto, | ||||
| 				FrontendIPConfiguration: &network.SubResource{ | ||||
| 					ID: to.StringPtr(lbFrontendIPConfigID), | ||||
| 				}, | ||||
| 				BackendAddressPool: &network.SubResource{ | ||||
| 					ID: to.StringPtr(lbBackendPoolID), | ||||
| 				}, | ||||
| 				Probe: &network.SubResource{ | ||||
| 					ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)), | ||||
| 				}, | ||||
| 				LoadDistribution: loadDistribution, | ||||
| 				FrontendPort:     to.Int32Ptr(port.Port), | ||||
| 				BackendPort:      to.Int32Ptr(port.Port), | ||||
| 				EnableFloatingIP: to.BoolPtr(true), | ||||
| 			}, | ||||
| 		} | ||||
|  | ||||
| 		// we didn't construct the probe objects for UDP because they're not used/needed/allowed | ||||
| 		if port.Protocol != v1.ProtocolUDP { | ||||
| 			expectedRule.Probe = &network.SubResource{ | ||||
| 				ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)), | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		expectedRules = append(expectedRules, expectedRule) | ||||
| 	} | ||||
|  | ||||
| 	// remove unwanted probes | ||||
| @@ -670,17 +690,17 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st | ||||
| 	expectedSecurityRules := make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes)) | ||||
|  | ||||
| 	for i, port := range ports { | ||||
| 		securityRuleName := getRuleName(service, port) | ||||
| 		_, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol) | ||||
| 		if err != nil { | ||||
| 			return sg, false, err | ||||
| 		} | ||||
| 		for j := range sourceAddressPrefixes { | ||||
| 			ix := i*len(sourceAddressPrefixes) + j | ||||
| 			securityRuleName := getSecurityRuleName(service, port, sourceAddressPrefixes[j]) | ||||
| 			expectedSecurityRules[ix] = network.SecurityRule{ | ||||
| 				Name: to.StringPtr(securityRuleName), | ||||
| 				SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ | ||||
| 					Protocol:                 securityProto, | ||||
| 					Protocol:                 *securityProto, | ||||
| 					SourcePortRange:          to.StringPtr("*"), | ||||
| 					DestinationPortRange:     to.StringPtr(strconv.Itoa(int(port.Port))), | ||||
| 					SourceAddressPrefix:      to.StringPtr(sourceAddressPrefixes[j]), | ||||
|   | ||||
| @@ -25,7 +25,6 @@ import ( | ||||
| 	"k8s.io/kubernetes/pkg/api/v1" | ||||
| 	serviceapi "k8s.io/kubernetes/pkg/api/v1/service" | ||||
|  | ||||
| 	"github.com/Azure/azure-sdk-for-go/arm/compute" | ||||
| 	"github.com/Azure/azure-sdk-for-go/arm/network" | ||||
| 	"github.com/Azure/go-autorest/autorest/to" | ||||
| ) | ||||
| @@ -35,11 +34,18 @@ var testClusterName = "testCluster" | ||||
| // Test additional of a new service/port. | ||||
| func TestReconcileLoadBalancerAddPort(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80) | ||||
| 	configProperties := getTestPublicFipConfigurationProperties() | ||||
| 	lb := getTestLoadBalancer() | ||||
| 	nodes := []*v1.Node{} | ||||
|  | ||||
| 	svc.Spec.Ports = append(svc.Spec.Ports, v1.ServicePort{ | ||||
| 		Name:     fmt.Sprintf("port-udp-%d", 1234), | ||||
| 		Protocol: v1.ProtocolUDP, | ||||
| 		Port:     1234, | ||||
| 		NodePort: getBackendPort(1234), | ||||
| 	}) | ||||
|  | ||||
| 	lb, updated, err := az.reconcileLoadBalancer(lb, &configProperties, testClusterName, &svc, nodes) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error: %q", err) | ||||
| @@ -59,7 +65,7 @@ func TestReconcileLoadBalancerAddPort(t *testing.T) { | ||||
|  | ||||
| func TestReconcileLoadBalancerNodeHealth(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80) | ||||
| 	svc.Annotations = map[string]string{ | ||||
| 		serviceapi.BetaAnnotationExternalTraffic:     serviceapi.AnnotationValueExternalTrafficLocal, | ||||
| 		serviceapi.BetaAnnotationHealthCheckNodePort: "32456", | ||||
| @@ -89,7 +95,7 @@ func TestReconcileLoadBalancerNodeHealth(t *testing.T) { | ||||
| // Test removing all services results in removing the frontend ip configuration | ||||
| func TestReconcileLoadBalancerRemoveService(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80, 443) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443) | ||||
| 	lb := getTestLoadBalancer() | ||||
| 	configProperties := getTestPublicFipConfigurationProperties() | ||||
| 	nodes := []*v1.Node{} | ||||
| @@ -120,7 +126,7 @@ func TestReconcileLoadBalancerRemoveService(t *testing.T) { | ||||
| // Test removing all service ports results in removing the frontend ip configuration | ||||
| func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80) | ||||
| 	lb := getTestLoadBalancer() | ||||
| 	configProperties := getTestPublicFipConfigurationProperties() | ||||
| 	nodes := []*v1.Node{} | ||||
| @@ -131,7 +137,7 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) | ||||
| 	} | ||||
| 	validateLoadBalancer(t, lb, svc) | ||||
|  | ||||
| 	svcUpdated := getTestService("servicea") | ||||
| 	svcUpdated := getTestService("servicea", v1.ProtocolTCP) | ||||
| 	lb, updated, err = az.reconcileLoadBalancer(lb, nil, testClusterName, &svcUpdated, nodes) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error: %q", err) | ||||
| @@ -152,13 +158,13 @@ func TestReconcileLoadBalancerRemoveAllPortsRemovesFrontendConfig(t *testing.T) | ||||
| // Test removal of a port from an existing service. | ||||
| func TestReconcileLoadBalancerRemovesPort(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80, 443) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443) | ||||
| 	configProperties := getTestPublicFipConfigurationProperties() | ||||
| 	nodes := []*v1.Node{} | ||||
|  | ||||
| 	existingLoadBalancer := getTestLoadBalancer(svc) | ||||
|  | ||||
| 	svcUpdated := getTestService("servicea", 80) | ||||
| 	svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80) | ||||
| 	updatedLoadBalancer, _, err := az.reconcileLoadBalancer(existingLoadBalancer, &configProperties, testClusterName, &svcUpdated, nodes) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error: %q", err) | ||||
| @@ -170,8 +176,8 @@ func TestReconcileLoadBalancerRemovesPort(t *testing.T) { | ||||
| // Test reconciliation of multiple services on same port | ||||
| func TestReconcileLoadBalancerMultipleServices(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc1 := getTestService("servicea", 80, 443) | ||||
| 	svc2 := getTestService("serviceb", 80) | ||||
| 	svc1 := getTestService("servicea", v1.ProtocolTCP, 80, 443) | ||||
| 	svc2 := getTestService("serviceb", v1.ProtocolTCP, 80) | ||||
| 	configProperties := getTestPublicFipConfigurationProperties() | ||||
| 	nodes := []*v1.Node{} | ||||
|  | ||||
| @@ -192,7 +198,7 @@ func TestReconcileLoadBalancerMultipleServices(t *testing.T) { | ||||
|  | ||||
| func TestReconcileSecurityGroupNewServiceAddsPort(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc1 := getTestService("serviceea", 80) | ||||
| 	svc1 := getTestService("serviceea", v1.ProtocolTCP, 80) | ||||
|  | ||||
| 	sg := getTestSecurityGroup() | ||||
|  | ||||
| @@ -219,8 +225,8 @@ func TestReconcileSecurityGroupNewInternalServiceAddsPort(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func TestReconcileSecurityGroupRemoveService(t *testing.T) { | ||||
| 	service1 := getTestService("servicea", 81) | ||||
| 	service2 := getTestService("serviceb", 82) | ||||
| 	service1 := getTestService("servicea", v1.ProtocolTCP, 81) | ||||
| 	service2 := getTestService("serviceb", v1.ProtocolTCP, 82) | ||||
|  | ||||
| 	sg := getTestSecurityGroup(service1, service2) | ||||
|  | ||||
| @@ -236,11 +242,11 @@ func TestReconcileSecurityGroupRemoveService(t *testing.T) { | ||||
|  | ||||
| func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80, 443) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443) | ||||
|  | ||||
| 	sg := getTestSecurityGroup(svc) | ||||
|  | ||||
| 	svcUpdated := getTestService("servicea", 80) | ||||
| 	svcUpdated := getTestService("servicea", v1.ProtocolTCP, 80) | ||||
| 	sg, _, err := az.reconcileSecurityGroup(sg, testClusterName, &svcUpdated, true) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpected error: %q", err) | ||||
| @@ -251,10 +257,10 @@ func TestReconcileSecurityGroupRemoveServiceRemovesPort(t *testing.T) { | ||||
|  | ||||
| func TestReconcileSecurityWithSourceRanges(t *testing.T) { | ||||
| 	az := getTestCloud() | ||||
| 	svc := getTestService("servicea", 80, 443) | ||||
| 	svc := getTestService("servicea", v1.ProtocolTCP, 80, 443) | ||||
| 	svc.Spec.LoadBalancerSourceRanges = []string{ | ||||
| 		"192.168.0.1/24", | ||||
| 		"10.0.0.1/32", | ||||
| 		"192.168.0.0/24", | ||||
| 		"10.0.0.0/32", | ||||
| 	} | ||||
|  | ||||
| 	sg := getTestSecurityGroup(svc) | ||||
| @@ -291,12 +297,12 @@ func getTestPublicFipConfigurationProperties() network.FrontendIPConfigurationPr | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func getTestService(identifier string, requestedPorts ...int32) v1.Service { | ||||
| func getTestService(identifier string, proto v1.Protocol, requestedPorts ...int32) v1.Service { | ||||
| 	ports := []v1.ServicePort{} | ||||
| 	for _, port := range requestedPorts { | ||||
| 		ports = append(ports, v1.ServicePort{ | ||||
| 			Name:     fmt.Sprintf("port-%d", port), | ||||
| 			Protocol: v1.ProtocolTCP, | ||||
| 			Name:     fmt.Sprintf("port-tcp-%d", port), | ||||
| 			Protocol: proto, | ||||
| 			Port:     port, | ||||
| 			NodePort: getBackendPort(port), | ||||
| 		}) | ||||
| @@ -317,7 +323,7 @@ func getTestService(identifier string, requestedPorts ...int32) v1.Service { | ||||
| } | ||||
|  | ||||
| func getInternalTestService(identifier string, requestedPorts ...int32) v1.Service { | ||||
| 	svc := getTestService(identifier, requestedPorts...) | ||||
| 	svc := getTestService(identifier, v1.ProtocolTCP, requestedPorts...) | ||||
| 	svc.Annotations[ServiceAnnotationLoadBalancerInternal] = "true" | ||||
|  | ||||
| 	return svc | ||||
| @@ -329,7 +335,7 @@ func getTestLoadBalancer(services ...v1.Service) network.LoadBalancer { | ||||
|  | ||||
| 	for _, service := range services { | ||||
| 		for _, port := range service.Spec.Ports { | ||||
| 			ruleName := getRuleName(&service, port) | ||||
| 			ruleName := getLoadBalancerRuleName(&service, port) | ||||
| 			rules = append(rules, network.LoadBalancingRule{ | ||||
| 				Name: to.StringPtr(ruleName), | ||||
| 				LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ | ||||
| @@ -371,10 +377,9 @@ func getTestSecurityGroup(services ...v1.Service) network.SecurityGroup { | ||||
|  | ||||
| 	for _, service := range services { | ||||
| 		for _, port := range service.Spec.Ports { | ||||
| 			ruleName := getRuleName(&service, port) | ||||
|  | ||||
| 			sources := getServiceSourceRanges(&service) | ||||
| 			for _, src := range sources { | ||||
| 				ruleName := getSecurityRuleName(&service, port, src) | ||||
| 				rules = append(rules, network.SecurityRule{ | ||||
| 					Name: to.StringPtr(ruleName), | ||||
| 					SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ | ||||
| @@ -398,13 +403,14 @@ func getTestSecurityGroup(services ...v1.Service) network.SecurityGroup { | ||||
| func validateLoadBalancer(t *testing.T, loadBalancer network.LoadBalancer, services ...v1.Service) { | ||||
| 	expectedRuleCount := 0 | ||||
| 	expectedFrontendIPCount := 0 | ||||
| 	expectedProbeCount := 0 | ||||
| 	for _, svc := range services { | ||||
| 		if len(svc.Spec.Ports) > 0 { | ||||
| 			expectedFrontendIPCount++ | ||||
| 		} | ||||
| 		for _, wantedRule := range svc.Spec.Ports { | ||||
| 			expectedRuleCount++ | ||||
| 			wantedRuleName := getRuleName(&svc, wantedRule) | ||||
| 			wantedRuleName := getLoadBalancerRuleName(&svc, wantedRule) | ||||
| 			foundRule := false | ||||
| 			for _, actualRule := range *loadBalancer.LoadBalancingRules { | ||||
| 				if strings.EqualFold(*actualRule.Name, wantedRuleName) && | ||||
| @@ -418,6 +424,12 @@ func validateLoadBalancer(t *testing.T, loadBalancer network.LoadBalancer, servi | ||||
| 				t.Errorf("Expected load balancer rule but didn't find it: %q", wantedRuleName) | ||||
| 			} | ||||
|  | ||||
| 			// if UDP rule, there is no probe | ||||
| 			if wantedRule.Protocol == v1.ProtocolUDP { | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			expectedProbeCount++ | ||||
| 			foundProbe := false | ||||
| 			if serviceapi.NeedsHealthCheck(&svc) { | ||||
| 				path, port := serviceapi.GetServiceHealthCheckPathPort(&svc) | ||||
| @@ -457,8 +469,9 @@ func validateLoadBalancer(t *testing.T, loadBalancer network.LoadBalancer, servi | ||||
| 	if lenRules != expectedRuleCount { | ||||
| 		t.Errorf("Expected the loadbalancer to have %d rules. Found %d.\n%v", expectedRuleCount, lenRules, loadBalancer.LoadBalancingRules) | ||||
| 	} | ||||
|  | ||||
| 	lenProbes := len(*loadBalancer.Probes) | ||||
| 	if lenProbes != expectedRuleCount { | ||||
| 	if lenProbes != expectedProbeCount { | ||||
| 		t.Errorf("Expected the loadbalancer to have %d probes. Found %d.", expectedRuleCount, lenProbes) | ||||
| 	} | ||||
| } | ||||
| @@ -468,8 +481,8 @@ func validateSecurityGroup(t *testing.T, securityGroup network.SecurityGroup, se | ||||
| 	for _, svc := range services { | ||||
| 		for _, wantedRule := range svc.Spec.Ports { | ||||
| 			sources := getServiceSourceRanges(&svc) | ||||
| 			wantedRuleName := getRuleName(&svc, wantedRule) | ||||
| 			for _, source := range sources { | ||||
| 				wantedRuleName := getSecurityRuleName(&svc, wantedRule, source) | ||||
| 				expectedRuleCount++ | ||||
| 				foundRule := false | ||||
| 				for _, actualRule := range *securityGroup.SecurityRules { | ||||
| @@ -542,22 +555,28 @@ func TestProtocolTranslationTCP(t *testing.T) { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	if transportProto != network.TransportProtocolTCP { | ||||
| 	if *transportProto != network.TransportProtocolTCP { | ||||
| 		t.Errorf("Expected TCP LoadBalancer Rule Protocol. Got %v", transportProto) | ||||
| 	} | ||||
| 	if securityGroupProto != network.TCP { | ||||
| 	if *securityGroupProto != network.TCP { | ||||
| 		t.Errorf("Expected TCP SecurityGroup Protocol. Got %v", transportProto) | ||||
| 	} | ||||
| 	if probeProto != network.ProbeProtocolTCP { | ||||
| 	if *probeProto != network.ProbeProtocolTCP { | ||||
| 		t.Errorf("Expected TCP LoadBalancer Probe Protocol. Got %v", transportProto) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestProtocolTranslationUDP(t *testing.T) { | ||||
| 	proto := v1.ProtocolUDP | ||||
| 	_, _, _, err := getProtocolsFromKubernetesProtocol(proto) | ||||
| 	if err == nil { | ||||
| 		t.Error("Expected an error. UDP is unsupported.") | ||||
| 	transportProto, securityGroupProto, probeProto, _ := getProtocolsFromKubernetesProtocol(proto) | ||||
| 	if *transportProto != network.TransportProtocolUDP { | ||||
| 		t.Errorf("Expected UDP LoadBalancer Rule Protocol. Got %v", transportProto) | ||||
| 	} | ||||
| 	if *securityGroupProto != network.UDP { | ||||
| 		t.Errorf("Expected UDP SecurityGroup Protocol. Got %v", transportProto) | ||||
| 	} | ||||
| 	if probeProto != nil { | ||||
| 		t.Errorf("Expected UDP LoadBalancer Probe Protocol. Got %v", transportProto) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| @@ -660,24 +679,3 @@ func TestDecodeInstanceInfo(t *testing.T) { | ||||
| 		t.Error("got incorrect fault domain") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestFilterNodes(t *testing.T) { | ||||
| 	nodes := []compute.VirtualMachine{ | ||||
| 		{Name: to.StringPtr("test")}, | ||||
| 		{Name: to.StringPtr("test2")}, | ||||
| 		{Name: to.StringPtr("3test")}, | ||||
| 	} | ||||
|  | ||||
| 	filteredNodes, err := filterNodes(nodes, "^test$") | ||||
| 	if err != nil { | ||||
| 		t.Errorf("Unexpeted error when filtering: %q", err) | ||||
| 	} | ||||
|  | ||||
| 	if len(filteredNodes) != 1 { | ||||
| 		t.Error("Got too many nodes after filtering") | ||||
| 	} | ||||
|  | ||||
| 	if *filteredNodes[0].Name != "test" { | ||||
| 		t.Error("Get the wrong node after filtering") | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -122,13 +122,25 @@ func getLastSegment(ID string) (string, error) { | ||||
|  | ||||
| // returns the equivalent LoadBalancerRule, SecurityRule and LoadBalancerProbe | ||||
| // protocol types for the given Kubernetes protocol type. | ||||
| func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (network.TransportProtocol, network.SecurityRuleProtocol, network.ProbeProtocol, error) { | ||||
| func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.TransportProtocol, *network.SecurityRuleProtocol, *network.ProbeProtocol, error) { | ||||
| 	var transportProto network.TransportProtocol | ||||
| 	var securityProto network.SecurityRuleProtocol | ||||
| 	var probeProto network.ProbeProtocol | ||||
|  | ||||
| 	switch protocol { | ||||
| 	case v1.ProtocolTCP: | ||||
| 		return network.TransportProtocolTCP, network.TCP, network.ProbeProtocolTCP, nil | ||||
| 		transportProto = network.TransportProtocolTCP | ||||
| 		securityProto = network.TCP | ||||
| 		probeProto = network.ProbeProtocolTCP | ||||
| 		return &transportProto, &securityProto, &probeProto, nil | ||||
| 	case v1.ProtocolUDP: | ||||
| 		transportProto = network.TransportProtocolUDP | ||||
| 		securityProto = network.UDP | ||||
| 		return &transportProto, &securityProto, nil, nil | ||||
| 	default: | ||||
| 		return "", "", "", fmt.Errorf("Only TCP is supported for Azure LoadBalancers") | ||||
| 		return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers") | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| // This returns the full identifier of the primary NIC for the given VM. | ||||
| @@ -175,8 +187,13 @@ func getBackendPoolName(clusterName string) string { | ||||
| 	return clusterName | ||||
| } | ||||
|  | ||||
| func getRuleName(service *v1.Service, port v1.ServicePort) string { | ||||
| 	return fmt.Sprintf("%s-%s-%d-%d", getRulePrefix(service), port.Protocol, port.Port, port.NodePort) | ||||
| func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort) string { | ||||
| 	return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port) | ||||
| } | ||||
|  | ||||
| func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string { | ||||
| 	safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1) | ||||
| 	return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix) | ||||
| } | ||||
|  | ||||
| // This returns a human-readable version of the Service used to tag some resources. | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Kubernetes Submit Queue
					Kubernetes Submit Queue