change --nodeport-addresses behavior to default to primary node IP only
Author: nayihz
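Previously, an empty `--nodeport-addresses` meant kube-proxy accepted NodePort traffic on every local IP (the zero CIDR "0.0.0.0/0" or "::/0"). NewNodePortAddresses now takes a primary node IP and, when no CIDRs of the requested family are configured, falls back to that IP alone. In this commit only the nftables proxier passes its node IP; the other proxiers pass nil and keep the old behavior. A minimal sketch of the new fallback (illustrative addresses, not taken from the commit):

	// nil primary IP: unchanged behavior, all local IPv4 addresses accept NodePorts.
	npa := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, nil, nil)
	// npa defaults to "0.0.0.0/0"

	// With a primary node IP, only that IP accepts NodePorts.
	npa = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, nil, netutils.ParseIPSloppy("192.168.0.2"))
	// npa defaults to "192.168.0.2/32"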
@@ -150,7 +150,7 @@ func (fake fakeProxierHealthChecker) IsHealthy() bool {
 func TestServer(t *testing.T) {
 	listener := newFakeListener()
 	httpFactory := newFakeHTTPServerFactory()
-	nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{})
+	nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{}, nil)
 	proxyChecker := &fakeProxierHealthChecker{true}

 	hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
@@ -664,7 +664,7 @@ func TestServerWithSelectiveListeningAddress(t *testing.T) {

 	// limiting addresses to loop back. We don't want any cleverness here around getting IP for
 	// machine nor testing ipv6 || ipv4. using loop back guarantees the test will work on any machine
-	nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+	nodePortAddresses := proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)

 	hcsi := newServiceHealthServer("hostname", nil, listener, httpFactory, nodePortAddresses, proxyChecker)
 	hcs := hcsi.(*server)
@@ -237,7 +237,7 @@ func NewProxier(ipFamily v1.IPFamily,
 	nodePortAddressStrings []string,
 	initOnly bool,
 ) (*Proxier, error) {
-	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil)

 	if !nodePortAddresses.ContainsIPv4Loopback() {
 		localhostNodePorts = false
@@ -133,7 +133,7 @@ func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
 		natRules:                 proxyutil.NewLineBuffer(),
 		nodeIP:                   netutils.ParseIPSloppy(testNodeIP),
 		localhostNodePorts:       true,
-		nodePortAddresses:        proxyutil.NewNodePortAddresses(ipfamily, nil),
+		nodePortAddresses:        proxyutil.NewNodePortAddresses(ipfamily, nil, nil),
 		networkInterfacer:        networkInterfacer,
 	}
 	p.setInitialized(true)
@@ -2342,7 +2342,7 @@ func TestNodePorts(t *testing.T) {
 			fp := NewFakeProxier(ipt)
 			fp.localhostNodePorts = tc.localhostNodePorts
 			if tc.nodePortAddresses != nil {
-				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
+				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, nil)
 			}

 			makeServiceMap(fp,
@@ -2490,7 +2490,7 @@ func TestNodePorts(t *testing.T) {
 func TestHealthCheckNodePort(t *testing.T) {
 	ipt := iptablestest.NewFake()
 	fp := NewFakeProxier(ipt)
-	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"127.0.0.0/8"}, nil)

 	svcIP := "172.30.0.42"
 	svcPort := 80
@@ -413,7 +413,7 @@ func NewProxier(ipFamily v1.IPFamily,
 		scheduler = defaultScheduler
 	}

-	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nil)

 	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)
@@ -158,7 +158,7 @@ func NewFakeProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, ipset u
 		filterRules:           proxyutil.NewLineBuffer(),
 		netlinkHandle:         netlinkHandle,
 		ipsetList:             ipsetList,
-		nodePortAddresses:     proxyutil.NewNodePortAddresses(ipFamily, nil),
+		nodePortAddresses:     proxyutil.NewNodePortAddresses(ipFamily, nil, nil),
 		networkInterfacer:     proxyutiltest.NewFakeNetwork(),
 		gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
 		ipFamily:              ipFamily,
@@ -945,7 +945,7 @@ func TestNodePortIPv4(t *testing.T) {
 			ipvs := ipvstest.NewFake()
 			ipset := ipsettest.NewFake(testIPSetVersion)
 			fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv4Protocol)
-			fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses)
+			fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, test.nodePortAddresses, nil)

 			makeServiceMap(fp, test.services...)
 			populateEndpointSlices(fp, test.endpoints...)
@@ -1287,7 +1287,7 @@ func TestNodePortIPv6(t *testing.T) {
 			ipvs := ipvstest.NewFake()
 			ipset := ipsettest.NewFake(testIPSetVersion)
 			fp := NewFakeProxier(ipt, ipvs, ipset, test.nodeIPs, nil, v1.IPv6Protocol)
-			fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses)
+			fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv6Protocol, test.nodePortAddresses, nil)

 			makeServiceMap(fp, test.services...)
 			populateEndpointSlices(fp, test.endpoints...)
@@ -2040,7 +2040,7 @@ func TestOnlyLocalNodePorts(t *testing.T) {
 	addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
-	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil)

 	fp.syncProxyRules()

@@ -2128,7 +2128,7 @@ func TestHealthCheckNodePort(t *testing.T) {
 	addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::"), Mask: net.CIDRMask(64, 128)}}
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
 	fp.networkInterfacer.(*proxyutiltest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
-	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"})
+	fp.nodePortAddresses = proxyutil.NewNodePortAddresses(v1.IPv4Protocol, []string{"100.101.102.0/24"}, nil)

 	fp.syncProxyRules()
@@ -223,7 +223,7 @@ func NewProxier(ipFamily v1.IPFamily,
 	nodePortAddressStrings []string,
 	initOnly bool,
 ) (*Proxier, error) {
-	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nodePortAddressStrings, nodeIP)

 	if initOnly {
 		klog.InfoS("System initialized and --init-only specified")
@@ -106,6 +106,12 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {

 	nft := knftables.NewFake(nftablesFamily, kubeProxyTable)

+	var nodeIP net.IP
+	if ipFamily == v1.IPv4Protocol {
+		nodeIP = netutils.ParseIPSloppy(testNodeIP)
+	} else {
+		nodeIP = netutils.ParseIPSloppy(testNodeIPv6)
+	}
 	p := &Proxier{
 		ipFamily:            ipFamily,
 		svcPortMap:          make(proxy.ServicePortMap),
@@ -118,8 +124,8 @@ func NewFakeProxier(ipFamily v1.IPFamily) (*knftables.Fake, *Proxier) {
 		localDetector:       detectLocal,
 		hostname:            testHostname,
 		serviceHealthServer: healthcheck.NewFakeServiceHealthServer(),
-		nodeIP:              netutils.ParseIPSloppy(testNodeIP),
-		nodePortAddresses:   proxyutil.NewNodePortAddresses(ipFamily, nil),
+		nodeIP:              nodeIP,
+		nodePortAddresses:   proxyutil.NewNodePortAddresses(ipFamily, nil, nodeIP),
 		networkInterfacer:   networkInterfacer,
 		staleChains:         make(map[string]time.Time),
 		serviceCIDRs:        serviceCIDRs,
@@ -304,6 +310,8 @@ func TestOverallNFTablesRules(t *testing.T) {
 		add rule ip kube-proxy masquerading mark set mark xor 0x4000
 		add rule ip kube-proxy masquerading masquerade fully-random
 		add chain ip kube-proxy services
+		add chain ip kube-proxy service-endpoints-check
+		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
 		add chain ip kube-proxy filter-prerouting { type filter hook prerouting priority -110 ; }
 		add rule ip kube-proxy filter-prerouting ct state new jump firewall-check
 		add chain ip kube-proxy filter-forward { type filter hook forward priority -110 ; }
@@ -323,12 +331,15 @@ func TestOverallNFTablesRules(t *testing.T) {
 		add rule ip kube-proxy nat-postrouting jump masquerading
 		add chain ip kube-proxy nat-prerouting { type nat hook prerouting priority -100 ; }
 		add rule ip kube-proxy nat-prerouting jump services
+		add chain ip kube-proxy nodeport-endpoints-check
+		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports

 		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
 		add chain ip kube-proxy cluster-ips-check
 		add rule ip kube-proxy cluster-ips-check ip daddr @cluster-ips reject comment "Reject traffic to invalid ports of ClusterIPs"
 		add rule ip kube-proxy cluster-ips-check ip daddr { 172.30.0.0/16 } drop comment "Drop traffic to unallocated ClusterIPs"

+		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
 		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
 		add chain ip kube-proxy firewall-check
 		add rule ip kube-proxy firewall-check ip daddr . meta l4proto . th dport vmap @firewall-ips
@@ -339,15 +350,11 @@ func TestOverallNFTablesRules(t *testing.T) {
 		add map ip kube-proxy no-endpoint-services { type ipv4_addr . inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to services with no endpoints" ; }
 		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }

-		add chain ip kube-proxy nodeport-endpoints-check
-		add rule ip kube-proxy nodeport-endpoints-check ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @no-endpoint-nodeports
-		add chain ip kube-proxy service-endpoints-check
-		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
-
 		add map ip kube-proxy service-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "ClusterIP, ExternalIP and LoadBalancer IP traffic" ; }
 		add map ip kube-proxy service-nodeports { type inet_proto . inet_service : verdict ; comment "NodePort traffic" ; }
 		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
-		add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports
+		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }

 		# svc1
 		add chain ip kube-proxy service-ULMVA6XW-ns1/svc1/tcp/p80
@@ -880,7 +887,7 @@ func TestLoadBalancer(t *testing.T) {
 }

 // TestNodePorts tests NodePort services under various combinations of the
-// --nodeport-addresses and --localhost-nodeports flags.
+// --nodeport-addresses flags.
 func TestNodePorts(t *testing.T) {
 	testCases := []struct {
 		name string
@@ -891,10 +898,6 @@ func TestNodePorts(t *testing.T) {
 		// allowAltNodeIP is true if we expect NodePort traffic on the alternate
 		// node IP to be accepted
 		allowAltNodeIP bool
-
-		// expectFirewall is true if we expect firewall to be filled in with
-		// an anti-martian-packet rule
-		expectFirewall bool
 	}{
 		{
 			name: "ipv4",
@@ -902,8 +905,7 @@ func TestNodePorts(t *testing.T) {
 			family:            v1.IPv4Protocol,
 			nodePortAddresses: nil,

-			allowAltNodeIP: true,
-			expectFirewall: true,
+			allowAltNodeIP: false,
 		},
 		{
 			name: "ipv4, multiple nodeport-addresses",
@@ -912,7 +914,6 @@ func TestNodePorts(t *testing.T) {
 			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},

 			allowAltNodeIP: true,
-			expectFirewall: false,
 		},
 		{
 			name: "ipv6",
@@ -920,17 +921,15 @@ func TestNodePorts(t *testing.T) {
 			family:            v1.IPv6Protocol,
 			nodePortAddresses: nil,

-			allowAltNodeIP: true,
-			expectFirewall: false,
+			allowAltNodeIP: false,
 		},
 		{
 			name: "ipv6, multiple nodeport-addresses",

 			family:            v1.IPv6Protocol,
-			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64"},
+			nodePortAddresses: []string{"192.168.0.0/24", "192.168.1.0/24", "2001:db8::/64", "2001:db8:1::2/128"},

-			allowAltNodeIP: false,
-			expectFirewall: false,
+			allowAltNodeIP: true,
 		},
 	}

@@ -939,17 +938,20 @@ func TestNodePorts(t *testing.T) {
 			nft, fp := NewFakeProxier(tc.family)

 			var svcIP, epIP1, epIP2 string
+			var nodeIP string
 			if tc.family == v1.IPv4Protocol {
 				svcIP = "172.30.0.41"
 				epIP1 = "10.180.0.1"
 				epIP2 = "10.180.2.1"
+				nodeIP = testNodeIP
 			} else {
 				svcIP = "fd00:172:30::41"
 				epIP1 = "fd00:10:180::1"
 				epIP2 = "fd00:10:180::2:1"
+				nodeIP = testNodeIPv6
 			}
 			if tc.nodePortAddresses != nil {
-				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses)
+				fp.nodePortAddresses = proxyutil.NewNodePortAddresses(tc.family, tc.nodePortAddresses, netutils.ParseIPSloppy(nodeIP))
 			}

 			makeServiceMap(fp,
@@ -989,16 +991,14 @@ func TestNodePorts(t *testing.T) {

 			fp.syncProxyRules()

-			var podIP, externalClientIP, nodeIP, altNodeIP string
+			var podIP, externalClientIP, altNodeIP string
 			if tc.family == v1.IPv4Protocol {
 				podIP = "10.0.0.2"
 				externalClientIP = testExternalClient
-				nodeIP = testNodeIP
 				altNodeIP = testNodeIPAlt
 			} else {
 				podIP = "fd00:10::2"
 				externalClientIP = "2600:5200::1"
-				nodeIP = testNodeIPv6
 				altNodeIP = testNodeIPv6Alt
 			}
 			output := net.JoinHostPort(epIP1, "80") + ", " + net.JoinHostPort(epIP2, "80")
@@ -1031,8 +1031,6 @@ func TestNodePorts(t *testing.T) {
 				},
 			})

-			// NodePort on altNodeIP should be allowed, unless
-			// nodePortAddressess excludes altNodeIP
 			if tc.allowAltNodeIP {
 				runPacketFlowTests(t, getLine(), nft, testNodeIPs, []packetFlowTest{
 					{
@@ -3981,13 +3979,13 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add rule ip kube-proxy nat-output jump services
 		add rule ip kube-proxy nat-postrouting jump masquerading
 		add rule ip kube-proxy nat-prerouting jump services
-		add rule ip kube-proxy nodeport-endpoints-check ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @no-endpoint-nodeports
+		add rule ip kube-proxy nodeport-endpoints-check ip daddr @nodeport-ips meta l4proto . th dport vmap @no-endpoint-nodeports
 		add rule ip kube-proxy reject-chain reject
 		add rule ip kube-proxy services ip daddr . meta l4proto . th dport vmap @service-ips
-		add rule ip kube-proxy services fib daddr type local ip daddr != 127.0.0.0/8 meta l4proto . th dport vmap @service-nodeports
-		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services
-
+		add rule ip kube-proxy services ip daddr @nodeport-ips meta l4proto . th dport vmap @service-nodeports
 		add set ip kube-proxy cluster-ips { type ipv4_addr ; comment "Active ClusterIPs" ; }
+		add set ip kube-proxy nodeport-ips { type ipv4_addr ; comment "IPs that accept NodePort traffic" ; }
+		add rule ip kube-proxy service-endpoints-check ip daddr . meta l4proto . th dport vmap @no-endpoint-services

 		add map ip kube-proxy firewall-ips { type ipv4_addr . inet_proto . inet_service : verdict ; comment "destinations that are subject to LoadBalancerSourceRanges" ; }
 		add map ip kube-proxy no-endpoint-nodeports { type inet_proto . inet_service : verdict ; comment "vmap to drop or reject packets to service nodeports with no endpoints" ; }
@@ -4059,6 +4057,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	expected := baseRules + dedent.Dedent(`
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.42 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }

@@ -4111,6 +4110,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.42 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.42 . tcp . 8080 : goto service-MHHHYRWA-ns2/svc2/tcp/p8080 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
@@ -4144,6 +4144,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	expected = baseRules + dedent.Dedent(`
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }

@@ -4172,6 +4173,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 	expected = baseRules + dedent.Dedent(`
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }

@@ -4208,6 +4210,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
 		add element ip kube-proxy cluster-ips { 172.30.0.44 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }

@@ -4247,6 +4250,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
 		add element ip kube-proxy cluster-ips { 172.30.0.44 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4285,6 +4289,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
 		add element ip kube-proxy cluster-ips { 172.30.0.44 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4326,6 +4331,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
 		add element ip kube-proxy cluster-ips { 172.30.0.44 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4365,6 +4371,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
 		add element ip kube-proxy cluster-ips { 172.30.0.44 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy no-endpoint-services { 172.30.0.43 . tcp . 80 comment "ns3/svc3:p80" : goto reject-chain }
 		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -4400,6 +4407,7 @@ func TestSyncProxyRulesRepeated(t *testing.T) {
 		add element ip kube-proxy cluster-ips { 172.30.0.41 }
 		add element ip kube-proxy cluster-ips { 172.30.0.43 }
 		add element ip kube-proxy cluster-ips { 172.30.0.44 }
+		add element ip kube-proxy nodeport-ips { 192.168.0.2 }
 		add element ip kube-proxy service-ips { 172.30.0.41 . tcp . 80 : goto service-ULMVA6XW-ns1/svc1/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.43 . tcp . 80 : goto service-4AT6LBPK-ns3/svc3/tcp/p80 }
 		add element ip kube-proxy service-ips { 172.30.0.44 . tcp . 80 : goto service-LAUZTJTB-ns4/svc4/tcp/p80 }
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"net"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	netutils "k8s.io/utils/net"
 )

@@ -37,11 +37,12 @@ type NodePortAddresses struct {
 var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0)

 // NewNodePortAddresses takes an IP family and the `--nodeport-addresses` value (which is
-// assumed to contain only valid CIDRs, potentially of both IP families) and returns a
-// NodePortAddresses object for the given family. If there are no CIDRs of the given
-// family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are CIDRs of
-// the other family).
-func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
+// assumed to contain only valid CIDRs, potentially of both IP families) and the primary IP
+// (which will be used as node port address when `--nodeport-addresses` is empty).
+// It will return a NodePortAddresses object for the given family. If there are no CIDRs of
+// the given family then the CIDR "0.0.0.0/0" or "::/0" will be added (even if there are
+// CIDRs of the other family).
+func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string, primaryIP net.IP) *NodePortAddresses {
 	npa := &NodePortAddresses{}

 	// Filter CIDRs to correct family
@@ -51,17 +52,24 @@ func NewNodePortAddresses(family v1.IPFamily, cidrStrings []string) *NodePortAddresses {
 		}
 	}
 	if len(npa.cidrStrings) == 0 {
-		if family == v1.IPv4Protocol {
-			npa.cidrStrings = []string{IPv4ZeroCIDR}
+		if primaryIP == nil {
+			if family == v1.IPv4Protocol {
+				npa.cidrStrings = []string{IPv4ZeroCIDR}
+			} else {
+				npa.cidrStrings = []string{IPv6ZeroCIDR}
+			}
 		} else {
-			npa.cidrStrings = []string{IPv6ZeroCIDR}
+			if family == v1.IPv4Protocol {
+				npa.cidrStrings = []string{fmt.Sprintf("%s/32", primaryIP.String())}
+			} else {
+				npa.cidrStrings = []string{fmt.Sprintf("%s/128", primaryIP.String())}
+			}
 		}
 	}

 	// Now parse
 	for _, str := range npa.cidrStrings {
 		_, cidr, _ := netutils.ParseCIDRSloppy(str)

 		if netutils.IsIPv4CIDR(cidr) {
 			if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) {
 				npa.containsIPv4Loopback = true
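Note that the primary IP is only a fallback: explicit `--nodeport-addresses` CIDRs of the requested family still take precedence, because the fallback runs only when the filtered list is empty. A short sketch of the resulting precedence (hypothetical addresses, not part of the commit):

	// An explicit CIDR of the requested family wins over primaryIP.
	npa := NewNodePortAddresses(v1.IPv4Protocol, []string{"10.0.0.0/8"}, netutils.ParseIPSloppy("192.168.0.2"))
	// npa.cidrStrings == []string{"10.0.0.0/8"}

	// CIDRs of the other family are filtered out first, so the fallback applies here.
	npa = NewNodePortAddresses(v1.IPv6Protocol, []string{"10.0.0.0/8"}, netutils.ParseIPSloppy("2001:db8::1"))
	// npa.cidrStrings == []string{"2001:db8::1/128"}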
@@ -21,7 +21,7 @@ import (
 	"net"
 	"testing"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	fake "k8s.io/kubernetes/pkg/proxy/util/testing"
 	netutils "k8s.io/utils/net"
@@ -60,6 +60,8 @@ func TestGetNodeIPs(t *testing.T) {
 		cidrs         []string
 		itfAddrsPairs []InterfaceAddrsPair
 		expected      map[v1.IPFamily]expectation
+		// nodeIP will take effect when `--nodeport-addresses` is empty
+		nodeIP net.IP
 	}{
 		{
 			name:  "IPv4 single",
@@ -369,6 +371,53 @@
 				},
 			},
 		},
+		{
+			name: "ipv4 with nodeIP",
+			itfAddrsPairs: []InterfaceAddrsPair{
+				{
+					itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
+					addrs: []net.Addr{
+						&net.IPNet{IP: netutils.ParseIPSloppy("1.2.3.4"), Mask: net.CIDRMask(30, 32)},
+					},
+				},
+				{
+					itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
+					addrs: []net.Addr{
+						&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
+					},
+				},
+			},
+			expected: map[v1.IPFamily]expectation{
+				v1.IPv4Protocol: {
+					ips: sets.New[string]("1.2.3.4"),
+				},
+			},
+			nodeIP: netutils.ParseIPSloppy("1.2.3.4"),
+		},
+		{
+			name: "ipv6 with nodeIP",
+			itfAddrsPairs: []InterfaceAddrsPair{
+				{
+					itf: net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0},
+					addrs: []net.Addr{
+						&net.IPNet{IP: netutils.ParseIPSloppy("2001:db8::1"), Mask: net.CIDRMask(64, 128)},
+					},
+				},
+				{
+					itf: net.Interface{Index: 1, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0},
+					addrs: []net.Addr{
+						&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
+					},
+				},
+			},
+			expected: map[v1.IPFamily]expectation{
+				v1.IPv6Protocol: {
+					matchAll: true,
+					ips:      sets.New[string]("2001:db8::1", "::1"),
+				},
+			},
+			nodeIP: netutils.ParseIPSloppy("1.2.3.4"),
+		},
 	}

 	for _, tc := range testCases {
@@ -379,7 +428,10 @@ func TestGetNodeIPs(t *testing.T) {
 			}

 			for _, family := range []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol} {
-				npa := NewNodePortAddresses(family, tc.cidrs)
+				if tc.nodeIP != nil && v1.IPFamily(fmt.Sprintf("IPv%s", netutils.IPFamilyOf(tc.nodeIP))) != family {
+					continue
+				}
+				npa := NewNodePortAddresses(family, tc.cidrs, tc.nodeIP)

 				if npa.MatchAll() != tc.expected[family].matchAll {
 					t.Errorf("unexpected MatchAll(%s), expected: %v", family, tc.expected[family].matchAll)
@@ -451,12 +503,12 @@ func TestContainsIPv4Loopback(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings)
+			npa := NewNodePortAddresses(v1.IPv4Protocol, tt.cidrStrings, nil)
 			if got := npa.ContainsIPv4Loopback(); got != tt.want {
 				t.Errorf("IPv4 ContainsIPv4Loopback() = %v, want %v", got, tt.want)
 			}
 			// ContainsIPv4Loopback should always be false for family=IPv6
-			npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings)
+			npa = NewNodePortAddresses(v1.IPv6Protocol, tt.cidrStrings, nil)
 			if got := npa.ContainsIPv4Loopback(); got {
 				t.Errorf("IPv6 ContainsIPv4Loopback() = %v, want %v", got, false)
 			}
@@ -30,11 +30,10 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/events"
 	utilsysctl "k8s.io/component-helpers/node/util/sysctl"
+	"k8s.io/klog/v2"
 	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	netutils "k8s.io/utils/net"
-
-	"k8s.io/klog/v2"
 )

 const (
@@ -672,7 +672,7 @@ func NewProxier(
 	}

 	// windows listens to all node addresses
-	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil)
+	nodePortAddresses := proxyutil.NewNodePortAddresses(ipFamily, nil, nil)
 	serviceHealthServer := healthcheck.NewServiceHealthServer(hostname, recorder, nodePortAddresses, healthzServer)

 	hcnImpl := newHcnImpl()