diff --git a/packages/apps/kubernetes/Chart.yaml b/packages/apps/kubernetes/Chart.yaml
index 56a3b2d8..630b76df 100644
--- a/packages/apps/kubernetes/Chart.yaml
+++ b/packages/apps/kubernetes/Chart.yaml
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.23.1
+version: 0.23.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/packages/apps/kubernetes/images/kubevirt-cloud-provider/Dockerfile b/packages/apps/kubernetes/images/kubevirt-cloud-provider/Dockerfile
index 0e2c3a2f..563251d8 100644
--- a/packages/apps/kubernetes/images/kubevirt-cloud-provider/Dockerfile
+++ b/packages/apps/kubernetes/images/kubevirt-cloud-provider/Dockerfile
@@ -8,7 +8,7 @@ ENV GOARCH=$TARGETARCH
 
 RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
  && cd /go/src/kubevirt.io/cloud-provider-kubevirt \
- && git checkout 443a1fe
+ && git checkout a0acf33
 
 WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt
diff --git a/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/341.diff b/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/341.diff
index c9bf8a1c..65928032 100644
--- a/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/341.diff
+++ b/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/341.diff
@@ -37,7 +37,7 @@ index 74166b5d9..4e744f8de 100644
 klog.Infof("Initializing kubevirtEPSController")
 diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
-index 6f6e3d322..b56882c12 100644
+index 53388eb8e..b56882c12 100644
 --- a/pkg/controller/kubevirteps/kubevirteps_controller.go
 +++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
 @@ -54,10 +54,10 @@ type Controller struct {
@@ -286,22 +286,6 @@ index 6f6e3d322..b56882c12 100644
 for _, eps := range slicesToDelete {
 err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
 if err != nil {
-@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
- // Create the desired port configuration
- var desiredPorts []discovery.EndpointPort
-
-- for _, port := range service.Spec.Ports {
-+ for i := range service.Spec.Ports {
- desiredPorts = append(desiredPorts, discovery.EndpointPort{
-- Port: &port.TargetPort.IntVal,
-- Protocol: &port.Protocol,
-- Name: &port.Name,
-+ Port: &service.Spec.Ports[i].TargetPort.IntVal,
-+ Protocol: &service.Spec.Ports[i].Protocol,
-+ Name: &service.Spec.Ports[i].Name,
- })
- }
-
 @@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
 return false
 }
@@ -437,18 +421,10 @@ index 6f6e3d322..b56882c12 100644
 return nil
 diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
-index 1fb86e25f..14d92d340 100644
+index 1c97035b4..14d92d340 100644
 --- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
 +++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
-@@ -13,6 +13,7 @@ import (
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr" -+ "k8s.io/apimachinery/pkg/util/sets" - dfake "k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/testing" -@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController { +@@ -190,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController { }: "VirtualMachineInstanceList", }) @@ -457,83 +433,87 @@ index 1fb86e25f..14d92d340 100644 err := controller.Init() if err != nil { -@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() { - return false, err - }).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion") - }) -+ -+ g.It("Should correctly handle multiple unique ports in EndpointSlice", func() { -+ // Create a VMI in the infra cluster -+ createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89") -+ -+ // Create an EndpointSlice in the tenant cluster -+ createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4, -+ *createPort("http", 80, v1.ProtocolTCP), -+ []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)}) -+ +@@ -697,51 +697,43 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() { + *createPort("http", 80, v1.ProtocolTCP), + []discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)}) + +- // Define several unique ports for the Service + // Define multiple ports for the Service -+ servicePorts := []v1.ServicePort{ -+ { + servicePorts := []v1.ServicePort{ + { +- Name: "client", +- Protocol: v1.ProtocolTCP, +- Port: 10001, +- TargetPort: intstr.FromInt(30396), +- NodePort: 30396, +- AppProtocol: nil, + Name: "client", + Protocol: v1.ProtocolTCP, + Port: 10001, + TargetPort: intstr.FromInt(30396), + NodePort: 30396, -+ }, -+ { + }, + { +- Name: "dashboard", +- Protocol: v1.ProtocolTCP, +- Port: 8265, +- TargetPort: intstr.FromInt(31003), +- NodePort: 31003, +- AppProtocol: nil, + Name: "dashboard", + Protocol: v1.ProtocolTCP, + Port: 8265, + TargetPort: intstr.FromInt(31003), + NodePort: 31003, -+ }, -+ { + }, + { +- Name: "metrics", +- Protocol: v1.ProtocolTCP, +- Port: 8080, +- TargetPort: intstr.FromInt(30452), +- NodePort: 30452, +- AppProtocol: nil, + Name: "metrics", + Protocol: v1.ProtocolTCP, + Port: 8080, + TargetPort: intstr.FromInt(30452), + NodePort: 30452, -+ }, -+ } -+ -+ createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster", + }, + } + +- // Create a Service with the first port + createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster", +- servicePorts[0], +- v1.ServiceExternalTrafficPolicyLocal) + servicePorts[0], v1.ServiceExternalTrafficPolicyLocal) -+ -+ svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{}) -+ Expect(err).To(BeNil()) -+ -+ svc.Spec.Ports = servicePorts -+ _, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) -+ Expect(err).To(BeNil()) -+ -+ var epsListMultiPort *discoveryv1.EndpointSliceList -+ -+ Eventually(func() (bool, error) { -+ epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{}) -+ if len(epsListMultiPort.Items) != 1 { -+ return false, err -+ } -+ -+ createdSlice := epsListMultiPort.Items[0] -+ expectedPortNames := 
[]string{"client", "dashboard", "metrics"} -+ foundPortNames := []string{} -+ -+ for _, port := range createdSlice.Ports { -+ if port.Name != nil { -+ foundPortNames = append(foundPortNames, *port.Name) -+ } -+ } -+ -+ if len(foundPortNames) != len(expectedPortNames) { -+ return false, err -+ } -+ -+ portSet := sets.NewString(foundPortNames...) -+ expectedPortSet := sets.NewString(expectedPortNames...) -+ return portSet.Equal(expectedPortSet), err -+ }).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates") -+ }) -+ + +- // Update the Service by adding the remaining ports + svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{}) + Expect(err).To(BeNil()) + + svc.Spec.Ports = servicePorts +- + _, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + + var epsListMultiPort *discoveryv1.EndpointSliceList + +- // Verify that the EndpointSlice is created with correct unique ports + Eventually(func() (bool, error) { + epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{}) + if len(epsListMultiPort.Items) != 1 { +@@ -758,7 +750,6 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() { + } + } + +- // Verify that all expected ports are present and without duplicates + if len(foundPortNames) != len(expectedPortNames) { + return false, err + } +@@ -769,5 +760,156 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() { + }).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates") + }) + + g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() { + createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89") + createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4, diff --git a/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/354.diff b/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/354.diff new file mode 100644 index 00000000..3410ea93 --- /dev/null +++ b/packages/apps/kubernetes/images/kubevirt-cloud-provider/patches/354.diff @@ -0,0 +1,142 @@ +diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go +index 53388eb8e..28644236f 100644 +--- a/pkg/controller/kubevirteps/kubevirteps_controller.go ++++ b/pkg/controller/kubevirteps/kubevirteps_controller.go +@@ -12,7 +12,6 @@ import ( + apiequality "k8s.io/apimachinery/pkg/api/equality" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +@@ -669,35 +668,50 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di + for _, slice := range tenantSlices { + for _, endpoint := range slice.Endpoints { + // find all unique nodes that correspond to an endpoint in a tenant slice ++ if endpoint.NodeName == nil { ++ klog.Warningf("Skipping endpoint without NodeName in slice %s/%s", slice.Namespace, slice.Name) ++ continue ++ } + nodeSet.Insert(*endpoint.NodeName) + } + } + +- klog.Infof("Desired nodes for service %s in namespace %s: 
%v", service.Name, service.Namespace, sets.List(nodeSet)) ++ klog.Infof("Desired nodes for service %s/%s: %v", service.Namespace, service.Name, sets.List(nodeSet)) + + for _, node := range sets.List(nodeSet) { + // find vmi for node name +- obj := &unstructured.Unstructured{} +- vmi := &kubevirtv1.VirtualMachineInstance{} +- +- obj, err := c.infraDynamic.Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")).Namespace(c.infraNamespace).Get(context.TODO(), node, metav1.GetOptions{}) ++ obj, err := c.infraDynamic. ++ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")). ++ Namespace(c.infraNamespace). ++ Get(context.TODO(), node, metav1.GetOptions{}) + if err != nil { +- klog.Errorf("Failed to get VirtualMachineInstance %s in namespace %s:%v", node, c.infraNamespace, err) ++ klog.Errorf("Failed to get VMI %q in namespace %q: %v", node, c.infraNamespace, err) + continue + } + ++ vmi := &kubevirtv1.VirtualMachineInstance{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, vmi) + if err != nil { + klog.Errorf("Failed to convert Unstructured to VirtualMachineInstance: %v", err) +- klog.Fatal(err) ++ continue + } + ++ if vmi.Status.NodeName == "" { ++ klog.Warningf("Skipping VMI %s/%s: NodeName is empty", vmi.Namespace, vmi.Name) ++ continue ++ } ++ nodeNamePtr := &vmi.Status.NodeName ++ + ready := vmi.Status.Phase == kubevirtv1.Running + serving := vmi.Status.Phase == kubevirtv1.Running + terminating := vmi.Status.Phase == kubevirtv1.Failed || vmi.Status.Phase == kubevirtv1.Succeeded + + for _, i := range vmi.Status.Interfaces { + if i.Name == "default" { ++ if i.IP == "" { ++ klog.Warningf("VMI %s/%s interface %q has no IP, skipping", vmi.Namespace, vmi.Name, i.Name) ++ continue ++ } + desiredEndpoints = append(desiredEndpoints, &discovery.Endpoint{ + Addresses: []string{i.IP}, + Conditions: discovery.EndpointConditions{ +@@ -705,9 +719,9 @@ func (c *Controller) getDesiredEndpoints(service *v1.Service, tenantSlices []*di + Serving: &serving, + Terminating: &terminating, + }, +- NodeName: &vmi.Status.NodeName, ++ NodeName: nodeNamePtr, + }) +- continue ++ break + } + } + } +diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go +index 1c97035b4..d205d0bed 100644 +--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go ++++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go +@@ -771,3 +771,55 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() { + + }) + }) ++ ++var _ = g.Describe("getDesiredEndpoints", func() { ++ g.It("should skip endpoints without NodeName and VMIs without NodeName or IP", func() { ++ // Setup controller ++ ctrl := setupTestKubevirtEPSController().controller ++ ++ // Manually inject dynamic client content (1 VMI with missing NodeName) ++ vmi := createUnstructuredVMINode("vmi-without-node", "", "10.0.0.1") // empty NodeName ++ _, err := ctrl.infraDynamic. ++ Resource(kubevirtv1.VirtualMachineInstanceGroupVersionKind.GroupVersion().WithResource("virtualmachineinstances")). ++ Namespace(infraNamespace). 
++ Create(context.TODO(), vmi, metav1.CreateOptions{})
++ Expect(err).To(BeNil())
++
++ // Create service
++ svc := createInfraServiceLB("test-svc", "test-svc", "test-cluster",
++ v1.ServicePort{
++ Name: "http",
++ Port: 80,
++ TargetPort: intstr.FromInt(8080),
++ Protocol: v1.ProtocolTCP,
++ },
++ v1.ServiceExternalTrafficPolicyLocal,
++ )
++
++ // One endpoint has nil NodeName, another is valid
++ nodeName := "vmi-without-node"
++ tenantSlice := &discoveryv1.EndpointSlice{
++ ObjectMeta: metav1.ObjectMeta{
++ Name: "slice",
++ Namespace: tenantNamespace,
++ Labels: map[string]string{
++ discoveryv1.LabelServiceName: "test-svc",
++ },
++ },
++ AddressType: discoveryv1.AddressTypeIPv4,
++ Endpoints: []discoveryv1.Endpoint{
++ { // should be skipped due to nil NodeName
++ Addresses: []string{"10.1.1.1"},
++ NodeName: nil,
++ },
++ { // will hit VMI without NodeName and also be skipped
++ Addresses: []string{"10.1.1.2"},
++ NodeName: &nodeName,
++ },
++ },
++ }
++
++ endpoints := ctrl.getDesiredEndpoints(svc, []*discoveryv1.EndpointSlice{tenantSlice})
++ Expect(endpoints).To(HaveLen(0), "Expected no endpoints due to missing NodeName or IP")
++ })
++})
diff --git a/packages/apps/versions_map b/packages/apps/versions_map
index f40d6f20..3c191764 100644
--- a/packages/apps/versions_map
+++ b/packages/apps/versions_map
@@ -71,7 +71,8 @@ kubernetes 0.19.0 93bdf411
 kubernetes 0.20.0 609e7ede
 kubernetes 0.20.1 f9f8bb2f
 kubernetes 0.21.0 6130f43d
-kubernetes 0.23.1 HEAD
+kubernetes 0.23.1 632224a3
+kubernetes 0.23.2 HEAD
 mysql 0.1.0 263e47be
 mysql 0.2.0 c24a103f
 mysql 0.3.0 53f2365e
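
Note on the port-handling change carried in patches/341.diff above: the old hunk took addresses of a range variable (&port.TargetPort.IntVal, &port.Protocol, &port.Name). Before Go 1.22 the range variable is a single variable reused on every iteration, so all EndpointPort entries end up pointing at the values of the last Service port; indexing the slice (&service.Spec.Ports[i]...) gives each pointer a distinct element. A minimal, self-contained sketch of the pitfall follows — illustrative names only, PortSpec is a stand-in type, not anything from the patch:

package main

import "fmt"

// PortSpec stands in for v1.ServicePort to keep the example dependency-free.
type PortSpec struct {
	Name string
	Port int32
}

func main() {
	ports := []PortSpec{{"client", 10001}, {"dashboard", 8265}, {"metrics", 8080}}

	// Buggy form (old hunk): pointers into the reused range variable.
	// With Go <= 1.21 every pointer aliases the same variable, so all
	// three entries report the last port name, "metrics".
	var bad []*string
	for _, p := range ports {
		bad = append(bad, &p.Name)
	}

	// Fixed form (new hunk): index into the slice, so each pointer
	// refers to a distinct element of the original slice.
	var good []*string
	for i := range ports {
		good = append(good, &ports[i].Name)
	}

	fmt.Println(*bad[0], *bad[1], *bad[2])    // Go <= 1.21: metrics metrics metrics
	fmt.Println(*good[0], *good[1], *good[2]) // always: client dashboard metrics
}

The indexed form is the safe choice regardless of Go version, since the stored pointers refer to the Service's actual port entries rather than to a per-iteration copy.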