Add API spec for machine ip pool ref (#429)
* WIP: Add API spec for machine ip pool ref
* Adjust code to comply with the new API
* Implement the logic for the IP pool ref for ProxmoxMachines
* Make the default network device required when a network spec is set, and add tests
commit b6635a09fa (parent 5a50fbda68), committed by GitHub
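In short, the commit moves the IPAM pool references into an inline IPPoolConfig on NetworkDevice, so a ProxmoxMachine can point its default device, and any additional device, at its own pool. A minimal Go sketch of how the new fields compose, based on the types changed below; this is not code from the repository, the pool names are placeholders, and the api/v1alpha1 import path is assumed:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"

	// Module path assumed for illustration; use the provider's actual api/v1alpha1 package.
	infrav1alpha1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1"
)

// machineNetworkSpec sketches the new API surface: the inline IPPoolConfig on
// NetworkDevice lets the default device and additional devices reference their
// own IPAM pools. Pool names here are invented.
func machineNetworkSpec() *infrav1alpha1.NetworkSpec {
	return &infrav1alpha1.NetworkSpec{
		// Default is now required whenever spec.network is set.
		Default: &infrav1alpha1.NetworkDevice{
			Bridge: "vmbr0",
			Model:  ptr.To("virtio"),
			// Optional for net0: if omitted, the cluster's default IPAM pool is used.
			IPPoolConfig: infrav1alpha1.IPPoolConfig{
				IPv4PoolRef: &corev1.TypedLocalObjectReference{
					APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
					Kind:     "GlobalInClusterIPPool",
					Name:     "shared-pool",
				},
			},
		},
		AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{{
			Name: "net1",
			NetworkDevice: infrav1alpha1.NetworkDevice{
				Bridge: "vmbr1",
				// Required for additional devices: at least one of IPv4PoolRef/IPv6PoolRef.
				IPPoolConfig: infrav1alpha1.IPPoolConfig{
					IPv4PoolRef: &corev1.TypedLocalObjectReference{
						APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
						Kind:     "InClusterIPPool",
						Name:     "net1-pool",
					},
				},
			},
		}},
	}
}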
@@ -271,8 +271,8 @@ type NetworkSpec struct {
VirtualNetworkDevices `json:",inline"`
}

// InterfaceConfig contains all configurables a network interface can have.
type InterfaceConfig struct {
// IPPoolConfig defines the IPAM pool ref.
type IPPoolConfig struct {
// IPv4PoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses.
// The network device will use an available IP address from the referenced pool.
// This can be combined with `IPv6PoolRef` in order to enable dual stack.

@@ -288,13 +288,10 @@ type InterfaceConfig struct {
// +kubebuilder:validation:XValidation:rule="self.apiGroup == 'ipam.cluster.x-k8s.io'",message="ipv6PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io"
// +kubebuilder:validation:XValidation:rule="self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'",message="ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool"
IPv6PoolRef *corev1.TypedLocalObjectReference `json:"ipv6PoolRef,omitempty"`
}

// DNSServers contains information about nameservers to be used for this interface.
// If this field is not set, it will use the default dns servers from the ProxmoxCluster.
// +optional
// +kubebuilder:validation:MinItems=1
DNSServers []string `json:"dnsServers,omitempty"`

// InterfaceConfig contains all configurables a network interface can have.
type InterfaceConfig struct {
// Routing is the common spec of routes and routing policies to all interfaces and VRFs.
Routing `json:",inline"`

@@ -404,6 +401,19 @@ type NetworkDevice struct {
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=4094
VLAN *uint16 `json:"vlan,omitempty"`

// DNSServers contains information about nameservers to be used for this interface.
// If this field is not set, it will use the default dns servers from the ProxmoxCluster.
// +optional
// +kubebuilder:validation:MinItems=1
DNSServers []string `json:"dnsServers,omitempty"`

// IPPoolConfig defines config for IP Pool ref.
// For default device 'net0' the IP pool is optional,
// If not set, the default IPAM pool will be used.
// For additional devices, the IP pool is required (IPV4/IPV6).
// +optional
IPPoolConfig `json:",inline"`
}

// MTU is the network device Maximum Transmission Unit. MTUs below 1280 break IPv6.
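As the doc comments above note, the two refs can be combined for dual stack. A small illustrative fragment (same imports as the sketch near the top of this page; pool names invented, to be placed inside a function):

// Dual-stack device: both pool refs set on the same NetworkDevice.
dev := infrav1alpha1.NetworkDevice{
	Bridge: "vmbr0",
	IPPoolConfig: infrav1alpha1.IPPoolConfig{
		IPv4PoolRef: &corev1.TypedLocalObjectReference{
			APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
			Kind:     "InClusterIPPool",
			Name:     "v4-pool",
		},
		IPv6PoolRef: &corev1.TypedLocalObjectReference{
			APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
			Kind:     "GlobalInClusterIPPool",
			Name:     "v6-pool",
		},
	},
}
_ = dev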
@@ -179,15 +179,17 @@ var _ = Describe("ProxmoxMachine Test", func() {
},
AdditionalDevices: []AdditionalNetworkDevice{
{
NetworkDevice: NetworkDevice{},
Name: "net0",
InterfaceConfig: InterfaceConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "some-pool",
NetworkDevice: NetworkDevice{
IPPoolConfig: IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "some-pool",
},
},
},
Name: "net0",
InterfaceConfig: InterfaceConfig{},
},
},
}

@@ -200,14 +202,15 @@ var _ = Describe("ProxmoxMachine Test", func() {
dm.Spec.Network = &NetworkSpec{
AdditionalDevices: []AdditionalNetworkDevice{
{
NetworkDevice: NetworkDevice{},
Name: "net1",
InterfaceConfig: InterfaceConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("apps"),
Name: "some-app",
NetworkDevice: NetworkDevice{
IPPoolConfig: IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("apps"),
Name: "some-app",
},
},
},
Name: "net1",
},
},
}

@@ -219,13 +222,17 @@ var _ = Describe("ProxmoxMachine Test", func() {
dm.Spec.Network = &NetworkSpec{
AdditionalDevices: []AdditionalNetworkDevice{
{
NetworkDevice: NetworkDevice{},
Name: "net1",
InterfaceConfig: InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "ConfigMap",
Name: "some-app",
}},
NetworkDevice: NetworkDevice{
IPPoolConfig: IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "ConfigMap",
Name: "some-app",
},
},
},
Name: "net1",
InterfaceConfig: InterfaceConfig{},
},
},
}

@@ -237,14 +244,16 @@ var _ = Describe("ProxmoxMachine Test", func() {
dm.Spec.Network = &NetworkSpec{
AdditionalDevices: []AdditionalNetworkDevice{
{
NetworkDevice: NetworkDevice{},
Name: "net1",
InterfaceConfig: InterfaceConfig{
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("apps"),
Name: "some-app",
NetworkDevice: NetworkDevice{
IPPoolConfig: IPPoolConfig{
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("apps"),
Name: "some-app",
},
},
},
Name: "net1",
InterfaceConfig: InterfaceConfig{},
},
},
}

@@ -256,15 +265,17 @@ var _ = Describe("ProxmoxMachine Test", func() {
dm.Spec.Network = &NetworkSpec{
AdditionalDevices: []AdditionalNetworkDevice{
{
NetworkDevice: NetworkDevice{},
Name: "net1",
InterfaceConfig: InterfaceConfig{
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "ConfigMap",
Name: "some-app",
NetworkDevice: NetworkDevice{
IPPoolConfig: IPPoolConfig{
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "ConfigMap",
Name: "some-app",
},
},
},
Name: "net1",
InterfaceConfig: InterfaceConfig{},
},
},
}
@@ -84,7 +84,7 @@ func (in *IPConfigSpec) DeepCopy() *IPConfigSpec {
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceConfig) DeepCopyInto(out *InterfaceConfig) {
func (in *IPPoolConfig) DeepCopyInto(out *IPPoolConfig) {
*out = *in
if in.IPv4PoolRef != nil {
in, out := &in.IPv4PoolRef, &out.IPv4PoolRef

@@ -96,11 +96,21 @@ func (in *InterfaceConfig) DeepCopyInto(out *InterfaceConfig) {
*out = new(v1.TypedLocalObjectReference)
(*in).DeepCopyInto(*out)
}
if in.DNSServers != nil {
in, out := &in.DNSServers, &out.DNSServers
*out = make([]string, len(*in))
copy(*out, *in)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolConfig.
func (in *IPPoolConfig) DeepCopy() *IPPoolConfig {
if in == nil {
return nil
}
out := new(IPPoolConfig)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InterfaceConfig) DeepCopyInto(out *InterfaceConfig) {
*out = *in
in.Routing.DeepCopyInto(&out.Routing)
if in.LinkMTU != nil {
in, out := &in.LinkMTU, &out.LinkMTU

@@ -152,6 +162,12 @@ func (in *NetworkDevice) DeepCopyInto(out *NetworkDevice) {
*out = new(uint16)
**out = **in
}
if in.DNSServers != nil {
in, out := &in.DNSServers, &out.DNSServers
*out = make([]string, len(*in))
copy(*out, *in)
}
in.IPPoolConfig.DeepCopyInto(&out.IPPoolConfig)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDevice.
@@ -388,6 +388,80 @@ spec:
to the machine.
minLength: 1
type: string
dnsServers:
description: |-
DNSServers contains information about nameservers to be used for this interface.
If this field is not set, it will use the default dns servers from the ProxmoxCluster.
items:
type: string
minItems: 1
type: array
ipv4PoolRef:
description: |-
IPv4PoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses.
The network device will use an available IP address from the referenced pool.
This can be combined with `IPv6PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv4PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
ipv6PoolRef:
description: |-
IPv6PoolRef is a reference to an IPAM pool resource, which exposes IPv6 addresses.
The network device will use an available IP address from the referenced pool.
this can be combined with `IPv4PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv6PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
model:
default: virtio
description: Model is the network device model.
@@ -418,6 +418,80 @@ spec:
to attach to the machine.
minLength: 1
type: string
dnsServers:
description: |-
DNSServers contains information about nameservers to be used for this interface.
If this field is not set, it will use the default dns servers from the ProxmoxCluster.
items:
type: string
minItems: 1
type: array
ipv4PoolRef:
description: |-
IPv4PoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses.
The network device will use an available IP address from the referenced pool.
This can be combined with `IPv6PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv4PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
ipv6PoolRef:
description: |-
IPv6PoolRef is a reference to an IPAM pool resource, which exposes IPv6 addresses.
The network device will use an available IP address from the referenced pool.
this can be combined with `IPv4PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv6PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
model:
default: virtio
description: Model is the network device
@@ -361,6 +361,70 @@ spec:
machine.
minLength: 1
type: string
dnsServers:
description: |-
DNSServers contains information about nameservers to be used for this interface.
If this field is not set, it will use the default dns servers from the ProxmoxCluster.
items:
type: string
minItems: 1
type: array
ipv4PoolRef:
description: |-
IPv4PoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses.
The network device will use an available IP address from the referenced pool.
This can be combined with `IPv6PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv4PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
ipv6PoolRef:
description: |-
IPv6PoolRef is a reference to an IPAM pool resource, which exposes IPv6 addresses.
The network device will use an available IP address from the referenced pool.
this can be combined with `IPv4PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv6PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
model:
default: virtio
description: Model is the network device model.
@@ -388,6 +388,78 @@ spec:
to the machine.
minLength: 1
type: string
dnsServers:
description: |-
DNSServers contains information about nameservers to be used for this interface.
If this field is not set, it will use the default dns servers from the ProxmoxCluster.
items:
type: string
minItems: 1
type: array
ipv4PoolRef:
description: |-
IPv4PoolRef is a reference to an IPAM Pool resource, which exposes IPv4 addresses.
The network device will use an available IP address from the referenced pool.
This can be combined with `IPv6PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv4PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv4PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
ipv6PoolRef:
description: |-
IPv6PoolRef is a reference to an IPAM pool resource, which exposes IPv6 addresses.
The network device will use an available IP address from the referenced pool.
this can be combined with `IPv4PoolRef` in order to enable dual stack.
properties:
apiGroup:
description: |-
APIGroup is the group for the resource being referenced.
If APIGroup is not specified, the specified Kind must be in the core API group.
For any other third-party types, APIGroup is required.
type: string
kind:
description: Kind is the type of resource being referenced
type: string
name:
description: Name is the name of resource being referenced
type: string
required:
- kind
- name
type: object
x-kubernetes-map-type: atomic
x-kubernetes-validations:
- message: ipv6PoolRef allows only IPAM apiGroup ipam.cluster.x-k8s.io
rule: self.apiGroup == 'ipam.cluster.x-k8s.io'
- message: ipv6PoolRef allows either InClusterIPPool or GlobalInClusterIPPool
rule: self.kind == 'InClusterIPPool' || self.kind == 'GlobalInClusterIPPool'
model:
default: virtio
description: Model is the network device model.
@@ -284,16 +284,21 @@ func getDefaultNetworkDevice(ctx context.Context, machineScope *scope.MachineSco
var config types.NetworkConfigData

// default network device ipv4.
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil {
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil ||
(machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef != nil) {
conf, err := getNetworkConfigDataForDevice(ctx, machineScope, DefaultNetworkDeviceIPV4)
if err != nil {
return nil, errors.Wrapf(err, "unable to get network config data for device=%s", DefaultNetworkDeviceIPV4)
}
if machineScope.ProxmoxMachine.Spec.Network != nil && len(machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers) != 0 {
config.DNSServers = machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers
}
config = *conf
}

// default network device ipv6.
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil {
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil ||
(machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef != nil) {
conf, err := getNetworkConfigDataForDevice(ctx, machineScope, DefaultNetworkDeviceIPV6)
if err != nil {
return nil, errors.Wrapf(err, "unable to get network config data for device=%s", DefaultNetworkDeviceIPV6)

@@ -309,6 +314,10 @@ func getDefaultNetworkDevice(ctx context.Context, machineScope *scope.MachineSco
config.Gateway6 = conf.Gateway6
config.Metric6 = conf.Metric6
}

if machineScope.ProxmoxMachine.Spec.Network != nil && len(machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers) != 0 {
config.DNSServers = machineScope.ProxmoxMachine.Spec.Network.Default.DNSServers
}
}

// Default Network Device lacks a datastructure to transport MTU.

@@ -330,16 +339,16 @@ func getDefaultNetworkDevice(ctx context.Context, machineScope *scope.MachineSco
return []types.NetworkConfigData{config}, nil
}

func getCommonInterfaceConfig(ctx context.Context, machineScope *scope.MachineScope, ciconfig *types.NetworkConfigData, ifconfig infrav1alpha1.InterfaceConfig) error {
if len(ifconfig.DNSServers) != 0 {
ciconfig.DNSServers = ifconfig.DNSServers
func getCommonInterfaceConfig(ctx context.Context, machineScope *scope.MachineScope, ciconfig *types.NetworkConfigData, nic infrav1alpha1.AdditionalNetworkDevice) error {
if len(nic.DNSServers) != 0 {
ciconfig.DNSServers = nic.DNSServers
}
ciconfig.Routes = *getRoutingData(ifconfig.Routing.Routes)
ciconfig.FIBRules = *getRoutingPolicyData(ifconfig.Routing.RoutingPolicy)
ciconfig.LinkMTU = ifconfig.LinkMTU
ciconfig.Routes = *getRoutingData(nic.InterfaceConfig.Routing.Routes)
ciconfig.FIBRules = *getRoutingPolicyData(nic.InterfaceConfig.Routing.RoutingPolicy)
ciconfig.LinkMTU = nic.InterfaceConfig.LinkMTU

// Only set IPAddresses if they haven't been set yet
if ippool := ifconfig.IPv4PoolRef; ippool != nil && ciconfig.IPAddress == "" {
if ippool := nic.NetworkDevice.IPv4PoolRef; ippool != nil && ciconfig.IPAddress == "" {
// retrieve IPAddress.
var ifname = fmt.Sprintf("%s-%s", ciconfig.Name, infrav1alpha1.DefaultSuffix)
ipAddr, err := findIPAddress(ctx, machineScope, ifname)

@@ -355,7 +364,7 @@ func getCommonInterfaceConfig(ctx context.Context, machineScope *scope.MachineSc
ciconfig.Gateway = ipAddr.Spec.Gateway
ciconfig.Metric = metric
}
if ifconfig.IPv6PoolRef != nil && ciconfig.IPV6Address == "" {
if nic.NetworkDevice.IPv6PoolRef != nil && ciconfig.IPV6Address == "" {
var ifname = fmt.Sprintf("%s-%s", ciconfig.Name, infrav1alpha1.DefaultSuffix+"6")
ipAddr, err := findIPAddress(ctx, machineScope, ifname)
if err != nil {

@@ -436,7 +445,7 @@ func getAdditionalNetworkDevices(ctx context.Context, machineScope *scope.Machin
config.Gateway6 = conf.Gateway6
}

err := getCommonInterfaceConfig(ctx, machineScope, config, nic.InterfaceConfig)
err := getCommonInterfaceConfig(ctx, machineScope, config, nic)
if err != nil {
return nil, errors.Wrapf(err, "unable to get network config data for device=%s", nic.Name)
}
@@ -84,13 +84,15 @@ func TestReconcileBootstrapData_NoNetworkConfig_UpdateStatus(t *testing.T) {
func TestReconcileBootstrapData_UpdateStatus(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{
Bridge: "vmbr0",
Model: ptr.To("virtio"),
},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{
DNSServers: []string{"1.2.3.4"},
},
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), DNSServers: []string{"1.2.3.4"}},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{},
},
},
}

@@ -209,21 +211,21 @@ func TestGetCommonInterfaceConfig_MissingIPPool(t *testing.T) {
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"), IPPoolConfig: infrav1alpha1.IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "net1-inet",
},
},
}},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{},
},
},
}

cfg := &types.NetworkConfigData{Name: "net1"}
err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0].InterfaceConfig)
err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0])
require.Error(t, err)
}

@@ -240,7 +242,7 @@ func TestGetCommonInterfaceConfig_NoIPAddresses(t *testing.T) {
}

cfg := &types.NetworkConfigData{Name: "net1"}
err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0].InterfaceConfig)
err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0])
require.NoError(t, err)
}

@@ -251,20 +253,22 @@ func TestGetCommonInterfaceConfig(t *testing.T) {
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")},
Name: "net1",
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"),
IPPoolConfig: infrav1alpha1.IPPoolConfig{
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "net1-inet6",
},
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "net1-inet",
},
},
DNSServers: []string{"1.2.3.4"}},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{
DNSServers: []string{"1.2.3.4"},
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "net1-inet6",
},
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "net1-inet",
},
LinkMTU: &MTU,
Routing: infrav1alpha1.Routing{
Routes: []infrav1alpha1.RouteSpec{

@@ -290,7 +294,7 @@ func TestGetCommonInterfaceConfig(t *testing.T) {
createIP6AddressResource(t, kubeClient, machineScope, "net1", "2001:db8::9")

cfg := &types.NetworkConfigData{Name: "net1"}
err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0].InterfaceConfig)
err := getCommonInterfaceConfig(context.Background(), machineScope, cfg, machineScope.ProxmoxMachine.Spec.Network.AdditionalDevices[0])
require.Equal(t, "10.0.0.10/24", cfg.IPAddress)
require.Equal(t, "2001:db8::9/64", cfg.IPV6Address)
require.Equal(t, "1.2.3.4", cfg.DNSServers[0])

@@ -358,23 +362,28 @@ func TestReconcileBootstrapData_DualStack_AdditionalDevices(t *testing.T) {
}

machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{
Bridge: "vmbr0",
Model: ptr.To("virtio"),
},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{
DNSServers: []string{"1.2.3.4"},
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "sample",
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"),
IPPoolConfig: infrav1alpha1.IPPoolConfig{
IPv6PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "sample",
},
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "sample",
},
},
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "sample",
},
},
DNSServers: []string{"1.2.3.4"}},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{},
},
},
}

@@ -405,6 +414,7 @@ func TestReconcileBootstrapData_VirtualDevices_VRF(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)

machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0", Model: ptr.To("virtio")},
VirtualNetworkDevices: infrav1alpha1.VirtualNetworkDevices{
VRFs: []infrav1alpha1.VRFDevice{{
Interfaces: []string{"net1"},

@@ -414,15 +424,17 @@ func TestReconcileBootstrapData_VirtualDevices_VRF(t *testing.T) {
},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio")},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{
NetworkDevice: infrav1alpha1.NetworkDevice{Bridge: "vmbr1", Model: ptr.To("virtio"),
IPPoolConfig: infrav1alpha1.IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "sample",
}},
DNSServers: []string{"1.2.3.4"},
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "sample",
}},
},
Name: "net1",
InterfaceConfig: infrav1alpha1.InterfaceConfig{},
},
},
}

@@ -561,3 +573,38 @@ func TestIgnitionISOInjector(t *testing.T) {
require.NotNil(t, injector.(*inject.ISOInjector).IgnitionEnricher)
require.Equal(t, []byte("data"), injector.(*inject.ISOInjector).IgnitionEnricher.BootstrapData)
}

func TestReconcileBootstrapData_DefaultDeviceIPPoolRef(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{
Bridge: "vmbr0",
Model: ptr.To("virtio"),
IPPoolConfig: infrav1alpha1.IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "GlobalInClusterIPPool",
Name: "sample-shared-pool",
},
},
},
}

vm := newVMWithNets("virtio=A6:23:64:4D:84:CB,bridge=vmbr0")
vm.VirtualMachineConfig.SMBios1 = biosUUID
machineScope.SetVirtualMachine(vm)
machineScope.ProxmoxMachine.Status.IPAddresses = map[string]infrav1alpha1.IPAddress{infrav1alpha1.DefaultNetworkDevice: {IPV4: "10.5.10.10"}}
createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.5.10.10")

createBootstrapSecret(t, kubeClient, machineScope, cloudinit.FormatCloudConfig)
getISOInjector = func(_ *proxmox.VirtualMachine, _ []byte, _, _ cloudinit.Renderer) isoInjector {
return FakeISOInjector{}
}
t.Cleanup(func() { getISOInjector = defaultISOInjector })

requeue, err := reconcileBootstrapData(context.Background(), machineScope)
require.NoError(t, err)
require.False(t, requeue)
require.False(t, conditions.Has(machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition))
require.True(t, *machineScope.ProxmoxMachine.Status.BootstrapDataProvided)
}
@@ -140,8 +140,14 @@ func handleIPAddressForDevice(ctx context.Context, machineScope *scope.MachineSc

func handleDefaultDevice(ctx context.Context, machineScope *scope.MachineScope, addresses map[string]infrav1alpha1.IPAddress) (bool, error) {
// default network device ipv4.
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil {
ip, err := handleIPAddressForDevice(ctx, machineScope, infrav1alpha1.DefaultNetworkDevice, infrav1alpha1.IPV4Format, nil)
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv4Config != nil ||
(machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef != nil) {
var ipamRef *corev1.TypedLocalObjectReference
if machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef != nil {
ipamRef = machineScope.ProxmoxMachine.Spec.Network.Default.IPv4PoolRef
}

ip, err := handleIPAddressForDevice(ctx, machineScope, infrav1alpha1.DefaultNetworkDevice, infrav1alpha1.IPV4Format, ipamRef)
if err != nil || ip == "" {
return true, err
}

@@ -151,8 +157,14 @@ func handleDefaultDevice(ctx context.Context, machineScope *scope.MachineScope,
}

// default network device ipv6.
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil {
ip, err := handleIPAddressForDevice(ctx, machineScope, infrav1alpha1.DefaultNetworkDevice, infrav1alpha1.IPV6Format, nil)
if machineScope.InfraCluster.ProxmoxCluster.Spec.IPv6Config != nil ||
(machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef != nil) {
var ipamRef *corev1.TypedLocalObjectReference
if machineScope.ProxmoxMachine.Spec.Network != nil && machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef != nil {
ipamRef = machineScope.ProxmoxMachine.Spec.Network.Default.IPv6PoolRef
}

ip, err := handleIPAddressForDevice(ctx, machineScope, infrav1alpha1.DefaultNetworkDevice, infrav1alpha1.IPV6Format, ipamRef)
if err != nil || ip == "" {
return true, err
}

@@ -184,9 +196,9 @@ func handleAdditionalDevices(ctx context.Context, machineScope *scope.MachineSco
return true, errors.Wrapf(err, "unable to handle IPAddress for device %s", net.Name)
}

addresses[net.Name] = infrav1alpha1.IPAddress{
IPV6: ip,
}
addr := addresses[net.Name]
addr.IPV6 = ip
addresses[net.Name] = addr
}
}
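The handleDefaultDevice changes above come down to a precedence rule: a pool ref set on the machine's default device is passed to the claim, while a nil ref tells the helper to fall back to the cluster's default in-cluster pool. A paraphrase of that selection, not the project's code, shown for the IPv4 side only and assuming the same imports as the first sketch on this page:

// defaultPoolRef returns the machine-level IPv4 pool ref for the default
// device, or nil to signal "use the cluster's default IPAM pool".
func defaultPoolRef(network *infrav1alpha1.NetworkSpec) *corev1.TypedLocalObjectReference {
	if network != nil && network.Default != nil && network.Default.IPv4PoolRef != nil {
		return network.Default.IPv4PoolRef
	}
	return nil
}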
@@ -40,8 +40,9 @@ func TestReconcileIPAddresses_CreateDefaultClaim(t *testing.T) {
func TestReconcileIPAddresses_CreateAdditionalClaim(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{Name: "net1", InterfaceConfig: infrav1alpha1.InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "InClusterIPPool", Name: "custom"}}},
{Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "InClusterIPPool", Name: "custom"}}}},
},
}
vm := newStoppedVM()

@@ -74,8 +75,12 @@ func TestReconcileIPAddresses_AddIPTag(t *testing.T) {
func TestReconcileIPAddresses_SetIPAddresses(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{Name: "net1", InterfaceConfig: infrav1alpha1.InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom"}}},
{Name: "net1",
NetworkDevice: infrav1alpha1.NetworkDevice{
IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom"}},
}},
},
}
vm := newStoppedVM()

@@ -94,9 +99,10 @@ func TestReconcileIPAddresses_MultipleDevices(t *testing.T) {
func TestReconcileIPAddresses_MultipleDevices(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{Name: "net1", InterfaceConfig: infrav1alpha1.InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "ipv4pool"}}},
{Name: "net2", InterfaceConfig: infrav1alpha1.InterfaceConfig{IPv6PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "ipv6pool"}}},
{Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "ipv4pool"}}}},
{Name: "net2", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv6PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "ipv6pool"}}}},
},
}

@@ -132,8 +138,9 @@ func TestReconcileIPAddresses_IPV6(t *testing.T) {
Gateway: "fe80::1",
}
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{Bridge: "vmbr0"},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{Name: "net1", InterfaceConfig: infrav1alpha1.InterfaceConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom"}}},
{Name: "net1", NetworkDevice: infrav1alpha1.NetworkDevice{IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom"}}}},
},
}
vm := newStoppedVM()

@@ -149,3 +156,29 @@ func TestReconcileIPAddresses_IPV6(t *testing.T) {
require.True(t, requeue)
requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)
}

func TestReconcileIPAddresses_MachineIPPoolRef(t *testing.T) {
machineScope, _, kubeClient := setupReconcilerTest(t)
machineScope.ProxmoxMachine.Spec.Network = &infrav1alpha1.NetworkSpec{
Default: &infrav1alpha1.NetworkDevice{
IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom-ips"}},
},
AdditionalDevices: []infrav1alpha1.AdditionalNetworkDevice{
{Name: "net1",
NetworkDevice: infrav1alpha1.NetworkDevice{
IPPoolConfig: infrav1alpha1.IPPoolConfig{IPv4PoolRef: &corev1.TypedLocalObjectReference{Kind: "GlobalInClusterIPPool", Name: "custom-additional-ips"}},
}},
},
}
vm := newStoppedVM()
vm.VirtualMachineConfig.Tags = ipTag
machineScope.SetVirtualMachine(vm)
createIP4AddressResource(t, kubeClient, machineScope, infrav1alpha1.DefaultNetworkDevice, "10.10.10.10")
createIP4AddressResource(t, kubeClient, machineScope, "net1", "10.100.10.10")
createIPPools(t, kubeClient, machineScope)

requeue, err := reconcileIPAddresses(context.Background(), machineScope)
require.NoError(t, err)
require.True(t, requeue)
requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)
}
@@ -99,6 +99,16 @@ func validateNetworks(machine *infrav1.ProxmoxMachine) error {

gk, name := machine.GroupVersionKind().GroupKind(), machine.GetName()

if machine.Spec.Network != nil && machine.Spec.Network.Default == nil {
return apierrors.NewInvalid(
gk,
name,
field.ErrorList{
field.Invalid(
field.NewPath("spec", "network", "default"), machine.Spec.Network.Default, "default network device must be set when setting network spec"),
})
}

if machine.Spec.Network.Default != nil {
err := validateNetworkDeviceMTU(machine.Spec.Network.Default)
if err != nil {

@@ -113,7 +123,18 @@ func validateNetworks(machine *infrav1.ProxmoxMachine) error {
}

for i := range machine.Spec.Network.AdditionalDevices {
err := validateNetworkDeviceMTU(&machine.Spec.Network.AdditionalDevices[i].NetworkDevice)
err := validateIPPoolRef(machine.Spec.Network.AdditionalDevices[i])
if err != nil {
return apierrors.NewInvalid(
gk,
name,
field.ErrorList{
field.Invalid(
field.NewPath("spec", "network", "additionalDevices", fmt.Sprint(i), "IPPoolConfig"), machine.Spec.Network.AdditionalDevices[i], err.Error()),
})
}

err = validateNetworkDeviceMTU(&machine.Spec.Network.AdditionalDevices[i].NetworkDevice)
if err != nil {
return apierrors.NewInvalid(
gk,

@@ -214,3 +235,11 @@ func validateNetworkDeviceMTU(device *infrav1.NetworkDevice) error {

return nil
}

func validateIPPoolRef(net infrav1.AdditionalNetworkDevice) error {
if net.IPv4PoolRef == nil && net.IPv6PoolRef == nil {
return fmt.Errorf("at least one of IPv4PoolRef or IPv6PoolRef must be set")
}

return nil
}
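The new validateIPPoolRef helper above rejects additional devices that carry no pool reference at all. A sketch of the behaviour as an in-package test; this is not part of the commit and assumes access to the unexported helper plus the same imports as the webhook tests below (testing, corev1, ptr, infrav1):

func TestValidateIPPoolRef_Sketch(t *testing.T) {
	// No pool ref at all: the helper should return an error.
	bare := infrav1.AdditionalNetworkDevice{Name: "net1"}
	if err := validateIPPoolRef(bare); err == nil {
		t.Fatal("expected an error for a device without IPv4PoolRef or IPv6PoolRef")
	}

	// An IPv4 pool ref alone satisfies the rule.
	withPool := infrav1.AdditionalNetworkDevice{
		Name: "net1",
		NetworkDevice: infrav1.NetworkDevice{
			IPPoolConfig: infrav1.IPPoolConfig{
				IPv4PoolRef: &corev1.TypedLocalObjectReference{
					APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
					Kind:     "InClusterIPPool",
					Name:     "some-pool",
				},
			},
		},
	}
	if err := validateIPPoolRef(withPool); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}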
@@ -19,9 +19,10 @@ package webhook
import (
"time"

corev1 "k8s.io/api/core/v1"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"

@@ -77,6 +78,28 @@ var _ = Describe("Controller Test", func() {
machine.Spec.Network.AdditionalDevices[0].InterfaceConfig.Routing.RoutingPolicy[0].Table = nil
g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("routing policy [0] requires a table")))
})

It("should disallow machine with network spec but without Default device", func() {
machine := validProxmoxMachine("test-machine")
machine.Spec.Network = &infrav1.NetworkSpec{
AdditionalDevices: []infrav1.AdditionalNetworkDevice{
{
Name: "net1",
NetworkDevice: infrav1.NetworkDevice{
Bridge: "vmbr2",
IPPoolConfig: infrav1.IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
Kind: "InClusterIPPool",
Name: "simple-pool",
},
},
},
},
},
}
g.Expect(k8sClient.Create(testEnv.GetContext(), &machine)).To(MatchError(ContainSubstring("default network device must be set when setting network spec")))
})
})

Context("update proxmox cluster", func() {

@@ -148,13 +171,15 @@ func validProxmoxMachine(name string) infrav1.ProxmoxMachine {
Model: ptr.To("virtio"),
MTU: ptr.To(uint16(1500)),
VLAN: ptr.To(uint16(100)),
IPPoolConfig: infrav1.IPPoolConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
Name: "simple-pool",
Kind: "InClusterIPPool",
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
},
},
},
InterfaceConfig: infrav1.InterfaceConfig{
IPv4PoolRef: &corev1.TypedLocalObjectReference{
Name: "simple-pool",
Kind: "InClusterIPPool",
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
},
Routing: infrav1.Routing{
RoutingPolicy: []infrav1.RoutingPolicySpec{{
Table: ptr.To(uint32(665)),
@@ -231,7 +231,7 @@ func (h *Helper) CreateIPAddressClaim(ctx context.Context, owner client.Object,
}

switch {
case device == infrav1.DefaultNetworkDevice:
case device == infrav1.DefaultNetworkDevice && ref == nil:
pool, err := h.GetDefaultInClusterIPPool(ctx, format)
if err != nil {
return errors.Wrapf(err, "unable to find inclusterpool for cluster %s", h.cluster.Name)

@@ -246,9 +246,7 @@ func (s *IPAMTestSuite) Test_GetIPPoolAnnotations() {
Name: "test-cluster-v4-icip",
}, &pool))

err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{
Name: "test-cluster-icip",
})
err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", nil)
s.NoError(err)

// create a dummy IPAddress.

@@ -285,7 +283,7 @@ func (s *IPAMTestSuite) Test_GetIPPoolAnnotations() {
}, &globalPool))

err = s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{
Name: "test-ippool-anontations",
Name: "test-ippool-annotations",
Kind: "GlobalInClusterIPPool",
APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
})

@@ -430,7 +428,8 @@ func (s *IPAMTestSuite) Test_GetIPAddress() {
}, &pool))

err := s.helper.CreateIPAddressClaim(s.ctx, getCluster(), "net0", infrav1.IPV4Format, "test-cluster", &corev1.TypedLocalObjectReference{
Name: "test-cluster-icip",
Kind: "InClusterIPPool",
Name: "test-cluster-v4-icip",
})
s.NoError(err)