mirror of https://github.com/outbackdingo/proxmox-cloud-controller-manager.git (synced 2026-01-27 02:20:02 +00:00)
feat: use proxmox ha-group as zone name

This feature enables live migration without changing any Kubernetes labels.

Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
go.mod (+1 -1)
@@ -6,7 +6,7 @@ require (
 	github.com/Telmate/proxmox-api-go v0.0.0-20250202141955-0f3daee49334
 	github.com/jarcoal/httpmock v1.4.1
 	github.com/pkg/errors v0.9.1
-	github.com/spf13/pflag v1.0.9
+	github.com/spf13/pflag v1.0.10
 	github.com/stretchr/testify v1.11.1
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.34.0

go.sum (+2 -1)
@@ -141,8 +141,9 @@ github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
 github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
 github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
-github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
 github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs=
 github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

@@ -63,12 +63,23 @@ type NetworkOpts struct {
 	Mode NetworkMode `yaml:"mode,omitempty"`
 }
 
+// ClustersFeatures specifies the features for the cloud provider.
+type ClustersFeatures struct {
+	// HAGroup specifies if the provider should use HA groups to determine node zone.
+	// If enabled, the provider will use the HA group name as the zone name.
+	// If disabled, the provider will use the node's cluster name as the zone name.
+	// Default is false.
+	HAGroup bool `yaml:"ha_group,omitempty"`
+	// Provider specifies the provider to use. Can be 'default' or 'capmox'.
+	// Default is 'default'.
+	Provider Provider `yaml:"provider,omitempty"`
+	// Network specifies the network options for the cloud provider.
+	Network NetworkOpts `yaml:"network,omitempty"`
+}
+
 // ClustersConfig is proxmox multi-cluster cloud config.
 type ClustersConfig struct {
-	Features struct {
-		Provider Provider    `yaml:"provider,omitempty"`
-		Network  NetworkOpts `yaml:"network,omitempty"`
-	} `yaml:"features,omitempty"`
+	Features ClustersFeatures              `yaml:"features,omitempty"`
 	Clusters []*proxmoxpool.ProxmoxCluster `yaml:"clusters,omitempty"`
 }
 
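
For reference, a minimal cloud-config sketch with the new feature switched on. The feature keys follow the yaml tags above; the clusters entry is a placeholder, since cluster fields are not part of this diff:

    features:
      ha_group: true   # zone name = Proxmox HA group instead of the cluster name
      provider: default
    clusters:
      - region: cluster-1   # placeholder entry; see proxmoxpool.ProxmoxCluster for the real fields
        ...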

@@ -74,7 +74,7 @@ func newCloud(config *ccmConfig.ClustersConfig) (cloudprovider.Interface, error)
 		return nil, err
 	}
 
-	instancesInterface := newInstances(client, config.Features.Provider, config.Features.Network)
+	instancesInterface := newInstances(client, config.Features)
 
 	return &cloud{
 		client: client,

@@ -55,44 +55,42 @@ type instanceInfo struct {
 }
 
 type instances struct {
-	c           *client
-	provider    providerconfig.Provider
-	networkOpts instanceNetops
+	c             *client
+	zoneAsHAGroup bool
+	provider      providerconfig.Provider
+	networkOpts   instanceNetops
 }
 
 var instanceTypeNameRegexp = regexp.MustCompile(`(^[a-zA-Z0-9_.-]+)$`)
 
-func newInstances(
-	client *client,
-	provider providerconfig.Provider,
-	networkOpts providerconfig.NetworkOpts,
-) *instances {
-	externalIPCIDRs := ParseCIDRList(networkOpts.ExternalIPCIDRS)
-	if len(networkOpts.ExternalIPCIDRS) > 0 && len(externalIPCIDRs) == 0 {
-		klog.Warningf("Failed to parse external CIDRs: %v", networkOpts.ExternalIPCIDRS)
+func newInstances(client *client, features providerconfig.ClustersFeatures) *instances {
+	externalIPCIDRs := ParseCIDRList(features.Network.ExternalIPCIDRS)
+	if len(features.Network.ExternalIPCIDRS) > 0 && len(externalIPCIDRs) == 0 {
+		klog.Warningf("Failed to parse external CIDRs: %v", features.Network.ExternalIPCIDRS)
 	}
 
-	sortOrderCIDRs, ignoredCIDRs, err := ParseCIDRRuleset(networkOpts.IPSortOrder)
+	sortOrderCIDRs, ignoredCIDRs, err := ParseCIDRRuleset(features.Network.IPSortOrder)
 	if err != nil {
 		klog.Errorf("Failed to parse sort order CIDRs: %v", err)
 	}
 
-	if len(networkOpts.IPSortOrder) > 0 && (len(sortOrderCIDRs)+len(ignoredCIDRs)) == 0 {
-		klog.Warningf("Failed to parse sort order CIDRs: %v", networkOpts.IPSortOrder)
+	if len(features.Network.IPSortOrder) > 0 && (len(sortOrderCIDRs)+len(ignoredCIDRs)) == 0 {
+		klog.Warningf("Failed to parse sort order CIDRs: %v", features.Network.IPSortOrder)
 	}
 
 	netOps := instanceNetops{
 		ExternalCIDRs: externalIPCIDRs,
 		SortOrder:     sortOrderCIDRs,
 		IgnoredCIDRs:  ignoredCIDRs,
-		Mode:                networkOpts.Mode,
-		IPv6SupportDisabled: networkOpts.IPv6SupportDisabled,
+		Mode:                features.Network.Mode,
+		IPv6SupportDisabled: features.Network.IPv6SupportDisabled,
 	}
 
 	return &instances{
-		c:           client,
-		provider:    provider,
-		networkOpts: netOps,
+		c:             client,
+		zoneAsHAGroup: features.HAGroup,
+		provider:      features.Provider,
+		networkOpts:   netOps,
 	}
 }
 
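
With this change every call site hands newInstances the whole providerconfig.ClustersFeatures value instead of separate provider and network arguments; zoneAsHAGroup is simply features.HAGroup carried into the instances struct. The test update further down shows the new call shape.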

@@ -224,12 +222,6 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud
 		}
 	}
 
-	if len(additionalLabels) > 0 && !hasUninitializedTaint(node) {
-		if err := syncNodeLabels(i.c, node, additionalLabels); err != nil {
-			klog.ErrorS(err, "error updating labels for the node", "node", klog.KRef("", node.Name))
-		}
-	}
-
 	metadata := &cloudprovider.InstanceMetadata{
 		ProviderID:    providerID,
 		NodeAddresses: i.addresses(ctx, node, info),

@@ -239,6 +231,24 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud
 		AdditionalLabels: additionalLabels,
 	}
 
+	if i.zoneAsHAGroup {
+		haGroup, err := i.c.pxpool.GetNodeGroup(ctx, info.Region, info.Node)
+		if err != nil {
+			klog.ErrorS(err, "instances.InstanceMetadata() failed to get HA group for the node", "node", klog.KRef("", node.Name), "region", info.Region)
+
+			return nil, err
+		}
+
+		metadata.Zone = haGroup
+		additionalLabels[LabelTopologyHAGroup] = haGroup
+	}
+
+	if len(additionalLabels) > 0 && !hasUninitializedTaint(node) {
+		if err := syncNodeLabels(i.c, node, additionalLabels); err != nil {
+			klog.ErrorS(err, "error updating labels for the node", "node", klog.KRef("", node.Name))
+		}
+	}
+
 	klog.V(5).InfoS("instances.InstanceMetadata()", "info", info, "metadata", metadata)
 
 	return metadata, nil
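
Net effect, as a sketch: when ha_group is enabled, InstanceMetadata reports the node's HA group as its zone, and the Kubernetes cloud-node controller propagates InstanceMetadata.Zone into the standard topology.kubernetes.io/zone node label. The label-sync block moved below the lookup so that the extra label keyed by LabelTopologyHAGroup (a constant defined elsewhere in the tree; its value is not shown in this diff) is written in the same pass. For a node in a hypothetical HA group named gpu-pool, the zone would surface as:

    topology.kubernetes.io/zone=gpu-pool

Because live migration between Proxmox nodes of the same HA group does not change the group, the zone label stays stable, which is the behavior the commit message promises.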

@@ -179,7 +179,12 @@ clusters:
 		kclient: fake.NewSimpleClientset(),
 	}
 
-	ts.i = newInstances(client, providerconfig.ProviderDefault, providerconfig.NetworkOpts{})
+	features := providerconfig.ClustersFeatures{
+		Provider: providerconfig.ProviderDefault,
+		Network:  providerconfig.NetworkOpts{},
+	}
+
+	ts.i = newInstances(client, features)
 }
 
 func (ts *ccmTestSuite) TearDownTest() {

@@ -143,12 +143,7 @@ func (c *ProxmoxPool) GetProxmoxCluster(region string) (*proxmox.Client, error)
 
 // GetNodeGroup returns a Proxmox node ha-group in a given region.
 func (c *ProxmoxPool) GetNodeGroup(ctx context.Context, region string, node string) (string, error) {
-	px, err := c.GetProxmoxCluster(region)
-	if err != nil {
-		return "", err
-	}
-
-	haGroups, err := px.GetHAGroupList(ctx)
+	haGroups, err := c.GetHAGroupList(ctx, region)
 	if err != nil {
 		return "", fmt.Errorf("error get ha-groups %v", err)
 	}
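
A caller resolves a node's group with, for example, c.GetNodeGroup(ctx, "cluster-1", "pve-1") (region and node names hypothetical). The loop that scans haGroups for a group containing the node lives in the unchanged lines between this hunk and the next, which is why the function still falls through to ErrHAGroupNotFound below.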

@@ -168,6 +163,44 @@
 	return "", ErrHAGroupNotFound
 }
 
+// GetHAGroupList returns a list of Proxmox ha-groups in a given region.
+func (c *ProxmoxPool) GetHAGroupList(ctx context.Context, region string) (haGroups []proxmox.HAGroup, err error) {
+	px, err := c.GetProxmoxCluster(region)
+	if err != nil {
+		return nil, err
+	}
+
+	list, err := px.GetItemList(ctx, "/cluster/ha/groups")
+	if err != nil {
+		return nil, err
+	}
+
+	haGroups = []proxmox.HAGroup{}
+
+	items, ok := list["data"].([]interface{})
+	if !ok {
+		return nil, fmt.Errorf("failed to cast response to list of HA groups in region %s, error: %v", region, err)
+	}
+
+	for _, item := range items {
+		itemMap := item.(map[string]interface{})
+
+		if itemMap["type"].(string) != "group" {
+			continue
+		}
+
+		haGroups = append(haGroups, proxmox.HAGroup{
+			Group:      itemMap["group"].(string),
+			Nodes:      strings.Split(itemMap["nodes"].(string), ","),
+			NoFailback: itemMap["nofailback"].(float64) == 1,
+			Restricted: itemMap["restricted"].(float64) == 1,
+			Type:       itemMap["type"].(string),
+		})
+	}
+
+	return haGroups, nil
+}
+
 // FindVMByNode find a VM by kubernetes node resource in all Proxmox clusters.
 func (c *ProxmoxPool) FindVMByNode(ctx context.Context, node *v1.Node) (*proxmox.VmRef, string, error) {
 	for region, px := range c.clients {
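
For context on the parsing above: GetItemList performs a GET against the Proxmox API, and for /cluster/ha/groups the decoded payload has roughly the shape below. Values are illustrative; the field types are inferred from the type assertions in the loop (nodes arrives as a comma-separated string, nofailback and restricted as numbers):

    {
      "data": [
        { "type": "group", "group": "gpu-pool", "nodes": "pve-1,pve-2", "nofailback": 0, "restricted": 1 }
      ]
    }

Entries whose type is anything other than "group" are skipped before conversion into proxmox.HAGroup values.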