refactor: split cloud config module
Split the cloud configuration into two parts: the original cloud controller configuration and a separate package for working with multiple Proxmox clusters.

Signed-off-by: Daniel J. Holmes (jaitaiwan) <dan@jaitaiwan.dev>
Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
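After the split, callers parse the cloud config with pkg/config and hand the resulting cluster list to pkg/proxmoxpool, which owns the Proxmox API clients. A minimal sketch of the new wiring, mirroring the newCloud change in the diff below; the cloud-config.yaml path and the main wrapper are illustrative only:

package main

import (
	"fmt"
	"os"

	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
)

func main() {
	// pkg/config now only owns parsing of the cloud configuration.
	f, err := os.Open("cloud-config.yaml") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	cfg, err := ccmConfig.ReadCloudConfig(f)
	if err != nil {
		panic(err)
	}

	// cfg.Clusters is now []*pxpool.ProxmoxCluster, so the pool takes the
	// cluster list directly instead of the whole ClustersConfig.
	pool, err := pxpool.NewProxmoxPool(cfg.Clusters, nil)
	if err != nil {
		panic(err)
	}

	fmt.Println("configured clusters:", len(cfg.Clusters), "pool ready:", pool != nil)
}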
@@ -1,7 +1,7 @@
 # syntax = docker/dockerfile:1.16
 ########################################
 
-FROM --platform=${BUILDPLATFORM} golang:1.24.4-alpine AS builder
+FROM --platform=${BUILDPLATFORM} golang:1.24.5-alpine AS builder
 RUN apk update && apk add --no-cache make
 ENV GO111MODULE=on
 WORKDIR /src
Makefile
@@ -77,6 +77,11 @@ run: build ## Run
 lint: ## Lint Code
 	golangci-lint run --config .golangci.yml
 
+.PHONY: lint-fix
+lint-fix: ## Fix Lint Issues
+	golangci-lint run --fix --config .golangci.yml
+
+
 .PHONY: unit
 unit: ## Unit Tests
 	go test -tags=unit $(shell go list ./...) $(TESTARGS)
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package cluster
+// Package config is the configuration for the cloud provider.
+package config
 
 import (
 	"fmt"
@@ -24,6 +25,8 @@ import (
 	"strings"
 
 	yaml "gopkg.in/yaml.v3"
+
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 )
 
 // Provider specifies the provider. Can be 'default' or 'capmox'
@@ -40,15 +43,7 @@ type ClustersConfig struct {
 	Features struct {
 		Provider Provider `yaml:"provider,omitempty"`
 	} `yaml:"features,omitempty"`
-	Clusters []struct {
-		URL         string `yaml:"url"`
-		Insecure    bool   `yaml:"insecure,omitempty"`
-		TokenID     string `yaml:"token_id,omitempty"`
-		TokenSecret string `yaml:"token_secret,omitempty"`
-		Username    string `yaml:"username,omitempty"`
-		Password    string `yaml:"password,omitempty"`
-		Region      string `yaml:"region,omitempty"`
-	} `yaml:"clusters,omitempty"`
+	Clusters []*pxpool.ProxmoxCluster `yaml:"clusters,omitempty"`
 }
 
 // ReadCloudConfig reads cloud config from a reader.
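For orientation, a cloud config that unmarshals into the new ClustersConfig shape looks like the following. This is a sketch assembled from the test fixtures elsewhere in this commit; the URLs and token values are placeholders, not working credentials.

features:
  provider: 'default'
clusters:
  - url: https://127.0.0.1:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-1
  - url: https://127.0.0.2:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-2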
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package cluster_test
+package config_test
 
 import (
 	"strings"
@@ -22,23 +22,23 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 )
 
 func TestReadCloudConfig(t *testing.T) {
-	cfg, err := cluster.ReadCloudConfig(nil)
+	cfg, err := ccmConfig.ReadCloudConfig(nil)
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 
 	// Empty config
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
 `))
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 
 	// Wrong config
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   test: false
 `))
@@ -47,7 +47,7 @@ clusters:
 	assert.NotNil(t, cfg)
 
 	// Non full config
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: abcd
     region: cluster-1
@@ -57,7 +57,7 @@ clusters:
 	assert.NotNil(t, cfg)
 
 	// Valid config with one cluster
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://example.com
     insecure: false
@@ -70,7 +70,7 @@ clusters:
 	assert.Equal(t, 1, len(cfg.Clusters))
 
 	// Valid config with one cluster (username/password), implicit default provider
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://example.com
     insecure: false
@@ -81,10 +81,10 @@ clusters:
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 1, len(cfg.Clusters))
-	assert.Equal(t, cluster.ProviderDefault, cfg.Features.Provider)
+	assert.Equal(t, ccmConfig.ProviderDefault, cfg.Features.Provider)
 
 	// Valid config with one cluster (username/password), explicit provider default
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 features:
   provider: 'default'
 clusters:
@@ -97,10 +97,10 @@ clusters:
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 1, len(cfg.Clusters))
-	assert.Equal(t, cluster.ProviderDefault, cfg.Features.Provider)
+	assert.Equal(t, ccmConfig.ProviderDefault, cfg.Features.Provider)
 
 	// Valid config with one cluster (username/password), explicit provider capmox
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 features:
   provider: 'capmox'
 clusters:
@@ -113,16 +113,16 @@ clusters:
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 1, len(cfg.Clusters))
-	assert.Equal(t, cluster.ProviderCapmox, cfg.Features.Provider)
+	assert.Equal(t, ccmConfig.ProviderCapmox, cfg.Features.Provider)
 }
 
 func TestReadCloudConfigFromFile(t *testing.T) {
-	cfg, err := cluster.ReadCloudConfigFromFile("testdata/cloud-config.yaml")
+	cfg, err := ccmConfig.ReadCloudConfigFromFile("testdata/cloud-config.yaml")
 	assert.NotNil(t, err)
 	assert.EqualError(t, err, "error reading testdata/cloud-config.yaml: open testdata/cloud-config.yaml: no such file or directory")
 	assert.NotNil(t, cfg)
 
-	cfg, err = cluster.ReadCloudConfigFromFile("../../hack/proxmox-config.yaml")
+	cfg, err = ccmConfig.ReadCloudConfigFromFile("../../hack/proxmox-config.yaml")
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 2, len(cfg.Clusters))
@@ -21,8 +21,9 @@ import (
 	"context"
 	"io"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
 	clientkubernetes "k8s.io/client-go/kubernetes"
 	cloudprovider "k8s.io/cloud-provider"
@@ -38,7 +39,7 @@ const (
 )
 
 type cloud struct {
-	client      *cluster.Cluster
+	client      *pxpool.ProxmoxPool
 	kclient     clientkubernetes.Interface
 	instancesV2 cloudprovider.InstancesV2
 
@@ -48,7 +49,7 @@ type cloud struct {
 
 func init() {
 	cloudprovider.RegisterCloudProvider(provider.ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
-		cfg, err := cluster.ReadCloudConfig(config)
+		cfg, err := ccmConfig.ReadCloudConfig(config)
 		if err != nil {
 			klog.ErrorS(err, "failed to read config")
 
@@ -59,8 +60,8 @@ func init() {
 	})
 }
 
-func newCloud(config *cluster.ClustersConfig) (cloudprovider.Interface, error) {
-	client, err := cluster.NewCluster(config, nil)
+func newCloud(config *ccmConfig.ClustersConfig) (cloudprovider.Interface, error) {
+	client, err := pxpool.NewProxmoxPool(config.Clusters, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -22,19 +22,19 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
 )
 
 func TestNewCloudError(t *testing.T) {
-	cloud, err := newCloud(&cluster.ClustersConfig{})
+	cloud, err := newCloud(&ccmConfig.ClustersConfig{})
 	assert.NotNil(t, err)
 	assert.Nil(t, cloud)
 	assert.EqualError(t, err, "no Proxmox clusters found")
 }
 
 func TestCloud(t *testing.T) {
-	cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err := ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://example.com
     insecure: false
@@ -25,9 +25,10 @@ import (
 
 	pxapi "github.com/Telmate/proxmox-api-go/proxmox"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	metrics "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/metrics"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
 	v1 "k8s.io/api/core/v1"
 	cloudprovider "k8s.io/cloud-provider"
@@ -36,13 +37,13 @@ import (
 )
 
 type instances struct {
-	c        *cluster.Cluster
-	provider cluster.Provider
+	c        *pxpool.ProxmoxPool
+	provider ccmConfig.Provider
 }
 
 var instanceTypeNameRegexp = regexp.MustCompile(`(^[a-zA-Z0-9_.-]+)$`)
 
-func newInstances(client *cluster.Cluster, provider cluster.Provider) *instances {
+func newInstances(client *pxpool.ProxmoxPool, provider ccmConfig.Provider) *instances {
 	return &instances{
 		c:        client,
 		provider: provider,
@@ -156,7 +157,7 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud
 		}
 	}
 
-	if i.provider == cluster.ProviderCapmox {
+	if i.provider == ccmConfig.ProviderCapmox {
 		providerID = provider.GetProviderIDFromUUID(uuid)
 	} else {
 		providerID = provider.GetProviderID(region, vmRef)
@@ -209,7 +210,7 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud
 func (i *instances) getInstance(ctx context.Context, node *v1.Node) (*pxapi.VmRef, string, error) {
 	klog.V(4).InfoS("instances.getInstance() called", "node", klog.KRef("", node.Name), "provider", i.provider)
 
-	if i.provider == cluster.ProviderCapmox {
+	if i.provider == ccmConfig.ProviderCapmox {
 		uuid := node.Status.NodeInfo.SystemUUID
 
 		vmRef, region, err := i.c.FindVMByUUID(ctx, uuid)
@@ -28,8 +28,9 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/suite"
 
-	proxmoxcluster "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,7 +45,7 @@ type ccmTestSuite struct {
 }
 
 func (ts *ccmTestSuite) SetupTest() {
-	cfg, err := proxmoxcluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err := ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://127.0.0.1:8006/api2/json
     insecure: false
@@ -171,12 +172,12 @@ clusters:
 		},
 	)
 
-	cluster, err := proxmoxcluster.NewCluster(&cfg, &http.Client{})
+	cluster, err := pxpool.NewProxmoxPool(cfg.Clusters, &http.Client{})
 	if err != nil {
 		ts.T().Fatalf("failed to create cluster client: %v", err)
 	}
 
-	ts.i = newInstances(cluster, proxmoxcluster.ProviderDefault)
+	ts.i = newInstances(cluster, ccmConfig.ProviderDefault)
 }
 
 func (ts *ccmTestSuite) TearDownTest() {
@@ -14,8 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package cluster implements the multi-cloud provider interface for Proxmox.
-package cluster
+// Package proxmoxpool provides a pool of Telmate/proxmox-api-go/proxmox clients
+package proxmoxpool
 
 import (
 	"context"
@@ -33,43 +33,52 @@ import (
 	"k8s.io/klog/v2"
 )
 
-// Cluster is a Proxmox client.
-type Cluster struct {
-	config  *ClustersConfig
-	proxmox map[string]*pxapi.Client
+// ProxmoxCluster defines a Proxmox cluster configuration.
+type ProxmoxCluster struct {
+	URL         string `yaml:"url"`
+	Insecure    bool   `yaml:"insecure,omitempty"`
+	TokenID     string `yaml:"token_id,omitempty"`
+	TokenSecret string `yaml:"token_secret,omitempty"`
+	Username    string `yaml:"username,omitempty"`
+	Password    string `yaml:"password,omitempty"`
+	Region      string `yaml:"region,omitempty"`
 }
 
-// NewCluster creates a new Proxmox cluster client.
-func NewCluster(config *ClustersConfig, hclient *http.Client) (*Cluster, error) {
-	clusters := len(config.Clusters)
+// ProxmoxPool is a Proxmox client.
+type ProxmoxPool struct {
+	clients map[string]*pxapi.Client
+}
+
+// NewProxmoxPool creates a new Proxmox cluster client.
+func NewProxmoxPool(config []*ProxmoxCluster, hClient *http.Client) (*ProxmoxPool, error) {
+	clusters := len(config)
 	if clusters > 0 {
 		proxmox := make(map[string]*pxapi.Client, clusters)
 
-		for _, cfg := range config.Clusters {
+		for _, cfg := range config {
 			tlsconf := &tls.Config{InsecureSkipVerify: true}
 			if !cfg.Insecure {
 				tlsconf = nil
 			}
 
-			client, err := pxapi.NewClient(cfg.URL, hclient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
+			pClient, err := pxapi.NewClient(cfg.URL, hClient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
 			if err != nil {
 				return nil, err
 			}
 
 			if cfg.Username != "" && cfg.Password != "" {
-				if err := client.Login(context.Background(), cfg.Username, cfg.Password, ""); err != nil {
+				if err := pClient.Login(context.Background(), cfg.Username, cfg.Password, ""); err != nil {
 					return nil, err
 				}
 			} else {
-				client.SetAPIToken(cfg.TokenID, cfg.TokenSecret)
+				pClient.SetAPIToken(cfg.TokenID, cfg.TokenSecret)
 			}
 
-			proxmox[cfg.Region] = client
+			proxmox[cfg.Region] = pClient
 		}
 
-		return &Cluster{
-			config:  config,
-			proxmox: proxmox,
+		return &ProxmoxPool{
+			clients: proxmox,
 		}, nil
 	}
 
@@ -77,13 +86,13 @@ func NewCluster(config *ClustersConfig, hclient *http.Client) (*Cluster, error)
 }
 
 // CheckClusters checks if the Proxmox connection is working.
-func (c *Cluster) CheckClusters(ctx context.Context) error {
-	for region, client := range c.proxmox {
-		if _, err := client.GetVersion(ctx); err != nil {
+func (c *ProxmoxPool) CheckClusters(ctx context.Context) error {
+	for region, pClient := range c.clients {
+		if _, err := pClient.GetVersion(ctx); err != nil {
 			return fmt.Errorf("failed to initialized proxmox client in region %s, error: %v", region, err)
 		}
 
-		vmlist, err := client.GetVmList(ctx)
+		vmlist, err := pClient.GetVmList(ctx)
 		if err != nil {
 			return fmt.Errorf("failed to get list of VMs in region %s, error: %v", region, err)
 		}
@@ -104,17 +113,17 @@ func (c *Cluster) CheckClusters(ctx context.Context) error {
 }
 
 // GetProxmoxCluster returns a Proxmox cluster client in a given region.
-func (c *Cluster) GetProxmoxCluster(region string) (*pxapi.Client, error) {
-	if c.proxmox[region] != nil {
-		return c.proxmox[region], nil
+func (c *ProxmoxPool) GetProxmoxCluster(region string) (*pxapi.Client, error) {
+	if c.clients[region] != nil {
+		return c.clients[region], nil
 	}
 
 	return nil, fmt.Errorf("proxmox cluster %s not found", region)
 }
 
 // FindVMByNode find a VM by kubernetes node resource in all Proxmox clusters.
-func (c *Cluster) FindVMByNode(ctx context.Context, node *v1.Node) (*pxapi.VmRef, string, error) {
-	for region, px := range c.proxmox {
+func (c *ProxmoxPool) FindVMByNode(ctx context.Context, node *v1.Node) (*pxapi.VmRef, string, error) {
+	for region, px := range c.clients {
 		vmrs, err := px.GetVmRefsByName(ctx, node.Name)
 		if err != nil {
 			if strings.Contains(err.Error(), "not found") {
@@ -140,8 +149,8 @@ func (c *Cluster) FindVMByNode(ctx context.Context, node *v1.Node) (*pxapi.VmRef
 }
 
 // FindVMByName find a VM by name in all Proxmox clusters.
-func (c *Cluster) FindVMByName(ctx context.Context, name string) (*pxapi.VmRef, string, error) {
-	for region, px := range c.proxmox {
+func (c *ProxmoxPool) FindVMByName(ctx context.Context, name string) (*pxapi.VmRef, string, error) {
+	for region, px := range c.clients {
 		vmr, err := px.GetVmRefByName(ctx, name)
 		if err != nil {
 			if strings.Contains(err.Error(), "not found") {
@@ -158,8 +167,8 @@ func (c *Cluster) FindVMByName(ctx context.Context, name string) (*pxapi.VmRef,
 }
 
 // FindVMByUUID find a VM by uuid in all Proxmox clusters.
-func (c *Cluster) FindVMByUUID(ctx context.Context, uuid string) (*pxapi.VmRef, string, error) {
-	for region, px := range c.proxmox {
+func (c *ProxmoxPool) FindVMByUUID(ctx context.Context, uuid string) (*pxapi.VmRef, string, error) {
+	for region, px := range c.clients {
 		vms, err := px.GetResourceList(ctx, "vm")
 		if err != nil {
 			return nil, "", fmt.Errorf("error get resources %v", err)
@@ -196,7 +205,7 @@ func (c *Cluster) FindVMByUUID(ctx context.Context, uuid string) (*pxapi.VmRef,
 }
 
 // GetVMName returns the VM name.
-func (c *Cluster) GetVMName(vmInfo map[string]interface{}) string {
+func (c *ProxmoxPool) GetVMName(vmInfo map[string]interface{}) string {
 	if vmInfo["name"] != nil {
 		return vmInfo["name"].(string) //nolint:errcheck
 	}
@@ -205,7 +214,7 @@ func (c *Cluster) GetVMName(vmInfo map[string]interface{}) string {
 }
 
 // GetVMUUID returns the VM UUID.
-func (c *Cluster) GetVMUUID(vmInfo map[string]interface{}) string {
+func (c *ProxmoxPool) GetVMUUID(vmInfo map[string]interface{}) string {
 	if vmInfo["smbios1"] != nil {
 		return c.getSMBSetting(vmInfo, "uuid")
 	}
@@ -214,7 +223,7 @@ func (c *Cluster) GetVMUUID(vmInfo map[string]interface{}) string {
 }
 
 // GetVMSKU returns the VM instance type name.
-func (c *Cluster) GetVMSKU(vmInfo map[string]interface{}) string {
+func (c *ProxmoxPool) GetVMSKU(vmInfo map[string]interface{}) string {
 	if vmInfo["smbios1"] != nil {
 		return c.getSMBSetting(vmInfo, "sku")
 	}
@@ -222,7 +231,7 @@ func (c *Cluster) GetVMSKU(vmInfo map[string]interface{}) string {
 	return ""
 }
 
-func (c *Cluster) getSMBSetting(vmInfo map[string]interface{}, name string) string {
+func (c *ProxmoxPool) getSMBSetting(vmInfo map[string]interface{}, name string) string {
 	smbios, ok := vmInfo["smbios1"].(string)
 	if !ok {
 		return ""
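Apart from the constructor and receiver renames, the pool API is call-compatible with the old cluster package. A short sketch of using the new type directly; the cluster values mirror the test fixtures below, so the endpoint is a placeholder and the connectivity check is expected to fail:

package main

import (
	"context"
	"fmt"

	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
)

func main() {
	clusters := []*pxpool.ProxmoxCluster{{
		URL:         "https://127.0.0.1:8006/api2/json",
		TokenID:     "user!token-id", // placeholder credentials
		TokenSecret: "secret",
		Region:      "cluster-1",
	}}

	// An empty list surfaces the "no Proxmox clusters found" error that
	// cloud_test.go asserts on.
	pool, err := pxpool.NewProxmoxPool(clusters, nil)
	if err != nil {
		panic(err)
	}

	// CheckClusters calls GetVersion and GetVmList for every region, so it
	// fails fast against the placeholder endpoint.
	if err := pool.CheckClusters(context.Background()); err != nil {
		fmt.Println("connectivity check:", err)
	}
}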
@@ -14,78 +14,78 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package cluster_test
+package proxmoxpool_test
 
 import (
 	"fmt"
 	"net/http"
 	"strings"
 	"testing"
 
 	"github.com/jarcoal/httpmock"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 )
 
-func newClusterEnv() (*cluster.ClustersConfig, error) {
-	cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
-clusters:
-  - url: https://127.0.0.1:8006/api2/json
-    insecure: false
-    token_id: "user!token-id"
-    token_secret: "secret"
-    region: cluster-1
-  - url: https://127.0.0.2:8006/api2/json
-    insecure: false
-    token_id: "user!token-id"
-    token_secret: "secret"
-    region: cluster-2
-`))
+func newClusterEnv() []*pxpool.ProxmoxCluster {
+	// copilot convert the cfg call to an array of []*proxmox_pool.ProxmoxCluster:
+	cfg := []*pxpool.ProxmoxCluster{
+		{
+			URL:         "https://127.0.0.1:8006/api2/json",
+			Insecure:    false,
+			TokenID:     "user!token-id",
+			TokenSecret: "secret",
+			Region:      "cluster-1",
+		},
+		{
+			URL:         "https://127.0.0.2:8006/api2/json",
+			Insecure:    false,
+			TokenID:     "user!token-id",
+			TokenSecret: "secret",
+			Region:      "cluster-2",
+		},
+	}
 
-	return &cfg, err
+	return cfg
 }
 
 func TestNewClient(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
-	client, err := cluster.NewCluster(&cluster.ClustersConfig{}, nil)
+	pClient, err := pxpool.NewProxmoxPool([]*pxpool.ProxmoxCluster{}, nil)
 	assert.NotNil(t, err)
-	assert.Nil(t, client)
+	assert.Nil(t, pClient)
 
-	client, err = cluster.NewCluster(cfg, nil)
+	pClient, err = pxpool.NewProxmoxPool(cfg, nil)
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 }
 
 func TestCheckClusters(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
-	client, err := cluster.NewCluster(cfg, nil)
+	pClient, err := pxpool.NewProxmoxPool(cfg, nil)
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 
-	pxapi, err := client.GetProxmoxCluster("test")
+	pxapi, err := pClient.GetProxmoxCluster("test")
 	assert.NotNil(t, err)
 	assert.Nil(t, pxapi)
 	assert.Equal(t, "proxmox cluster test not found", err.Error())
 
-	pxapi, err = client.GetProxmoxCluster("cluster-1")
+	pxapi, err = pClient.GetProxmoxCluster("cluster-1")
 	assert.Nil(t, err)
 	assert.NotNil(t, pxapi)
 
-	err = client.CheckClusters(t.Context())
+	err = pClient.CheckClusters(t.Context())
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "failed to initialized proxmox client in region")
 }
 
 func TestFindVMByNameNonExist(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
 	httpmock.Activate()
@@ -121,11 +121,11 @@ func TestFindVMByNameNonExist(t *testing.T) {
 		},
 	)
 
-	client, err := cluster.NewCluster(cfg, &http.Client{})
+	pClient, err := pxpool.NewProxmoxPool(cfg, &http.Client{})
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 
-	vmr, cluster, err := client.FindVMByName(t.Context(), "non-existing-vm")
+	vmr, cluster, err := pClient.FindVMByName(t.Context(), "non-existing-vm")
 	assert.NotNil(t, err)
 	assert.Equal(t, "", cluster)
 	assert.Nil(t, vmr)
@@ -133,8 +133,7 @@ func TestFindVMByNameNonExist(t *testing.T) {
 }
 
 func TestFindVMByNameExist(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
 	httpmock.Activate()
@@ -168,9 +167,9 @@ func TestFindVMByNameExist(t *testing.T) {
 		},
 	)
 
-	client, err := cluster.NewCluster(cfg, &http.Client{})
+	pClient, err := pxpool.NewProxmoxPool(cfg, &http.Client{})
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 
 	tests := []struct {
 		msg           string
@@ -202,7 +201,7 @@ func TestFindVMByNameExist(t *testing.T) {
 			testCase := testCase
 
 			t.Run(fmt.Sprint(testCase.msg), func(t *testing.T) {
-				vmr, cluster, err := client.FindVMByName(t.Context(), testCase.vmName)
+				vmr, cluster, err := pClient.FindVMByName(t.Context(), testCase.vmName)
 
 				if testCase.expectedError == nil {
 					assert.Nil(t, err)