Mirror of https://github.com/outbackdingo/proxmox-cloud-controller-manager.git
refactor: split cloud config module
Split the cloud configuration into two parts: the original cloud controller configuration and a separate package for working with multiple Proxmox clusters.

Signed-off-by: Daniel J. Holmes (jaitaiwan) <dan@jaitaiwan.dev>
Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
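Seen from the caller's side, the split means the YAML is parsed by the new config package and the resulting cluster list feeds the new proxmoxpool package directly. A minimal sketch, assembled from the calls visible in the diff below (ReadCloudConfig, ClustersConfig.Clusters, NewProxmoxPool, CheckClusters); the URL and credentials are placeholder values taken from the test data, not a working setup:

package main

import (
	"context"
	"log"
	"strings"

	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
)

func main() {
	// The YAML layout is unchanged; only the Go types behind it moved packages.
	cfg, err := ccmConfig.ReadCloudConfig(strings.NewReader(`
clusters:
  - url: https://127.0.0.1:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-1
`))
	if err != nil {
		log.Fatal(err)
	}

	// cfg.Clusters is now []*pxpool.ProxmoxCluster, so it can be handed to the pool as-is.
	pool, err := pxpool.NewProxmoxPool(cfg.Clusters, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Fails here unless a real cluster is reachable at the URL above.
	if err := pool.CheckClusters(context.Background()); err != nil {
		log.Printf("cluster check failed: %v", err)
	}
}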
Dockerfile

@@ -1,7 +1,7 @@
 # syntax = docker/dockerfile:1.16
 ########################################
 
-FROM --platform=${BUILDPLATFORM} golang:1.24.4-alpine AS builder
+FROM --platform=${BUILDPLATFORM} golang:1.24.5-alpine AS builder
 RUN apk update && apk add --no-cache make
 ENV GO111MODULE=on
 WORKDIR /src
Makefile

@@ -77,6 +77,11 @@ run: build ## Run
 lint: ## Lint Code
 	golangci-lint run --config .golangci.yml
 
+.PHONY: lint-fix
+lint-fix: ## Fix Lint Issues
+	golangci-lint run --fix --config .golangci.yml
+
+
 .PHONY: unit
 unit: ## Unit Tests
 	go test -tags=unit $(shell go list ./...) $(TESTARGS)
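With this target in place, the auto-fix pass can presumably be run as `make lint-fix`, alongside the existing `make lint`.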
pkg/cluster → pkg/config (cloud configuration)

@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package cluster
+// Package config is the configuration for the cloud provider.
+package config
 
 import (
 	"fmt"
@@ -24,6 +25,8 @@ import (
 	"strings"
 
 	yaml "gopkg.in/yaml.v3"
+
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 )
 
 // Provider specifies the provider. Can be 'default' or 'capmox'
@@ -40,15 +43,7 @@ type ClustersConfig struct {
 	Features struct {
 		Provider Provider `yaml:"provider,omitempty"`
 	} `yaml:"features,omitempty"`
-	Clusters []struct {
-		URL         string `yaml:"url"`
-		Insecure    bool   `yaml:"insecure,omitempty"`
-		TokenID     string `yaml:"token_id,omitempty"`
-		TokenSecret string `yaml:"token_secret,omitempty"`
-		Username    string `yaml:"username,omitempty"`
-		Password    string `yaml:"password,omitempty"`
-		Region      string `yaml:"region,omitempty"`
-	} `yaml:"clusters,omitempty"`
+	Clusters []*pxpool.ProxmoxCluster `yaml:"clusters,omitempty"`
 }
 
 // ReadCloudConfig reads cloud config from a reader.
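For illustration only (not part of the commit): because Clusters is now a slice of *pxpool.ProxmoxCluster, a ClustersConfig can also be assembled directly in Go instead of being parsed from YAML. The field names are the ones visible in the hunk above; the values are placeholders:

package main

import (
	"fmt"

	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
)

func main() {
	cfg := ccmConfig.ClustersConfig{
		Clusters: []*pxpool.ProxmoxCluster{
			{
				URL:         "https://127.0.0.1:8006/api2/json",
				Insecure:    false,
				TokenID:     "user!token-id",
				TokenSecret: "secret",
				Region:      "cluster-1",
			},
		},
	}
	// Features is an anonymous struct field, so it is set after construction.
	cfg.Features.Provider = ccmConfig.ProviderDefault

	fmt.Println(len(cfg.Clusters), cfg.Features.Provider)
}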
pkg/config tests (package cluster_test → config_test)

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package cluster_test
+package config_test
 
 import (
 	"strings"
@@ -22,23 +22,23 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 )
 
 func TestReadCloudConfig(t *testing.T) {
-	cfg, err := cluster.ReadCloudConfig(nil)
+	cfg, err := ccmConfig.ReadCloudConfig(nil)
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 
 	// Empty config
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
 `))
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 
 	// Wrong config
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   test: false
 `))
@@ -47,7 +47,7 @@ clusters:
 	assert.NotNil(t, cfg)
 
 	// Non full config
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: abcd
     region: cluster-1
@@ -57,7 +57,7 @@ clusters:
 	assert.NotNil(t, cfg)
 
 	// Valid config with one cluster
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://example.com
     insecure: false
@@ -70,7 +70,7 @@ clusters:
 	assert.Equal(t, 1, len(cfg.Clusters))
 
 	// Valid config with one cluster (username/password), implicit default provider
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://example.com
     insecure: false
@@ -81,10 +81,10 @@ clusters:
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 1, len(cfg.Clusters))
-	assert.Equal(t, cluster.ProviderDefault, cfg.Features.Provider)
+	assert.Equal(t, ccmConfig.ProviderDefault, cfg.Features.Provider)
 
 	// Valid config with one cluster (username/password), explicit provider default
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 features:
   provider: 'default'
 clusters:
@@ -97,10 +97,10 @@ clusters:
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 1, len(cfg.Clusters))
-	assert.Equal(t, cluster.ProviderDefault, cfg.Features.Provider)
+	assert.Equal(t, ccmConfig.ProviderDefault, cfg.Features.Provider)
 
 	// Valid config with one cluster (username/password), explicit provider capmox
-	cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err = ccmConfig.ReadCloudConfig(strings.NewReader(`
 features:
   provider: 'capmox'
 clusters:
@@ -113,16 +113,16 @@ clusters:
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 1, len(cfg.Clusters))
-	assert.Equal(t, cluster.ProviderCapmox, cfg.Features.Provider)
+	assert.Equal(t, ccmConfig.ProviderCapmox, cfg.Features.Provider)
 }
 
 func TestReadCloudConfigFromFile(t *testing.T) {
-	cfg, err := cluster.ReadCloudConfigFromFile("testdata/cloud-config.yaml")
+	cfg, err := ccmConfig.ReadCloudConfigFromFile("testdata/cloud-config.yaml")
 	assert.NotNil(t, err)
 	assert.EqualError(t, err, "error reading testdata/cloud-config.yaml: open testdata/cloud-config.yaml: no such file or directory")
 	assert.NotNil(t, cfg)
 
-	cfg, err = cluster.ReadCloudConfigFromFile("../../hack/proxmox-config.yaml")
+	cfg, err = ccmConfig.ReadCloudConfigFromFile("../../hack/proxmox-config.yaml")
 	assert.Nil(t, err)
 	assert.NotNil(t, cfg)
 	assert.Equal(t, 2, len(cfg.Clusters))
Cloud provider setup (cloud struct, init, newCloud)

@@ -21,8 +21,9 @@ import (
 	"context"
 	"io"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
 	clientkubernetes "k8s.io/client-go/kubernetes"
 	cloudprovider "k8s.io/cloud-provider"
@@ -38,7 +39,7 @@ const (
 )
 
 type cloud struct {
-	client  *cluster.Cluster
+	client  *pxpool.ProxmoxPool
 	kclient clientkubernetes.Interface
 	instancesV2 cloudprovider.InstancesV2
 
@@ -48,7 +49,7 @@ type cloud struct {
 
 func init() {
 	cloudprovider.RegisterCloudProvider(provider.ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
-		cfg, err := cluster.ReadCloudConfig(config)
+		cfg, err := ccmConfig.ReadCloudConfig(config)
 		if err != nil {
 			klog.ErrorS(err, "failed to read config")
 
@@ -59,8 +60,8 @@ func init() {
 	})
 }
 
-func newCloud(config *cluster.ClustersConfig) (cloudprovider.Interface, error) {
-	client, err := cluster.NewCluster(config, nil)
+func newCloud(config *ccmConfig.ClustersConfig) (cloudprovider.Interface, error) {
+	client, err := pxpool.NewProxmoxPool(config.Clusters, nil)
 	if err != nil {
 		return nil, err
 	}
Cloud provider tests

@@ -22,19 +22,19 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
 )
 
 func TestNewCloudError(t *testing.T) {
-	cloud, err := newCloud(&cluster.ClustersConfig{})
+	cloud, err := newCloud(&ccmConfig.ClustersConfig{})
 	assert.NotNil(t, err)
 	assert.Nil(t, cloud)
 	assert.EqualError(t, err, "no Proxmox clusters found")
 }
 
 func TestCloud(t *testing.T) {
-	cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err := ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://example.com
     insecure: false
Instances implementation (InstancesV2)

@@ -25,9 +25,10 @@ import (
 
 	pxapi "github.com/Telmate/proxmox-api-go/proxmox"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	metrics "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/metrics"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
 	v1 "k8s.io/api/core/v1"
 	cloudprovider "k8s.io/cloud-provider"
@@ -36,13 +37,13 @@ import (
 )
 
 type instances struct {
-	c        *cluster.Cluster
-	provider cluster.Provider
+	c        *pxpool.ProxmoxPool
+	provider ccmConfig.Provider
 }
 
 var instanceTypeNameRegexp = regexp.MustCompile(`(^[a-zA-Z0-9_.-]+)$`)
 
-func newInstances(client *cluster.Cluster, provider cluster.Provider) *instances {
+func newInstances(client *pxpool.ProxmoxPool, provider ccmConfig.Provider) *instances {
 	return &instances{
 		c:        client,
 		provider: provider,
@@ -156,7 +157,7 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud
 		}
 	}
 
-	if i.provider == cluster.ProviderCapmox {
+	if i.provider == ccmConfig.ProviderCapmox {
 		providerID = provider.GetProviderIDFromUUID(uuid)
 	} else {
 		providerID = provider.GetProviderID(region, vmRef)
@@ -209,7 +210,7 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud
 func (i *instances) getInstance(ctx context.Context, node *v1.Node) (*pxapi.VmRef, string, error) {
 	klog.V(4).InfoS("instances.getInstance() called", "node", klog.KRef("", node.Name), "provider", i.provider)
 
-	if i.provider == cluster.ProviderCapmox {
+	if i.provider == ccmConfig.ProviderCapmox {
 		uuid := node.Status.NodeInfo.SystemUUID
 
 		vmRef, region, err := i.c.FindVMByUUID(ctx, uuid)
Instances tests (ccmTestSuite)

@@ -28,8 +28,9 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/suite"
 
-	proxmoxcluster "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	ccmConfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,7 +45,7 @@ type ccmTestSuite struct {
 }
 
 func (ts *ccmTestSuite) SetupTest() {
-	cfg, err := proxmoxcluster.ReadCloudConfig(strings.NewReader(`
+	cfg, err := ccmConfig.ReadCloudConfig(strings.NewReader(`
 clusters:
   - url: https://127.0.0.1:8006/api2/json
     insecure: false
@@ -171,12 +172,12 @@ clusters:
 		},
 	)
 
-	cluster, err := proxmoxcluster.NewCluster(&cfg, &http.Client{})
+	cluster, err := pxpool.NewProxmoxPool(cfg.Clusters, &http.Client{})
 	if err != nil {
 		ts.T().Fatalf("failed to create cluster client: %v", err)
 	}
 
-	ts.i = newInstances(cluster, proxmoxcluster.ProviderDefault)
+	ts.i = newInstances(cluster, ccmConfig.ProviderDefault)
 }
 
 func (ts *ccmTestSuite) TearDownTest() {
pkg/cluster → pkg/proxmoxpool (Proxmox client pool)

@@ -14,8 +14,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package cluster implements the multi-cloud provider interface for Proxmox.
-package cluster
+// Package proxmoxpool provides a pool of Telmate/proxmox-api-go/proxmox clients
+package proxmoxpool
 
 import (
 	"context"
@@ -33,43 +33,52 @@ import (
 	"k8s.io/klog/v2"
 )
 
-// Cluster is a Proxmox client.
-type Cluster struct {
-	config  *ClustersConfig
-	proxmox map[string]*pxapi.Client
+// ProxmoxCluster defines a Proxmox cluster configuration.
+type ProxmoxCluster struct {
+	URL         string `yaml:"url"`
+	Insecure    bool   `yaml:"insecure,omitempty"`
+	TokenID     string `yaml:"token_id,omitempty"`
+	TokenSecret string `yaml:"token_secret,omitempty"`
+	Username    string `yaml:"username,omitempty"`
+	Password    string `yaml:"password,omitempty"`
+	Region      string `yaml:"region,omitempty"`
 }
 
-// NewCluster creates a new Proxmox cluster client.
-func NewCluster(config *ClustersConfig, hclient *http.Client) (*Cluster, error) {
-	clusters := len(config.Clusters)
+// ProxmoxPool is a Proxmox client.
+type ProxmoxPool struct {
+	clients map[string]*pxapi.Client
+}
+
+// NewProxmoxPool creates a new Proxmox cluster client.
+func NewProxmoxPool(config []*ProxmoxCluster, hClient *http.Client) (*ProxmoxPool, error) {
+	clusters := len(config)
 	if clusters > 0 {
 		proxmox := make(map[string]*pxapi.Client, clusters)
 
-		for _, cfg := range config.Clusters {
+		for _, cfg := range config {
 			tlsconf := &tls.Config{InsecureSkipVerify: true}
 			if !cfg.Insecure {
 				tlsconf = nil
 			}
 
-			client, err := pxapi.NewClient(cfg.URL, hclient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
+			pClient, err := pxapi.NewClient(cfg.URL, hClient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
 			if err != nil {
 				return nil, err
 			}
 
 			if cfg.Username != "" && cfg.Password != "" {
-				if err := client.Login(context.Background(), cfg.Username, cfg.Password, ""); err != nil {
+				if err := pClient.Login(context.Background(), cfg.Username, cfg.Password, ""); err != nil {
 					return nil, err
 				}
 			} else {
-				client.SetAPIToken(cfg.TokenID, cfg.TokenSecret)
+				pClient.SetAPIToken(cfg.TokenID, cfg.TokenSecret)
 			}
 
-			proxmox[cfg.Region] = client
+			proxmox[cfg.Region] = pClient
 		}
 
-		return &Cluster{
-			config:  config,
-			proxmox: proxmox,
+		return &ProxmoxPool{
+			clients: proxmox,
 		}, nil
 	}
 
@@ -77,13 +86,13 @@ func NewCluster(config *ClustersConfig, hclient *http.Client) (*Cluster, error)
 }
 
 // CheckClusters checks if the Proxmox connection is working.
-func (c *Cluster) CheckClusters(ctx context.Context) error {
-	for region, client := range c.proxmox {
-		if _, err := client.GetVersion(ctx); err != nil {
+func (c *ProxmoxPool) CheckClusters(ctx context.Context) error {
+	for region, pClient := range c.clients {
+		if _, err := pClient.GetVersion(ctx); err != nil {
 			return fmt.Errorf("failed to initialized proxmox client in region %s, error: %v", region, err)
 		}
 
-		vmlist, err := client.GetVmList(ctx)
+		vmlist, err := pClient.GetVmList(ctx)
 		if err != nil {
 			return fmt.Errorf("failed to get list of VMs in region %s, error: %v", region, err)
 		}
@@ -104,17 +113,17 @@ func (c *Cluster) CheckClusters(ctx context.Context) error {
 }
 
 // GetProxmoxCluster returns a Proxmox cluster client in a given region.
-func (c *Cluster) GetProxmoxCluster(region string) (*pxapi.Client, error) {
-	if c.proxmox[region] != nil {
-		return c.proxmox[region], nil
+func (c *ProxmoxPool) GetProxmoxCluster(region string) (*pxapi.Client, error) {
+	if c.clients[region] != nil {
+		return c.clients[region], nil
 	}
 
 	return nil, fmt.Errorf("proxmox cluster %s not found", region)
 }
 
 // FindVMByNode find a VM by kubernetes node resource in all Proxmox clusters.
-func (c *Cluster) FindVMByNode(ctx context.Context, node *v1.Node) (*pxapi.VmRef, string, error) {
-	for region, px := range c.proxmox {
+func (c *ProxmoxPool) FindVMByNode(ctx context.Context, node *v1.Node) (*pxapi.VmRef, string, error) {
+	for region, px := range c.clients {
 		vmrs, err := px.GetVmRefsByName(ctx, node.Name)
 		if err != nil {
 			if strings.Contains(err.Error(), "not found") {
@@ -140,8 +149,8 @@ func (c *Cluster) FindVMByNode(ctx context.Context, node *v1.Node) (*pxapi.VmRef
 }
 
 // FindVMByName find a VM by name in all Proxmox clusters.
-func (c *Cluster) FindVMByName(ctx context.Context, name string) (*pxapi.VmRef, string, error) {
-	for region, px := range c.proxmox {
+func (c *ProxmoxPool) FindVMByName(ctx context.Context, name string) (*pxapi.VmRef, string, error) {
+	for region, px := range c.clients {
 		vmr, err := px.GetVmRefByName(ctx, name)
 		if err != nil {
 			if strings.Contains(err.Error(), "not found") {
@@ -158,8 +167,8 @@ func (c *Cluster) FindVMByName(ctx context.Context, name string) (*pxapi.VmRef,
 }
 
 // FindVMByUUID find a VM by uuid in all Proxmox clusters.
-func (c *Cluster) FindVMByUUID(ctx context.Context, uuid string) (*pxapi.VmRef, string, error) {
-	for region, px := range c.proxmox {
+func (c *ProxmoxPool) FindVMByUUID(ctx context.Context, uuid string) (*pxapi.VmRef, string, error) {
+	for region, px := range c.clients {
 		vms, err := px.GetResourceList(ctx, "vm")
 		if err != nil {
 			return nil, "", fmt.Errorf("error get resources %v", err)
@@ -196,7 +205,7 @@ func (c *Cluster) FindVMByUUID(ctx context.Context, uuid string) (*pxapi.VmRef,
 }
 
 // GetVMName returns the VM name.
-func (c *Cluster) GetVMName(vmInfo map[string]interface{}) string {
+func (c *ProxmoxPool) GetVMName(vmInfo map[string]interface{}) string {
 	if vmInfo["name"] != nil {
 		return vmInfo["name"].(string) //nolint:errcheck
 	}
@@ -205,7 +214,7 @@ func (c *Cluster) GetVMName(vmInfo map[string]interface{}) string {
 }
 
 // GetVMUUID returns the VM UUID.
-func (c *Cluster) GetVMUUID(vmInfo map[string]interface{}) string {
+func (c *ProxmoxPool) GetVMUUID(vmInfo map[string]interface{}) string {
 	if vmInfo["smbios1"] != nil {
 		return c.getSMBSetting(vmInfo, "uuid")
 	}
@@ -214,7 +223,7 @@ func (c *Cluster) GetVMUUID(vmInfo map[string]interface{}) string {
 }
 
 // GetVMSKU returns the VM instance type name.
-func (c *Cluster) GetVMSKU(vmInfo map[string]interface{}) string {
+func (c *ProxmoxPool) GetVMSKU(vmInfo map[string]interface{}) string {
 	if vmInfo["smbios1"] != nil {
 		return c.getSMBSetting(vmInfo, "sku")
 	}
@@ -222,7 +231,7 @@ func (c *Cluster) GetVMSKU(vmInfo map[string]interface{}) string {
 	return ""
 }
 
-func (c *Cluster) getSMBSetting(vmInfo map[string]interface{}, name string) string {
+func (c *ProxmoxPool) getSMBSetting(vmInfo map[string]interface{}, name string) string {
 	smbios, ok := vmInfo["smbios1"].(string)
 	if !ok {
 		return ""
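A rough usage sketch for the renamed package, put together from the functions shown above (NewProxmoxPool, CheckClusters, GetProxmoxCluster, FindVMByName); the cluster definition and VM name are placeholders, and the tests that follow exercise the same calls against a mocked API:

package main

import (
	"context"
	"log"
	"net/http"

	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
)

func main() {
	clusters := []*pxpool.ProxmoxCluster{
		{
			URL:         "https://127.0.0.1:8006/api2/json",
			TokenID:     "user!token-id",
			TokenSecret: "secret",
			Region:      "cluster-1",
		},
	}

	pool, err := pxpool.NewProxmoxPool(clusters, &http.Client{})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Expected to fail unless a real Proxmox API is reachable at the URL above.
	if err := pool.CheckClusters(ctx); err != nil {
		log.Printf("health check failed: %v", err)
	}

	// Look up the per-region client, then search every region for a VM by name.
	if _, err := pool.GetProxmoxCluster("cluster-1"); err != nil {
		log.Printf("no client for region: %v", err)
	}

	if vmr, region, err := pool.FindVMByName(ctx, "worker-1"); err == nil {
		log.Printf("found VM %d in region %s", vmr.VmId(), region)
	}
}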
pkg/proxmoxpool tests (package cluster_test → proxmoxpool_test)

@@ -14,78 +14,78 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package cluster_test
+package proxmoxpool_test
 
 import (
 	"fmt"
 	"net/http"
-	"strings"
 	"testing"
 
 	"github.com/jarcoal/httpmock"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
+	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 )
 
-func newClusterEnv() (*cluster.ClustersConfig, error) {
-	cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
-clusters:
-  - url: https://127.0.0.1:8006/api2/json
-    insecure: false
-    token_id: "user!token-id"
-    token_secret: "secret"
-    region: cluster-1
-  - url: https://127.0.0.2:8006/api2/json
-    insecure: false
-    token_id: "user!token-id"
-    token_secret: "secret"
-    region: cluster-2
-`))
+func newClusterEnv() []*pxpool.ProxmoxCluster {
+	// copilot convert the cfg call to an array of []*proxmox_pool.ProxmoxCluster:
+	cfg := []*pxpool.ProxmoxCluster{
+		{
+			URL:         "https://127.0.0.1:8006/api2/json",
+			Insecure:    false,
+			TokenID:     "user!token-id",
+			TokenSecret: "secret",
+			Region:      "cluster-1",
+		},
+		{
+			URL:         "https://127.0.0.2:8006/api2/json",
+			Insecure:    false,
+			TokenID:     "user!token-id",
+			TokenSecret: "secret",
+			Region:      "cluster-2",
+		},
+	}
 
-	return &cfg, err
+	return cfg
 }
 
 func TestNewClient(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
-	client, err := cluster.NewCluster(&cluster.ClustersConfig{}, nil)
+	pClient, err := pxpool.NewProxmoxPool([]*pxpool.ProxmoxCluster{}, nil)
 	assert.NotNil(t, err)
-	assert.Nil(t, client)
+	assert.Nil(t, pClient)
 
-	client, err = cluster.NewCluster(cfg, nil)
+	pClient, err = pxpool.NewProxmoxPool(cfg, nil)
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 }
 
 func TestCheckClusters(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
-	client, err := cluster.NewCluster(cfg, nil)
+	pClient, err := pxpool.NewProxmoxPool(cfg, nil)
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 
-	pxapi, err := client.GetProxmoxCluster("test")
+	pxapi, err := pClient.GetProxmoxCluster("test")
 	assert.NotNil(t, err)
 	assert.Nil(t, pxapi)
 	assert.Equal(t, "proxmox cluster test not found", err.Error())
 
-	pxapi, err = client.GetProxmoxCluster("cluster-1")
+	pxapi, err = pClient.GetProxmoxCluster("cluster-1")
 	assert.Nil(t, err)
 	assert.NotNil(t, pxapi)
 
-	err = client.CheckClusters(t.Context())
+	err = pClient.CheckClusters(t.Context())
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "failed to initialized proxmox client in region")
 }
 
 func TestFindVMByNameNonExist(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
 	httpmock.Activate()
@@ -121,11 +121,11 @@ func TestFindVMByNameNonExist(t *testing.T) {
 		},
 	)
 
-	client, err := cluster.NewCluster(cfg, &http.Client{})
+	pClient, err := pxpool.NewProxmoxPool(cfg, &http.Client{})
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 
-	vmr, cluster, err := client.FindVMByName(t.Context(), "non-existing-vm")
+	vmr, cluster, err := pClient.FindVMByName(t.Context(), "non-existing-vm")
 	assert.NotNil(t, err)
 	assert.Equal(t, "", cluster)
 	assert.Nil(t, vmr)
@@ -133,8 +133,7 @@ func TestFindVMByNameNonExist(t *testing.T) {
 }
 
 func TestFindVMByNameExist(t *testing.T) {
-	cfg, err := newClusterEnv()
-	assert.Nil(t, err)
+	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
 	httpmock.Activate()
@@ -168,9 +167,9 @@ func TestFindVMByNameExist(t *testing.T) {
 		},
 	)
 
-	client, err := cluster.NewCluster(cfg, &http.Client{})
+	pClient, err := pxpool.NewProxmoxPool(cfg, &http.Client{})
 	assert.Nil(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, pClient)
 
 	tests := []struct {
 		msg string
@@ -202,7 +201,7 @@ func TestFindVMByNameExist(t *testing.T) {
 		testCase := testCase
 
 		t.Run(fmt.Sprint(testCase.msg), func(t *testing.T) {
-			vmr, cluster, err := client.FindVMByName(t.Context(), testCase.vmName)
+			vmr, cluster, err := pClient.FindVMByName(t.Context(), testCase.vmName)
 
 			if testCase.expectedError == nil {
 				assert.Nil(t, err)