Mirror of https://github.com/outbackdingo/proxmox-cloud-controller-manager.git (synced 2026-01-27 10:20:13 +00:00)
refactor: change proxmox api go module
New proxmox api modules:
* luthermonson/go-proxmox
* sergelogvinov/go-proxmox

Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
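For context, here is a minimal sketch of what the module swap means for client construction, assembled only from calls that appear in the diff below. The helper name newRegionClient and the exact option list are illustrative, not part of the commit; goproxmox.NewAPIClient and the proxmox.With* options are taken from the changed code, not verified against the modules' documentation.

package main

import (
	"context"
	"fmt"

	proxmox "github.com/luthermonson/go-proxmox"

	goproxmox "github.com/sergelogvinov/go-proxmox"
)

// newRegionClient shows the new construction path: collect luthermonson/go-proxmox
// options, then hand them to the sergelogvinov/go-proxmox wrapper.
//
// Before this commit the equivalent (Telmate/proxmox-api-go) was roughly:
//
//	pClient, err := proxmox.NewClient(url, hClient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
//	pClient.SetAPIToken(tokenID, tokenSecret)
func newRegionClient(ctx context.Context, url, tokenID, tokenSecret string) (*goproxmox.APIClient, error) {
	options := []proxmox.Option{
		proxmox.WithUserAgent("ProxmoxCCM v1.0"),
		proxmox.WithAPIToken(tokenID, tokenSecret),
	}

	// Per the diff, NewAPIClient takes the context and cluster URL plus options.
	return goproxmox.NewAPIClient(ctx, url, options...)
}

func main() {
	// Illustrative values only; a real config carries these per region.
	client, err := newRegionClient(context.Background(), "https://127.0.0.1:8006/api2/json", "user!token-id", "secret")
	if err != nil {
		fmt.Println(err)
		return
	}

	_ = client
}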
@@ -22,8 +22,6 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-
-	"github.com/Telmate/proxmox-api-go/proxmox"
 )
 
 const (
@@ -63,20 +61,20 @@ func GetVMID(providerID string) (int, error) {
 }
 
 // ParseProviderID returns the VmRef and region from the providerID.
-func ParseProviderID(providerID string) (*proxmox.VmRef, string, error) {
+func ParseProviderID(providerID string) (int, string, error) {
 	if !strings.HasPrefix(providerID, ProviderName) {
-		return nil, "", fmt.Errorf("foreign providerID or empty \"%s\"", providerID)
+		return 0, "", fmt.Errorf("foreign providerID or empty \"%s\"", providerID)
 	}
 
 	matches := providerIDRegexp.FindStringSubmatch(providerID)
 	if len(matches) != 3 {
-		return nil, "", fmt.Errorf("providerID \"%s\" didn't match expected format \"%s://region/InstanceID\"", providerID, ProviderName)
+		return 0, "", fmt.Errorf("providerID \"%s\" didn't match expected format \"%s://region/InstanceID\"", providerID, ProviderName)
 	}
 
 	vmID, err := strconv.Atoi(matches[2])
 	if err != nil {
-		return nil, "", fmt.Errorf("InstanceID have to be a number, but got \"%s\"", matches[2])
+		return 0, "", fmt.Errorf("InstanceID have to be a number, but got \"%s\"", matches[2])
 	}
 
-	return proxmox.NewVmRef(vmID), matches[1], nil
+	return vmID, matches[1], nil
 }
@@ -177,8 +177,7 @@ func TestParseProviderID(t *testing.T) {
 			assert.NotNil(t, err)
 			assert.EqualError(t, err, testCase.expectedError.Error())
 		} else {
-			assert.NotNil(t, vmr)
-			assert.Equal(t, testCase.expectedvmID, vmr.VmId())
+			assert.Equal(t, testCase.expectedvmID, vmr)
 			assert.Equal(t, testCase.expectedRegion, region)
 		}
 	})
@@ -69,27 +69,26 @@ func init() {
 }
 
 func newCloud(config *ccmConfig.ClustersConfig) (cloudprovider.Interface, error) {
-	client, err := newClient(config.Clusters)
+	ctx, cancel := context.WithCancel(context.Background())
+
+	px, err := pxpool.NewProxmoxPool(ctx, config.Clusters)
 	if err != nil {
+		cancel()
+
 		return nil, err
 	}
 
+	client := &client{
+		pxpool: px,
+		ctx:    ctx,
+		stop:   cancel,
+	}
+
 	instancesInterface := newInstances(client, config.Features)
 
 	return &cloud{
 		client:      client,
 		instancesV2: instancesInterface,
 	}, nil
 }
-
-func newClient(clusters []*pxpool.ProxmoxCluster) (*client, error) {
-	px, err := pxpool.NewProxmoxPool(clusters, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return &client{
-		pxpool: px,
-	}, nil
-}
@@ -101,11 +100,7 @@ func (c *cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder,
 
 	klog.InfoS("clientset initialized")
 
-	ctx, cancel := context.WithCancel(context.Background())
-	c.ctx = ctx
-	c.stop = cancel
-
-	err := c.client.pxpool.CheckClusters(ctx)
+	err := c.client.pxpool.CheckClusters(c.ctx)
 	if err != nil {
 		klog.ErrorS(err, "failed to check proxmox cluster")
 	}
@@ -25,7 +25,7 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/Telmate/proxmox-api-go/proxmox"
+	"github.com/luthermonson/go-proxmox"
 
 	providerconfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	metrics "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/metrics"
@@ -116,10 +116,7 @@ func (i *instances) addresses(ctx context.Context, node *v1.Node, info *instance
 func (i *instances) retrieveQemuAddresses(ctx context.Context, info *instanceInfo) ([]v1.NodeAddress, error) {
 	var addresses []v1.NodeAddress
 
-	vmRef := proxmox.NewVmRef(info.ID)
-	vmRef.SetNode(info.Node)
-
-	nics, err := i.getInstanceNics(ctx, vmRef, info.Region)
+	nics, err := i.getInstanceNics(ctx, info)
 	if err != nil {
 		return nil, err
 	}
@@ -130,15 +127,16 @@ func (i *instances) retrieveQemuAddresses(ctx context.Context, info *instanceInf
 			continue
 		}
 
-		for _, ip := range nic.IpAddresses {
-			i.processIP(ctx, &addresses, ip)
+		for _, ip := range nic.IPAddresses {
+			i.processIP(ctx, &addresses, ip.IPAddress)
 		}
 	}
 
 	return addresses, nil
 }
 
-func (i *instances) processIP(_ context.Context, addresses *[]v1.NodeAddress, ip net.IP) {
+func (i *instances) processIP(_ context.Context, addresses *[]v1.NodeAddress, addr string) {
+	ip := net.ParseIP(addr)
 	if ip == nil || ip.IsLoopback() {
 		return
 	}
@@ -166,17 +164,27 @@ func (i *instances) processIP(_ context.Context, addresses *[]v1.NodeAddress, ip
 	})
 }
 
-func (i *instances) getInstanceNics(ctx context.Context, vmRef *proxmox.VmRef, region string) ([]proxmox.AgentNetworkInterface, error) {
-	result := make([]proxmox.AgentNetworkInterface, 0)
+func (i *instances) getInstanceNics(ctx context.Context, info *instanceInfo) ([]*proxmox.AgentNetworkIface, error) {
+	result := make([]*proxmox.AgentNetworkIface, 0)
 
-	px, err := i.c.pxpool.GetProxmoxCluster(region)
+	px, err := i.c.pxpool.GetProxmoxCluster(info.Region)
 	if err != nil {
 		return result, err
 	}
 
+	node, err := px.Node(ctx, info.Node)
+	if err != nil {
+		return nil, err
+	}
+
+	vm, err := node.VirtualMachine(ctx, info.ID)
+	if err != nil {
+		return nil, err
+	}
+
 	mc := metrics.NewMetricContext("getVmInfo")
 
-	nicset, err := vmRef.GetAgentInformation(ctx, px, false)
+	nicset, err := vm.AgentGetNetworkIFaces(ctx)
 	if mc.ObserveRequest(err) != nil {
 		return result, err
 	}
@@ -24,8 +24,6 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/Telmate/proxmox-api-go/proxmox"
-
 	providerconfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	metrics "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/metrics"
 	provider "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/provider"
@@ -142,7 +140,7 @@ func (i *instances) InstanceShutdown(ctx context.Context, node *v1.Node) (bool,
 		return false, nil
 	}
 
-	vmr, region, err := provider.ParseProviderID(node.Spec.ProviderID)
+	vmID, region, err := provider.ParseProviderID(node.Spec.ProviderID)
 	if err != nil {
 		klog.ErrorS(err, "instances.InstanceShutdown() failed to parse providerID", "providerID", node.Spec.ProviderID)
 
@@ -158,12 +156,12 @@ func (i *instances) InstanceShutdown(ctx context.Context, node *v1.Node) (bool,
 
 	mc := metrics.NewMetricContext("getVmState")
 
-	vmState, err := px.GetVmState(ctx, vmr)
+	vm, err := px.GetVMStatus(ctx, vmID)
 	if mc.ObserveRequest(err) != nil {
 		return false, err
 	}
 
-	if vmState["status"].(string) == "stopped" { //nolint:errcheck
+	if vm.Status == "stopped" {
 		return true, nil
 	}
 
@@ -258,7 +256,7 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
 	klog.V(4).InfoS("instances.getInstanceInfo() called", "node", klog.KRef("", node.Name), "provider", i.provider)
 
 	var (
-		vmRef  *proxmox.VmRef
+		vmID   int
 		region string
 		err    error
 	)
@@ -270,7 +268,7 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
 		region = node.Labels[v1.LabelTopologyRegion]
 	}
 
-	vmID, err := strconv.Atoi(node.Annotations[AnnotationProxmoxInstanceID])
+	vmID, err = strconv.Atoi(node.Annotations[AnnotationProxmoxInstanceID])
 	if err != nil {
 		return nil, fmt.Errorf("instances.getInstanceInfo() parse annotation error: %v", err)
 	}
@@ -287,31 +285,31 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
 
 		mc := metrics.NewMetricContext("findVmByName")
 
-		vmRef, region, err = i.c.pxpool.FindVMByNode(ctx, node)
+		vmID, region, err = i.c.pxpool.FindVMByNode(ctx, node)
 		if mc.ObserveRequest(err) != nil {
 			mc := metrics.NewMetricContext("findVmByUUID")
 
-			vmRef, region, err = i.c.pxpool.FindVMByUUID(ctx, node.Status.NodeInfo.SystemUUID)
+			vmID, region, err = i.c.pxpool.FindVMByUUID(ctx, node.Status.NodeInfo.SystemUUID)
 			if mc.ObserveRequest(err) != nil {
 				return nil, err
 			}
 		}
 
-		if vmRef == nil {
+		if vmID == 0 {
 			return nil, cloudprovider.InstanceNotFound
 		}
 
-		providerID = provider.GetProviderIDFromID(region, vmRef.VmId())
+		providerID = provider.GetProviderIDFromID(region, vmID)
 	}
 
-	if vmRef == nil {
-		vmRef, region, err = provider.ParseProviderID(providerID)
+	if vmID == 0 {
+		vmID, region, err = provider.ParseProviderID(providerID)
 		if err != nil {
 			if i.provider == providerconfig.ProviderDefault {
 				return nil, fmt.Errorf("instances.getInstanceInfo() error: %v", err)
 			}
 
-			vmRef, region, err = i.c.pxpool.FindVMByUUID(ctx, node.Status.NodeInfo.SystemUUID)
+			vmID, region, err = i.c.pxpool.FindVMByUUID(ctx, node.Status.NodeInfo.SystemUUID)
 			if err != nil {
 				return nil, fmt.Errorf("instances.getInstanceInfo() error: %v", err)
 			}
@@ -325,7 +323,7 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
 
 	mc := metrics.NewMetricContext("getVmInfo")
 
-	vmConfig, err := px.GetVmConfig(ctx, vmRef)
+	vm, err := px.GetVMConfig(ctx, vmID)
 	if mc.ObserveRequest(err) != nil {
 		if strings.Contains(err.Error(), "not found") {
 			return nil, cloudprovider.InstanceNotFound
@@ -335,12 +333,12 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
 	}
 
 	info := &instanceInfo{
-		ID:     vmRef.VmId(),
-		UUID:   i.c.pxpool.GetVMUUID(vmConfig),
-		Name:   i.c.pxpool.GetVMName(vmConfig),
-		Node:   vmRef.Node().String(),
+		ID:     vmID,
+		UUID:   i.c.pxpool.GetVMUUID(vm),
+		Name:   vm.Name,
+		Node:   vm.Node,
 		Region: region,
-		Zone:   vmRef.Node().String(),
+		Zone:   vm.Node,
 	}
 
 	if info.UUID != node.Status.NodeInfo.SystemUUID {
@@ -355,18 +353,9 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
 		return nil, cloudprovider.InstanceNotFound
 	}
 
-	info.Type = i.c.pxpool.GetVMSKU(vmConfig)
+	info.Type = i.c.pxpool.GetVMSKU(vm)
 	if !instanceTypeNameRegexp.MatchString(info.Type) {
-		if vmConfig["cores"] != nil && vmConfig["memory"] != nil {
-			memory, err := strconv.Atoi(vmConfig["memory"].(string))
-			if err != nil {
-				return info, err
-			}
-
-			info.Type = fmt.Sprintf("%.0fVCPU-%.0fGB",
-				vmConfig["cores"].(float64), //nolint:errcheck
-				float64(memory)/1024)
-		}
+		info.Type = fmt.Sprintf("%dVCPU-%dGB", vm.CPUs, vm.MaxMem/1024/1024/1024)
 	}
 
 	return info, nil
@@ -23,8 +23,10 @@ import (
 	"testing"
 
 	"github.com/jarcoal/httpmock"
+	proxmox "github.com/luthermonson/go-proxmox"
 	"github.com/stretchr/testify/suite"
 
+	goproxmox "github.com/sergelogvinov/go-proxmox"
 	providerconfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
 	"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
 
@@ -59,6 +61,31 @@ clusters:
 		ts.T().Fatalf("failed to read config: %v", err)
 	}
 
+	httpmock.RegisterResponder(http.MethodGet, `=~/cluster/status`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.NodeStatuses{{Name: "pve-1"}, {Name: "pve-2"}, {Name: "pve-3"}},
+			})
+		})
+	httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/status`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.Node{},
+			})
+		})
+	httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/status`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.Node{},
+			})
+		})
+	httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/status`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.Node{},
+			})
+		})
+
 	httpmock.RegisterResponderWithQuery("GET", "https://127.0.0.1:8006/api2/json/cluster/resources", "type=vm",
 		func(_ *http.Request) (*http.Response, error) {
 			return httpmock.NewJsonResponse(200, map[string]interface{}{
@@ -95,12 +122,32 @@ clusters:
 						"name":   "cluster-2-node-1",
 						"maxcpu": 1,
 						"maxmem": 2 * 1024 * 1024 * 1024,
 						"status": "stopped",
 					},
 				},
 			})
 		},
 	)
 
+	httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/qemu/100/status/current`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.VirtualMachine{Node: "pve-1", Name: "cluster-1-node-1", VMID: 100, CPUs: 4, MaxMem: 10 * 1024 * 1024 * 1024, Status: "running"},
+			})
+		})
+	httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/qemu/101/status/current`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.VirtualMachine{Node: "pve-2", Name: "cluster-1-node-2", VMID: 101, CPUs: 2, MaxMem: 5 * 1024 * 1024 * 1024, Status: "running"},
+			})
+		})
+	httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/qemu/100/status/current`,
+		func(_ *http.Request) (*http.Response, error) {
+			return httpmock.NewJsonResponse(200, map[string]any{
+				"data": proxmox.VirtualMachine{Node: "pve-3", Name: "cluster-2-node-1", VMID: 100, CPUs: 1, MaxMem: 2 * 1024 * 1024 * 1024, Status: "stopped"},
+			})
+		})
+
 	httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/nodes/pve-1/qemu/100/config",
 		func(_ *http.Request) (*http.Response, error) {
 			return httpmock.NewJsonResponse(200, map[string]interface{}{
@@ -149,27 +196,7 @@ clusters:
 		},
 	)
 
-	httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/nodes/pve-1/qemu/100/status/current",
-		func(_ *http.Request) (*http.Response, error) {
-			return httpmock.NewJsonResponse(200, map[string]interface{}{
-				"data": map[string]interface{}{
-					"status": "running",
-				},
-			})
-		},
-	)
-
-	httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/nodes/pve-3/qemu/100/status/current",
-		func(_ *http.Request) (*http.Response, error) {
-			return httpmock.NewJsonResponse(200, map[string]interface{}{
-				"data": map[string]interface{}{
-					"status": "stopped",
-				},
-			})
-		},
-	)
-
-	px, err := proxmoxpool.NewProxmoxPool(cfg.Clusters, &http.Client{})
+	px, err := proxmoxpool.NewProxmoxPool(ts.T().Context(), cfg.Clusters, proxmox.WithHTTPClient(&http.Client{}))
 	if err != nil {
 		ts.T().Fatalf("failed to create cluster client: %v", err)
 	}
@@ -374,7 +401,7 @@ func (ts *ccmTestSuite) TestInstanceShutdown() {
 				},
 			},
 			expected:      false,
-			expectedError: "vm '500' not found",
+			expectedError: goproxmox.ErrVirtualMachineNotFound.Error(),
 		},
 		{
 			msg: "NodeExists",
@@ -397,7 +424,7 @@ func (ts *ccmTestSuite) TestInstanceShutdown() {
 			msg: "NodeExistsStopped",
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{
-					Name: "cluster-1-node-3",
+					Name: "cluster-2-node-1",
 				},
 				Spec: v1.NodeSpec{
 					ProviderID: "proxmox://cluster-2/100",
@@ -485,7 +512,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
 		expected *cloudprovider.InstanceMetadata
 	}{
 		{
-			msg: "NodeAnnotations",
+			msg: "NodeUndefined",
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "test-node-1",
@@ -23,11 +23,12 @@ import (
 	"encoding/base64"
 	"fmt"
 	"net/http"
-	"net/url"
 	"os"
 	"strings"
 
-	"github.com/Telmate/proxmox-api-go/proxmox"
+	proxmox "github.com/luthermonson/go-proxmox"
 
+	goproxmox "github.com/sergelogvinov/go-proxmox"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
@@ -37,28 +38,38 @@ import (
 type ProxmoxCluster struct {
 	URL             string `yaml:"url"`
 	Insecure        bool   `yaml:"insecure,omitempty"`
-	TokenIDFile     string `yaml:"token_id_file,omitempty"`
-	TokenSecretFile string `yaml:"token_secret_file,omitempty"`
+	TokenID         string `yaml:"token_id,omitempty"`
+	TokenIDFile     string `yaml:"token_id_file,omitempty"`
+	TokenSecret     string `yaml:"token_secret,omitempty"`
+	TokenSecretFile string `yaml:"token_secret_file,omitempty"`
 	Username        string `yaml:"username,omitempty"`
 	Password        string `yaml:"password,omitempty"`
 	Region          string `yaml:"region,omitempty"`
 }
 
-// ProxmoxPool is a Proxmox client.
+// ProxmoxPool is a Proxmox client pool of proxmox clusters.
 type ProxmoxPool struct {
-	clients map[string]*proxmox.Client
+	clients map[string]*goproxmox.APIClient
 }
 
 // NewProxmoxPool creates a new Proxmox cluster client.
-func NewProxmoxPool(config []*ProxmoxCluster, hClient *http.Client) (*ProxmoxPool, error) {
+func NewProxmoxPool(ctx context.Context, config []*ProxmoxCluster, options ...proxmox.Option) (*ProxmoxPool, error) {
 	clusters := len(config)
 	if clusters > 0 {
-		clients := make(map[string]*proxmox.Client, clusters)
+		clients := make(map[string]*goproxmox.APIClient, clusters)
 
 		for _, cfg := range config {
-			if cfg.TokenID == "" {
+			options = append(options, proxmox.WithUserAgent("ProxmoxCCM v1.0"))
+
+			if cfg.Insecure {
+				httpTr := &http.Transport{
+					TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+				}
+
+				options = append(options, proxmox.WithHTTPClient(&http.Client{Transport: httpTr}))
+			}
+
+			if cfg.TokenID == "" && cfg.TokenIDFile != "" {
 				var err error
 
 				cfg.TokenID, err = readValueFromFile(cfg.TokenIDFile)
@@ -67,7 +78,7 @@ func NewProxmoxPool(config []*ProxmoxCluster, hClient *http.Client) (*ProxmoxPoo
 			}
 		}
 
-		if cfg.TokenSecret == "" {
+		if cfg.TokenSecret == "" && cfg.TokenSecretFile != "" {
 			var err error
 
 			cfg.TokenSecret, err = readValueFromFile(cfg.TokenSecretFile)
@@ -76,25 +87,21 @@ func NewProxmoxPool(config []*ProxmoxCluster, hClient *http.Client) (*ProxmoxPoo
 			}
 		}
 
-		tlsconf := &tls.Config{InsecureSkipVerify: true}
-		if !cfg.Insecure {
-			tlsconf = nil
+		if cfg.Username != "" && cfg.Password != "" {
+			options = append(options, proxmox.WithCredentials(&proxmox.Credentials{
+				Username: cfg.Username,
+				Password: cfg.Password,
+			}))
+		} else if cfg.TokenID != "" && cfg.TokenSecret != "" {
+			options = append(options, proxmox.WithAPIToken(cfg.TokenID, cfg.TokenSecret))
 		}
 
-		pClient, err := proxmox.NewClient(cfg.URL, hClient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
+		pxClient, err := goproxmox.NewAPIClient(ctx, cfg.URL, options...)
 		if err != nil {
 			return nil, err
 		}
 
-		if cfg.Username != "" && cfg.Password != "" {
-			if err := pClient.Login(context.Background(), cfg.Username, cfg.Password, ""); err != nil {
-				return nil, err
-			}
-		} else {
-			pClient.SetAPIToken(cfg.TokenID, cfg.TokenSecret)
-		}
-
-		clients[cfg.Region] = pClient
+		clients[cfg.Region] = pxClient
 		}
 
 		return &ProxmoxPool{
@@ -105,21 +112,33 @@ func NewProxmoxPool(config []*ProxmoxCluster, hClient *http.Client) (*ProxmoxPoo
 	return nil, ErrClustersNotFound
 }
 
+// GetRegions returns supported regions.
+func (c *ProxmoxPool) GetRegions() []string {
+	regions := make([]string, 0, len(c.clients))
+
+	for region := range c.clients {
+		regions = append(regions, region)
+	}
+
+	return regions
+}
+
 // CheckClusters checks if the Proxmox connection is working.
 func (c *ProxmoxPool) CheckClusters(ctx context.Context) error {
-	for region, pClient := range c.clients {
-		if _, err := pClient.GetVersion(ctx); err != nil {
+	for region, pxClient := range c.clients {
+		if _, err := pxClient.Version(ctx); err != nil {
 			return fmt.Errorf("failed to initialized proxmox client in region %s, error: %v", region, err)
 		}
 
-		vmlist, err := pClient.GetVmList(ctx)
+		pxCluster, err := pxClient.Cluster(ctx)
 		if err != nil {
-			return fmt.Errorf("failed to get list of VMs in region %s, error: %v", region, err)
+			return fmt.Errorf("failed to get cluster info in region %s, error: %v", region, err)
 		}
 
-		vms, ok := vmlist["data"].([]interface{})
-		if !ok {
-			return fmt.Errorf("failed to cast response to list of VMs in region %s, error: %v", region, err)
+		// Check if we can have permission to list VMs
+		vms, err := pxCluster.Resources(ctx, "vm")
+		if err != nil {
+			return fmt.Errorf("failed to get list of VMs in region %s, error: %v", region, err)
 		}
 
 		if len(vms) > 0 {
@@ -133,7 +152,7 @@ func (c *ProxmoxPool) CheckClusters(ctx context.Context) error {
 }
 
 // GetProxmoxCluster returns a Proxmox cluster client in a given region.
-func (c *ProxmoxPool) GetProxmoxCluster(region string) (*proxmox.Client, error) {
+func (c *ProxmoxPool) GetProxmoxCluster(region string) (*goproxmox.APIClient, error) {
 	if c.clients[region] != nil {
 		return c.clients[region], nil
 	}
@@ -141,9 +160,39 @@ func (c *ProxmoxPool) GetProxmoxCluster(region string) (*proxmox.Client, error)
 	return nil, ErrRegionNotFound
 }
 
+// GetVMByIDInRegion returns a Proxmox VM by its ID in a given region.
+func (c *ProxmoxPool) GetVMByIDInRegion(ctx context.Context, region string, vmid uint64) (*proxmox.ClusterResource, error) {
+	px, err := c.GetProxmoxCluster(region)
+	if err != nil {
+		return nil, err
+	}
+
+	vm, err := px.FindVMByID(ctx, uint64(vmid)) //nolint: unconvert
+	if err != nil {
+		return nil, err
+	}
+
+	return vm, nil
+}
+
+// DeleteVMByIDInRegion deletes a Proxmox VM by its ID in a given region.
+func (c *ProxmoxPool) DeleteVMByIDInRegion(ctx context.Context, region string, vm *proxmox.ClusterResource) error {
+	px, err := c.GetProxmoxCluster(region)
+	if err != nil {
+		return err
+	}
+
+	return px.DeleteVMByID(ctx, vm.Node, int(vm.VMID))
+}
+
 // GetNodeGroup returns a Proxmox node ha-group in a given region.
 func (c *ProxmoxPool) GetNodeGroup(ctx context.Context, region string, node string) (string, error) {
-	haGroups, err := c.GetHAGroupList(ctx, region)
+	px, err := c.GetProxmoxCluster(region)
 	if err != nil {
+		return "", err
+	}
+
+	haGroups, err := px.GetHAGroupList(ctx)
+	if err != nil {
 		return "", fmt.Errorf("error get ha-groups %v", err)
 	}
@@ -153,7 +202,7 @@ func (c *ProxmoxPool) GetNodeGroup(ctx context.Context, region string, node stri
 			continue
 		}
 
-		for _, n := range g.Nodes {
+		for _, n := range strings.Split(g.Nodes, ",") {
 			if node == strings.Split(n, ":")[0] {
 				return g.Group, nil
 			}
@@ -163,183 +212,105 @@ func (c *ProxmoxPool) GetNodeGroup(ctx context.Context, region string, node stri
 	return "", ErrHAGroupNotFound
 }
 
-// GetHAGroupList returns a list of Proxmox ha-groups in a given region.
-func (c *ProxmoxPool) GetHAGroupList(ctx context.Context, region string) (haGroups []proxmox.HAGroup, err error) {
-	px, err := c.GetProxmoxCluster(region)
-	if err != nil {
-		return nil, err
-	}
-
-	list, err := px.GetItemList(ctx, "/cluster/ha/groups")
-	if err != nil {
-		return nil, err
-	}
-
-	haGroups = []proxmox.HAGroup{}
-
-	items, ok := list["data"].([]interface{})
-	if !ok {
-		return nil, fmt.Errorf("failed to cast response to list of HA groups in region %s, error: %v", region, err)
-	}
-
-	for _, item := range items {
-		itemMap := item.(map[string]interface{})
-
-		if itemMap["type"].(string) != "group" {
-			continue
-		}
-
-		haGroups = append(haGroups, proxmox.HAGroup{
-			Group:      itemMap["group"].(string),
-			Nodes:      strings.Split(itemMap["nodes"].(string), ","),
-			NoFailback: itemMap["nofailback"].(float64) == 1,
-			Restricted: itemMap["restricted"].(float64) == 1,
-			Type:       itemMap["type"].(string),
-		})
-	}
-
-	return haGroups, nil
-}
-
 // FindVMByNode find a VM by kubernetes node resource in all Proxmox clusters.
-func (c *ProxmoxPool) FindVMByNode(ctx context.Context, node *v1.Node) (*proxmox.VmRef, string, error) {
+func (c *ProxmoxPool) FindVMByNode(ctx context.Context, node *v1.Node) (vmID int, region string, err error) {
 	for region, px := range c.clients {
-		vmrs, err := px.GetVmRefsByName(ctx, node.Name)
-		if err != nil {
-			if strings.Contains(err.Error(), "not found") {
-				continue
-			}
-
-			return nil, "", err
-		}
-
-		for _, vmr := range vmrs {
-			config, err := px.GetVmConfig(ctx, vmr)
-			if err != nil {
-				return nil, "", err
-			}
-
-			if c.GetVMUUID(config) == node.Status.NodeInfo.SystemUUID {
-				return vmr, region, nil
-			}
-		}
-	}
-
-	return nil, "", ErrInstanceNotFound
-}
-
-// FindVMByName find a VM by name in all Proxmox clusters.
-func (c *ProxmoxPool) FindVMByName(ctx context.Context, name string) (*proxmox.VmRef, string, error) {
-	for region, px := range c.clients {
-		vmr, err := px.GetVmRefByName(ctx, name)
-		if err != nil {
-			if strings.Contains(err.Error(), "not found") {
-				continue
-			}
-
-			return nil, "", err
-		}
-
-		return vmr, region, nil
-	}
-
-	return nil, "", ErrInstanceNotFound
+		vmid, err := px.FindVMByFilter(ctx, func(rs *proxmox.ClusterResource) (bool, error) {
+			if rs.Type != "qemu" {
+				return false, nil
+			}
+
+			if !strings.HasPrefix(rs.Name, node.Name) {
+				return false, nil
+			}
+
+			pxnode, err := px.Client.Node(ctx, rs.Node)
+			if err != nil {
+				return false, err
+			}
+
+			vm, err := pxnode.VirtualMachine(ctx, int(rs.VMID))
+			if err != nil {
+				return false, err
+			}
+
+			smbios1 := goproxmox.VMSMBIOS{}
+			smbios1.UnmarshalString(vm.VirtualMachineConfig.SMBios1) //nolint:errcheck
+
+			if smbios1.UUID == node.Status.NodeInfo.SystemUUID {
+				return true, nil
+			}
+
+			return false, nil
+		})
+		if err != nil {
+			if err == goproxmox.ErrVirtualMachineNotFound {
+				continue
+			}
+
+			return 0, "", err
+		}
+
+		if vmid == 0 {
+			continue
+		}
+
+		return vmid, region, nil
+	}
+
+	return 0, "", ErrInstanceNotFound
 }
 
 // FindVMByUUID find a VM by uuid in all Proxmox clusters.
-func (c *ProxmoxPool) FindVMByUUID(ctx context.Context, uuid string) (*proxmox.VmRef, string, error) {
+func (c *ProxmoxPool) FindVMByUUID(ctx context.Context, uuid string) (vmID int, region string, err error) {
 	for region, px := range c.clients {
-		vms, err := px.GetResourceList(ctx, "vm")
-		if err != nil {
-			return nil, "", fmt.Errorf("error get resources %v", err)
-		}
-
-		for vmii := range vms {
-			vm, ok := vms[vmii].(map[string]interface{})
-			if !ok {
-				return nil, "", fmt.Errorf("failed to cast response to map, vm: %v", vm)
-			}
-
-			if vm["type"].(string) != "qemu" { //nolint:errcheck
-				continue
-			}
-
-			vmr := proxmox.NewVmRef(int(vm["vmid"].(float64))) //nolint:errcheck
-			vmr.SetNode(vm["node"].(string)) //nolint:errcheck
-			vmr.SetVmType("qemu")
-
-			config, err := px.GetVmConfig(ctx, vmr)
-			if err != nil {
-				return nil, "", err
-			}
-
-			if config["smbios1"] != nil {
-				if c.getSMBSetting(config, "uuid") == uuid {
-					return vmr, region, nil
-				}
-			}
-		}
-	}
-
-	return nil, "", ErrInstanceNotFound
-}
-
-// GetVMName returns the VM name.
-func (c *ProxmoxPool) GetVMName(vmInfo map[string]interface{}) string {
-	if vmInfo["name"] != nil {
-		return vmInfo["name"].(string) //nolint:errcheck
-	}
-
-	return ""
+		vmid, err := px.FindVMByFilter(ctx, func(rs *proxmox.ClusterResource) (bool, error) {
+			if rs.Type != "qemu" {
+				return false, nil
+			}
+
+			pxnode, err := px.Client.Node(ctx, rs.Node)
+			if err != nil {
+				return false, err
+			}
+
+			vm, err := pxnode.VirtualMachine(ctx, int(rs.VMID))
+			if err != nil {
+				return false, err
+			}
+
+			if c.GetVMUUID(vm) == uuid {
+				return true, nil
+			}
+
+			return false, nil
+		})
+		if err != nil {
+			return 0, "", ErrInstanceNotFound
+		}
+
+		return vmid, region, nil
+	}
+
+	return 0, "", ErrInstanceNotFound
 }
 
 // GetVMUUID returns the VM UUID.
-func (c *ProxmoxPool) GetVMUUID(vmInfo map[string]interface{}) string {
-	if vmInfo["smbios1"] != nil {
-		return c.getSMBSetting(vmInfo, "uuid")
-	}
-
-	return ""
+func (c *ProxmoxPool) GetVMUUID(vm *proxmox.VirtualMachine) string {
+	smbios1 := goproxmox.VMSMBIOS{}
+	smbios1.UnmarshalString(vm.VirtualMachineConfig.SMBios1) //nolint:errcheck
+
+	return smbios1.UUID
 }
 
 // GetVMSKU returns the VM instance type name.
-func (c *ProxmoxPool) GetVMSKU(vmInfo map[string]interface{}) string {
-	if vmInfo["smbios1"] != nil {
-		return c.getSMBSetting(vmInfo, "sku")
-	}
-
-	return ""
-}
-
-func (c *ProxmoxPool) getSMBSetting(vmInfo map[string]interface{}, name string) string {
-	smbios, ok := vmInfo["smbios1"].(string)
-	if !ok {
-		return ""
-	}
-
-	for _, l := range strings.Split(smbios, ",") {
-		if l == "" || l == "base64=1" {
-			continue
-		}
-
-		parsedParameter, err := url.ParseQuery(l)
-		if err != nil {
-			return ""
-		}
-
-		for k, v := range parsedParameter {
-			if k == name {
-				decodedString, err := base64.StdEncoding.DecodeString(v[0])
-				if err != nil {
-					decodedString = []byte(v[0])
-				}
-
-				return string(decodedString)
-			}
-		}
-	}
-
-	return ""
+func (c *ProxmoxPool) GetVMSKU(vm *proxmox.VirtualMachine) string {
+	smbios1 := goproxmox.VMSMBIOS{}
+	smbios1.UnmarshalString(vm.VirtualMachineConfig.SMBios1) //nolint:errcheck
+
+	sku, _ := base64.StdEncoding.DecodeString(smbios1.SKU) //nolint:errcheck
+
+	return string(sku)
 }
 
 func readValueFromFile(path string) (string, error) {
@@ -17,12 +17,9 @@ limitations under the License.
 package proxmoxpool_test
 
 import (
-	"fmt"
-	"net/http"
 	"os"
 	"testing"
 
-	"github.com/jarcoal/httpmock"
 	"github.com/stretchr/testify/assert"
 
 	pxpool "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
@@ -67,13 +64,13 @@ func TestNewClient(t *testing.T) {
 	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
-	pClient, err := pxpool.NewProxmoxPool([]*pxpool.ProxmoxCluster{}, nil)
+	pxClient, err := pxpool.NewProxmoxPool(t.Context(), []*pxpool.ProxmoxCluster{})
 	assert.NotNil(t, err)
-	assert.Nil(t, pClient)
+	assert.Nil(t, pxClient)
 
-	pClient, err = pxpool.NewProxmoxPool(cfg, nil)
+	pxClient, err = pxpool.NewProxmoxPool(t.Context(), cfg)
 	assert.Nil(t, err)
-	assert.NotNil(t, pClient)
+	assert.NotNil(t, pxClient)
 }
 
 func TestNewClientWithCredentialsFromFile(t *testing.T) {
@@ -92,9 +89,9 @@ func TestNewClientWithCredentialsFromFile(t *testing.T) {
 
 	cfg := newClusterEnvWithFiles(tokenIDFile.Name(), tokenSecretFile.Name())
 
-	pClient, err := pxpool.NewProxmoxPool(cfg, nil)
+	pxClient, err := pxpool.NewProxmoxPool(t.Context(), cfg)
 	assert.Nil(t, err)
-	assert.NotNil(t, pClient)
+	assert.NotNil(t, pxClient)
 	assert.Equal(t, "user!token-id", cfg[0].TokenID)
 	assert.Equal(t, "secret", cfg[0].TokenSecret)
 }
@@ -103,152 +100,20 @@ func TestCheckClusters(t *testing.T) {
 	cfg := newClusterEnv()
 	assert.NotNil(t, cfg)
 
-	pClient, err := pxpool.NewProxmoxPool(cfg, nil)
+	pxClient, err := pxpool.NewProxmoxPool(t.Context(), cfg)
 	assert.Nil(t, err)
-	assert.NotNil(t, pClient)
+	assert.NotNil(t, pxClient)
 
-	pxapi, err := pClient.GetProxmoxCluster("test")
+	pxapi, err := pxClient.GetProxmoxCluster("test")
 	assert.NotNil(t, err)
 	assert.Nil(t, pxapi)
 	assert.Equal(t, pxpool.ErrRegionNotFound, err)
 
-	pxapi, err = pClient.GetProxmoxCluster("cluster-1")
+	pxapi, err = pxClient.GetProxmoxCluster("cluster-1")
 	assert.Nil(t, err)
 	assert.NotNil(t, pxapi)
 
-	err = pClient.CheckClusters(t.Context())
+	err = pxClient.CheckClusters(t.Context())
 	assert.NotNil(t, err)
 	assert.Contains(t, err.Error(), "failed to initialized proxmox client in region")
 }
-
-func TestFindVMByNameNonExist(t *testing.T) {
-	cfg := newClusterEnv()
-	assert.NotNil(t, cfg)
-
-	httpmock.Activate()
-	defer httpmock.DeactivateAndReset()
-
-	httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/cluster/resources",
-		func(_ *http.Request) (*http.Response, error) {
-			return httpmock.NewJsonResponse(200, map[string]interface{}{
-				"data": []interface{}{
-					map[string]interface{}{
-						"node": "node-1",
-						"type": "qemu",
-						"vmid": 100,
-						"name": "test1-vm",
-					},
-				},
-			})
-		},
-	)
-
-	httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/cluster/resources",
-		func(_ *http.Request) (*http.Response, error) {
-			return httpmock.NewJsonResponse(200, map[string]interface{}{
-				"data": []interface{}{
-					map[string]interface{}{
-						"node": "node-2",
-						"type": "qemu",
-						"vmid": 100,
-						"name": "test2-vm",
-					},
-				},
-			})
-		},
-	)
-
-	pClient, err := pxpool.NewProxmoxPool(cfg, &http.Client{})
-	assert.Nil(t, err)
-	assert.NotNil(t, pClient)
-
-	vmr, cluster, err := pClient.FindVMByName(t.Context(), "non-existing-vm")
-	assert.NotNil(t, err)
-	assert.Equal(t, "", cluster)
-	assert.Nil(t, vmr)
-	assert.Equal(t, pxpool.ErrInstanceNotFound, err)
-}
-
-func TestFindVMByNameExist(t *testing.T) {
-	cfg := newClusterEnv()
-	assert.NotNil(t, cfg)
-
-	httpmock.Activate()
-	defer httpmock.DeactivateAndReset()
-
-	httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/cluster/resources",
-		httpmock.NewJsonResponderOrPanic(200, map[string]interface{}{
-			"data": []interface{}{
-				map[string]interface{}{
-					"node": "node-1",
-					"type": "qemu",
-					"vmid": 100,
-					"name": "test1-vm",
-				},
-			},
-		}),
-	)
-
-	httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/cluster/resources",
-		func(_ *http.Request) (*http.Response, error) {
-			return httpmock.NewJsonResponse(200, map[string]interface{}{
-				"data": []interface{}{
-					map[string]interface{}{
-						"node": "node-2",
-						"type": "qemu",
-						"vmid": 100,
-						"name": "test2-vm",
-					},
-				},
-			})
-		},
-	)
-
-	pClient, err := pxpool.NewProxmoxPool(cfg, &http.Client{})
-	assert.Nil(t, err)
-	assert.NotNil(t, pClient)
-
-	tests := []struct {
-		msg             string
-		vmName          string
-		expectedError   error
-		expectedVMID    int
-		expectedCluster string
-	}{
-		{
-			msg:           "vm not found",
-			vmName:        "non-existing-vm",
-			expectedError: fmt.Errorf("vm 'non-existing-vm' not found"),
-		},
-		{
-			msg:             "Test1-VM",
-			vmName:          "test1-vm",
-			expectedVMID:    100,
-			expectedCluster: "cluster-1",
-		},
-		{
-			msg:             "Test2-VM",
-			vmName:          "test2-vm",
-			expectedVMID:    100,
-			expectedCluster: "cluster-2",
-		},
-	}
-
-	for _, testCase := range tests {
-		t.Run(fmt.Sprint(testCase.msg), func(t *testing.T) {
-			vmr, cluster, err := pClient.FindVMByName(t.Context(), testCase.vmName)
-
-			if testCase.expectedError == nil {
-				assert.Nil(t, err)
-				assert.NotNil(t, vmr)
-				assert.Equal(t, testCase.expectedVMID, vmr.VmId())
-				assert.Equal(t, testCase.expectedCluster, cluster)
-			} else {
-				assert.NotNil(t, err)
-				assert.Equal(t, "", cluster)
-				assert.Nil(t, vmr)
-				assert.Equal(t, pxpool.ErrInstanceNotFound, err)
-			}
-		})
-	}
-}