Mirror of https://github.com/outbackdingo/proxmox-cloud-controller-manager.git (synced 2026-01-27 10:20:13 +00:00)
test: mock proxmox api
Test the CCM and mock the Proxmox API.
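The tests introduced here rely on jarcoal/httpmock: because cluster.NewCluster now accepts an *http.Client, the tests can register fake responders for the Proxmox API endpoints instead of talking to a real cluster. A minimal, self-contained sketch of that pattern follows; the endpoint payload and fixture names (test1-vm, cluster-1, TestFindVMByNameMockedSketch) are illustrative, modelled on the fixtures used in the diff below, not part of the commit itself.

// Sketch: mock the Proxmox API with httpmock and exercise cluster.FindVMByName.
// Fixture names and the JSON payload are illustrative assumptions.
package cluster_test

import (
    "net/http"
    "strings"
    "testing"

    "github.com/jarcoal/httpmock"
    "github.com/stretchr/testify/assert"

    "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
)

func TestFindVMByNameMockedSketch(t *testing.T) {
    httpmock.Activate()
    defer httpmock.DeactivateAndReset()

    // Fake the /cluster/resources listing of a single Proxmox cluster.
    httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/cluster/resources",
        httpmock.NewJsonResponderOrPanic(200, map[string]interface{}{
            "data": []interface{}{
                map[string]interface{}{"node": "pve-1", "type": "qemu", "vmid": 100, "name": "test1-vm"},
            },
        }),
    )

    cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
clusters:
  - url: https://127.0.0.1:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-1
`))
    assert.Nil(t, err)

    // An explicit *http.Client (with the default transport) is what httpmock intercepts.
    client, err := cluster.NewCluster(&cfg, &http.Client{})
    assert.Nil(t, err)

    vmr, region, err := client.FindVMByName("test1-vm")
    assert.Nil(t, err)
    assert.Equal(t, "cluster-1", region)
    assert.Equal(t, 100, vmr.VmId())
}

The same idea carries the CCM-level tests further down: SetupTest registers responders for /cluster/resources and the per-node /status/current endpoints, then builds the instances implementation on top of the mocked cluster client.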
Makefile (5 changed lines)
@@ -7,6 +7,11 @@ PUSH ?= false

SHA ?= $(shell git describe --match=none --always --abbrev=8 --dirty)
TAG ?= $(shell git describe --tag --always --match v[0-9]\*)
ifneq ($(TAG),edge)
GO_LDFLAGS ?= -ldflags '-X k8s.io/component-base/version.gitVersion=$(TAG)'
else
GO_LDFLAGS ?= -ldflags '-X k8s.io/component-base/version.gitCommit=$(SHA)'
endif

OS ?= $(shell go env GOOS)
ARCH ?= $(shell go env GOARCH)
go.mod (3 changed lines)
@@ -4,6 +4,7 @@ go 1.20

require (
    github.com/Telmate/proxmox-api-go v0.0.0-20230329163449-4d08b16c14e0
    github.com/jarcoal/httpmock v1.3.0
    github.com/spf13/pflag v1.0.5
    github.com/stretchr/testify v1.8.2
    gopkg.in/yaml.v3 v3.0.1

@@ -12,7 +13,7 @@ require (
    k8s.io/client-go v0.27.1
    k8s.io/cloud-provider v0.27.1
    k8s.io/component-base v0.27.1
    k8s.io/klog/v2 v2.90.1
    k8s.io/klog/v2 v2.100.1
)

require (
go.sum (7 changed lines)
@@ -225,6 +225,8 @@ github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=

@@ -256,6 +258,7 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=

@@ -731,8 +734,8 @@ k8s.io/component-helpers v0.27.1 h1:uY63v834MAHuf3fBiKGQGPq/cToU5kY5SW/58Xv0gl4=
k8s.io/component-helpers v0.27.1/go.mod h1:oOpwSYW1AdL+pU7abHADwX1ZcJl+5c8mnIkvoFZNFWA=
k8s.io/controller-manager v0.27.1 h1:+4OGWAzg4JVLEauPSmyQFIfrYrYQoUsC4MbHmRuPaFU=
k8s.io/controller-manager v0.27.1/go.mod h1:oe9vKl0RPiedlCXmeVbhkDV2yX8r7C4K/B8OGaKdYtY=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.27.1 h1:JTSQbJb+mcobScQwF0bOmZhIwP17k8GvBsiLlA6SQqw=
k8s.io/kms v0.27.1/go.mod h1:VuTsw0uHlSycKLCkypCGxfFCjLfzf/5YMeATECd/zJA=
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg=
@@ -20,19 +20,21 @@ package cluster
import (
    "crypto/tls"
    "fmt"
    "net/http"
    "os"
    "strings"

    pxapi "github.com/Telmate/proxmox-api-go/proxmox"
)

// Client is a Proxmox client.
type Client struct {
// Cluster is a Proxmox client.
type Cluster struct {
    config  *ClustersConfig
    proxmox map[string]*pxapi.Client
}

// NewClient creates a new Proxmox client.
func NewClient(config *ClustersConfig) (*Client, error) {
// NewCluster creates a new Proxmox cluster client.
func NewCluster(config *ClustersConfig, hclient *http.Client) (*Cluster, error) {
    clusters := len(config.Clusters)
    if clusters > 0 {
        proxmox := make(map[string]*pxapi.Client, clusters)
@@ -43,7 +45,7 @@ func NewClient(config *ClustersConfig) (*Client, error) {
                tlsconf = nil
            }

            client, err := pxapi.NewClient(cfg.URL, nil, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
            client, err := pxapi.NewClient(cfg.URL, hclient, os.Getenv("PM_HTTP_HEADERS"), tlsconf, "", 600)
            if err != nil {
                return nil, err
            }
@@ -53,7 +55,7 @@ func NewClient(config *ClustersConfig) (*Client, error) {
            proxmox[cfg.Region] = client
        }

        return &Client{
        return &Cluster{
            config:  config,
            proxmox: proxmox,
        }, nil
@@ -63,7 +65,7 @@ func NewClient(config *ClustersConfig) (*Client, error) {
}

// CheckClusters checks if the Proxmox connection is working.
func (c *Client) CheckClusters() error {
func (c *Cluster) CheckClusters() error {
    for region, client := range c.proxmox {
        if _, err := client.GetVersion(); err != nil {
            return fmt.Errorf("failed to initialized proxmox client in region %s, error: %v", region, err)
@@ -74,7 +76,7 @@ func (c *Client) CheckClusters() error {
}

// GetProxmoxCluster returns a Proxmox cluster client in a given region.
func (c *Client) GetProxmoxCluster(region string) (*pxapi.Client, error) {
func (c *Cluster) GetProxmoxCluster(region string) (*pxapi.Client, error) {
    if c.proxmox[region] != nil {
        return c.proxmox[region], nil
    }
@@ -83,15 +85,19 @@ func (c *Client) GetProxmoxCluster(region string) (*pxapi.Client, error) {
}

// FindVMByName find a VM by name in all Proxmox clusters.
func (c *Client) FindVMByName(name string) (*pxapi.VmRef, string, error) {
func (c *Cluster) FindVMByName(name string) (*pxapi.VmRef, string, error) {
    for region, px := range c.proxmox {
        vmr, err := px.GetVmRefByName(name)
        if err != nil {
            continue
            if strings.Contains(err.Error(), "not found") {
                continue
            }

            return nil, "", err
        }

        return vmr, region, nil
    }

    return nil, "", fmt.Errorf("VM %s not found", name)
    return nil, "", fmt.Errorf("vm '%s' not found", name)
}
@@ -14,52 +14,60 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster
package cluster_test

import (
    "fmt"
    "net/http"
    "strings"
    "testing"

    "github.com/jarcoal/httpmock"
    "github.com/stretchr/testify/assert"

    "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
)

func newClientEnv() (*ClustersConfig, error) {
    cfg, err := ReadCloudConfig(strings.NewReader(`
func newClusterEnv() (*cluster.ClustersConfig, error) {
    cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
clusters:
  - url: https://127.0.0.1:8006
  - url: https://127.0.0.1:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-1
  - url: https://127.0.0.2:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-2
`))

    return &cfg, err
}

func TestNewClient(t *testing.T) {
    cfg, err := newClientEnv()
    cfg, err := newClusterEnv()
    assert.Nil(t, err)
    assert.NotNil(t, cfg)

    client, err := NewClient(&ClustersConfig{})
    client, err := cluster.NewCluster(&cluster.ClustersConfig{}, nil)
    assert.NotNil(t, err)
    assert.Nil(t, client)

    client, err = NewClient(cfg)
    client, err = cluster.NewCluster(cfg, nil)
    assert.Nil(t, err)
    assert.NotNil(t, client)
    assert.Equal(t, 1, len(client.proxmox))
}

func TestCheckClusters(t *testing.T) {
    cfg, err := newClientEnv()
    cfg, err := newClusterEnv()
    assert.Nil(t, err)
    assert.NotNil(t, cfg)

    client, err := NewClient(cfg)
    client, err := cluster.NewCluster(cfg, nil)
    assert.Nil(t, err)
    assert.NotNil(t, client)
    assert.Equal(t, 1, len(client.proxmox))

    pxapi, err := client.GetProxmoxCluster("test")
    assert.NotNil(t, err)
@@ -74,3 +82,139 @@ func TestCheckClusters(t *testing.T) {
    assert.NotNil(t, err)
    assert.Contains(t, err.Error(), "failed to initialized proxmox client in region")
}

func TestFindVMByNameNonExist(t *testing.T) {
    cfg, err := newClusterEnv()
    assert.Nil(t, err)
    assert.NotNil(t, cfg)

    httpmock.Activate()
    defer httpmock.DeactivateAndReset()

    httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/cluster/resources",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": []interface{}{
                    map[string]interface{}{
                        "node": "node-1",
                        "type": "qemu",
                        "vmid": 100,
                        "name": "test1-vm",
                    },
                },
            })
        },
    )

    httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/cluster/resources",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": []interface{}{
                    map[string]interface{}{
                        "node": "node-2",
                        "type": "qemu",
                        "vmid": 100,
                        "name": "test2-vm",
                    },
                },
            })
        },
    )

    client, err := cluster.NewCluster(cfg, &http.Client{})
    assert.Nil(t, err)
    assert.NotNil(t, client)

    vmr, cluster, err := client.FindVMByName("non-existing-vm")
    assert.NotNil(t, err)
    assert.Equal(t, "", cluster)
    assert.Nil(t, vmr)
    assert.Contains(t, err.Error(), "vm 'non-existing-vm' not found")
}

func TestFindVMByNameExist(t *testing.T) {
    cfg, err := newClusterEnv()
    assert.Nil(t, err)
    assert.NotNil(t, cfg)

    httpmock.Activate()
    defer httpmock.DeactivateAndReset()

    httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/cluster/resources",
        httpmock.NewJsonResponderOrPanic(200, map[string]interface{}{
            "data": []interface{}{
                map[string]interface{}{
                    "node": "node-1",
                    "type": "qemu",
                    "vmid": 100,
                    "name": "test1-vm",
                },
            },
        }),
    )

    httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/cluster/resources",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": []interface{}{
                    map[string]interface{}{
                        "node": "node-2",
                        "type": "qemu",
                        "vmid": 100,
                        "name": "test2-vm",
                    },
                },
            })
        },
    )

    client, err := cluster.NewCluster(cfg, &http.Client{})
    assert.Nil(t, err)
    assert.NotNil(t, client)

    tests := []struct {
        msg             string
        vmName          string
        expectedError   error
        expectedVMID    int
        expectedCluster string
    }{
        {
            msg:           "vm not found",
            vmName:        "non-existing-vm",
            expectedError: fmt.Errorf("vm 'non-existing-vm' not found"),
        },
        {
            msg:             "Test1-VM",
            vmName:          "test1-vm",
            expectedVMID:    100,
            expectedCluster: "cluster-1",
        },
        {
            msg:             "Test2-VM",
            vmName:          "test2-vm",
            expectedVMID:    100,
            expectedCluster: "cluster-2",
        },
    }

    for _, testCase := range tests {
        testCase := testCase

        t.Run(fmt.Sprint(testCase.msg), func(t *testing.T) {
            vmr, cluster, err := client.FindVMByName(testCase.vmName)

            if testCase.expectedError == nil {
                assert.Nil(t, err)
                assert.NotNil(t, vmr)
                assert.Equal(t, testCase.expectedVMID, vmr.VmId())
                assert.Equal(t, testCase.expectedCluster, cluster)
            } else {
                assert.NotNil(t, err)
                assert.Equal(t, "", cluster)
                assert.Nil(t, vmr)
                assert.Contains(t, err.Error(), "vm 'non-existing-vm' not found")
            }
        })
    }
}
@@ -14,29 +14,31 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster
package cluster_test

import (
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"
)

func TestReadCloudConfig(t *testing.T) {
    cfg, err := ReadCloudConfig(nil)
    cfg, err := cluster.ReadCloudConfig(nil)
    assert.Nil(t, err)
    assert.NotNil(t, cfg)

    // Empty config
    cfg, err = ReadCloudConfig(strings.NewReader(`
    cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
clusters:
`))
    assert.Nil(t, err)
    assert.NotNil(t, cfg)

    // Wrong config
    cfg, err = ReadCloudConfig(strings.NewReader(`
    cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
clusters:
  test: false
`))

@@ -45,7 +47,7 @@ clusters:
    assert.NotNil(t, cfg)

    // Valid config with one cluster
    cfg, err = ReadCloudConfig(strings.NewReader(`
    cfg, err = cluster.ReadCloudConfig(strings.NewReader(`
clusters:
  - url: https://example.com
    insecure: false

@@ -59,12 +61,12 @@ clusters:
}

func TestReadCloudConfigFromFile(t *testing.T) {
    cfg, err := ReadCloudConfigFromFile("testdata/cloud-config.yaml")
    cfg, err := cluster.ReadCloudConfigFromFile("testdata/cloud-config.yaml")
    assert.NotNil(t, err)
    assert.EqualError(t, err, "error reading testdata/cloud-config.yaml: open testdata/cloud-config.yaml: no such file or directory")
    assert.NotNil(t, cfg)

    cfg, err = ReadCloudConfigFromFile("../../hack/proxmox-config.yaml")
    cfg, err = cluster.ReadCloudConfigFromFile("../../hack/proxmox-config.yaml")
    assert.Nil(t, err)
    assert.NotNil(t, cfg)
    assert.Equal(t, 2, len(cfg.Clusters))
@@ -20,7 +20,7 @@ const (
)

type cloud struct {
    client  *cluster.Client
    client  *cluster.Cluster
    kclient clientkubernetes.Interface
    instancesV2 cloudprovider.InstancesV2
@@ -42,7 +42,7 @@ func init() {
}

func newCloud(config *cluster.ClustersConfig) (cloudprovider.Interface, error) {
    client, err := cluster.NewClient(config)
    client, err := cluster.NewCluster(config, nil)
    if err != nil {
        return nil, err
    }
@@ -69,7 +69,7 @@ func (c *cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder,

    err := c.client.CheckClusters()
    if err != nil {
        klog.Errorf("failed to initialized proxmox client: %v", err)
        klog.Errorf("failed to check proxmox cluster: %v", err)
    }

    // Broadcast the upstream stop signal to all provider-level goroutines
@@ -34,10 +34,10 @@ import (
)

type instances struct {
    c *cluster.Client
    c *cluster.Cluster
}

func newInstances(client *cluster.Client) *instances {
func newInstances(client *cluster.Cluster) *instances {
    return &instances{
        c: client,
    }
@@ -79,17 +79,21 @@ func (i *instances) InstanceShutdown(_ context.Context, node *v1.Node) (bool, er
        return false, nil
    }

    vmRef, region, err := i.getInstance(node)
    vmr, region, err := i.parseProviderID(node.Spec.ProviderID)
    if err != nil {
        return false, err
        klog.Warningf("instances.InstanceShutdown() failed to parse providerID %s: %v", node.Spec.ProviderID, err)

        return false, nil
    }

    px, err := i.c.GetProxmoxCluster(region)
    if err != nil {
        return false, err
        klog.Warningf("instances.InstanceShutdown() failed to get Proxmox cluster: %v", err)

        return false, nil
    }

    vmState, err := px.GetVmState(vmRef)
    vmState, err := px.GetVmState(vmr)
    if err != nil {
        return false, err
    }
@@ -158,12 +162,6 @@ func (i *instances) InstanceMetadata(_ context.Context, node *v1.Node) (*cloudpr
}

func (i *instances) getInstance(node *v1.Node) (*pxapi.VmRef, string, error) {
    if !strings.HasPrefix(node.Spec.ProviderID, ProviderName) {
        klog.V(4).Infof("instances.getInstance() node %s has foreign providerID: %s, skipped", node.Name, node.Spec.ProviderID)

        return nil, "", fmt.Errorf("node %s has foreign providerID: %s", node.Name, node.Spec.ProviderID)
    }

    vm, region, err := i.parseProviderID(node.Spec.ProviderID)
    if err != nil {
        return nil, "", fmt.Errorf("instances.getInstance() error: %v", err)
@@ -183,6 +181,10 @@ func (i *instances) getInstance(node *v1.Node) (*pxapi.VmRef, string, error) {
        return nil, "", err
    }

    if vmInfo["name"].(string) != node.Name {
        return nil, "", fmt.Errorf("instances.getInstance() vm.name(%s) != node.name(%s)", vmInfo["name"].(string), node.Name)
    }

    klog.V(5).Infof("instances.getInstance() vmInfo %+v", vmInfo)

    return vm, region, nil
@@ -17,13 +17,421 @@ limitations under the License.
package proxmox

import (
    "context"
    "fmt"
    "net/http"
    "strings"
    "testing"

    pxapi "github.com/Telmate/proxmox-api-go/proxmox"
    "github.com/jarcoal/httpmock"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"

    "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/cluster"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    cloudprovider "k8s.io/cloud-provider"
    cloudproviderapi "k8s.io/cloud-provider/api"
)

type ccmTestSuite struct {
    suite.Suite

    i *instances
}

func (ts *ccmTestSuite) SetupTest() {
    cfg, err := cluster.ReadCloudConfig(strings.NewReader(`
clusters:
  - url: https://127.0.0.1:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-1
  - url: https://127.0.0.2:8006/api2/json
    insecure: false
    token_id: "user!token-id"
    token_secret: "secret"
    region: cluster-2
`))
    if err != nil {
        ts.T().Fatalf("failed to read config: %v", err)
    }

    httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/cluster/resources",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": []interface{}{
                    map[string]interface{}{
                        "node": "pve-1",
                        "type": "qemu",
                        "vmid": 100,
                        "name": "cluster-1-node-1",
                        "maxcpu": 4,
                        "maxmem": 10 * 1024 * 1024 * 1024,
                    },
                    map[string]interface{}{
                        "node": "pve-2",
                        "type": "qemu",
                        "vmid": 101,
                        "name": "cluster-1-node-2",
                        "maxcpu": 2,
                        "maxmem": 5 * 1024 * 1024 * 1024,
                    },
                },
            })
        },
    )

    httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/cluster/resources",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": []interface{}{
                    map[string]interface{}{
                        "node": "pve-3",
                        "type": "qemu",
                        "vmid": 100,
                        "name": "cluster-2-node-1",
                    },
                },
            })
        },
    )

    httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/nodes/pve-1/qemu/100/status/current",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": map[string]interface{}{
                    "status": "running",
                },
            })
        },
    )

    httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/nodes/pve-3/qemu/100/status/current",
        func(req *http.Request) (*http.Response, error) {
            return httpmock.NewJsonResponse(200, map[string]interface{}{
                "data": map[string]interface{}{
                    "status": "stopped",
                },
            })
        },
    )

    cluster, err := cluster.NewCluster(&cfg, &http.Client{})
    if err != nil {
        ts.T().Fatalf("failed to create cluster client: %v", err)
    }

    ts.i = newInstances(cluster)
}

func (ts *ccmTestSuite) TearDownTest() {
}

func TestSuiteCCM(t *testing.T) {
    suite.Run(t, new(ccmTestSuite))
}

// nolint:dupl
func (ts *ccmTestSuite) TestInstanceExists() {
    httpmock.Activate()
    defer httpmock.DeactivateAndReset()

    tests := []struct {
        msg           string
        node          *v1.Node
        expectedError string
        expected      bool
    }{
        {
            msg: "NodeForeignProviderID",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-node-1",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "foreign://provider-id",
                },
            },
            expected: true,
        },
        {
            msg: "NodeWrongCluster",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-3-node-1",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-3/100",
                },
            },
            expected:      false,
            expectedError: "instances.getInstance() error: proxmox cluster cluster-3 not found",
        },
        {
            msg: "NodeNotExists",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-500",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-1/500",
                },
            },
            expected: false,
        },
        {
            msg: "NodeExists",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-1",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-1/100",
                },
            },
            expected: true,
        },
        {
            msg: "NodeExistsWithDifferentName",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-3",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-1/100",
                },
            },
            expectedError: "vm.name(cluster-1-node-1) != node.name(cluster-1-node-3)",
            expected:      false,
        },
    }

    for _, testCase := range tests {
        testCase := testCase

        ts.Run(fmt.Sprint(testCase.msg), func() {
            exists, err := ts.i.InstanceExists(context.Background(), testCase.node)

            if testCase.expectedError != "" {
                ts.Require().Error(err)
                ts.Require().False(exists)
                ts.Require().Contains(err.Error(), testCase.expectedError)
            } else {
                ts.Require().NoError(err)
                ts.Require().Equal(testCase.expected, exists)
            }
        })
    }
}

// nolint:dupl
func (ts *ccmTestSuite) TestInstanceShutdown() {
    httpmock.Activate()
    defer httpmock.DeactivateAndReset()

    tests := []struct {
        msg           string
        node          *v1.Node
        expectedError string
        expected      bool
    }{
        {
            msg: "NodeForeignProviderID",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-node-1",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "foreign://provider-id",
                },
            },
            expected: false,
        },
        {
            msg: "NodeWrongCluster",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-3-node-1",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-3/100",
                },
            },
            expected: false,
        },
        {
            msg: "NodeNotExists",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-500",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-1/500",
                },
            },
            expected:      false,
            expectedError: "vm '500' not found",
        },
        {
            msg: "NodeExists",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-1",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-1/100",
                },
            },
            expected: false,
        },
        {
            msg: "NodeExistsStopped",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-3",
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-2/100",
                },
            },
            expected: true,
        },
    }

    for _, testCase := range tests {
        testCase := testCase

        ts.Run(fmt.Sprint(testCase.msg), func() {
            exists, err := ts.i.InstanceShutdown(context.Background(), testCase.node)

            if testCase.expectedError != "" {
                ts.Require().Error(err)
                ts.Require().False(exists)
                ts.Require().Contains(err.Error(), testCase.expectedError)
            } else {
                ts.Require().NoError(err)
                ts.Require().Equal(testCase.expected, exists)
            }
        })
    }
}

func (ts *ccmTestSuite) TestInstanceMetadata() {
    httpmock.Activate()
    defer httpmock.DeactivateAndReset()

    tests := []struct {
        msg           string
        node          *v1.Node
        expectedError string
        expected      *cloudprovider.InstanceMetadata
    }{
        {
            msg: "NodeAnnotations",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-node-1",
                },
            },
            expected: &cloudprovider.InstanceMetadata{},
        },
        {
            msg: "NodeForeignProviderID",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-node-1",
                    Annotations: map[string]string{
                        cloudproviderapi.AnnotationAlphaProvidedIPAddr: "1.2.3.4",
                    },
                },
                Spec: v1.NodeSpec{
                    ProviderID: "foreign://provider-id",
                },
            },
            expected: &cloudprovider.InstanceMetadata{},
        },
        {
            msg: "NodeWrongCluster",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-3-node-1",
                    Annotations: map[string]string{
                        cloudproviderapi.AnnotationAlphaProvidedIPAddr: "1.2.3.4",
                    },
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-3/100",
                },
            },
            expected:      &cloudprovider.InstanceMetadata{},
            expectedError: "instances.getInstance() error: proxmox cluster cluster-3 not found",
        },
        {
            msg: "NodeNotExists",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-500",
                    Annotations: map[string]string{
                        cloudproviderapi.AnnotationAlphaProvidedIPAddr: "1.2.3.4",
                    },
                },
                Spec: v1.NodeSpec{
                    ProviderID: "proxmox://cluster-1/500",
                },
            },
            expected:      &cloudprovider.InstanceMetadata{},
            expectedError: cloudprovider.InstanceNotFound.Error(),
        },
        {
            msg: "NodeExists",
            node: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "cluster-1-node-1",
                    Annotations: map[string]string{
                        cloudproviderapi.AnnotationAlphaProvidedIPAddr: "1.2.3.4",
                    },
                },
            },
            expected: &cloudprovider.InstanceMetadata{
                ProviderID: "proxmox://cluster-1/100",
                NodeAddresses: []v1.NodeAddress{
                    {
                        Type:    v1.NodeInternalIP,
                        Address: "1.2.3.4",
                    },
                    {
                        Type:    v1.NodeHostName,
                        Address: "cluster-1-node-1",
                    },
                },
                InstanceType: "4VCPU-10GB",
                Region:       "cluster-1",
                Zone:         "pve-1",
            },
        },
    }

    for _, testCase := range tests {
        testCase := testCase

        ts.Run(fmt.Sprint(testCase.msg), func() {
            meta, err := ts.i.InstanceMetadata(context.Background(), testCase.node)

            if testCase.expectedError != "" {
                ts.Require().Error(err)
                ts.Require().Contains(err.Error(), testCase.expectedError)
            } else {
                ts.Require().NoError(err)
                ts.Require().Equal(testCase.expected, meta)
            }
        })
    }
}

func TestGetProviderID(t *testing.T) {
    t.Parallel()