chore: bump deps

Updated dependencies: GitHub Actions (cosign-installer v4, golangci-lint v2.6.0), base images (golang 1.25.3, distroless debian13), and Go modules (go-proxmox, samber/lo); refactored CCM tests to use a shared mock cluster and config fixtures.

Signed-off-by: Serge Logvinov <serge.logvinov@sinextra.dev>
This commit is contained in:
Serge Logvinov
2025-11-10 12:29:30 +07:00
committed by Serge
parent d2181a88f6
commit 01e3ce854c
19 changed files with 587 additions and 276 deletions

View File

@@ -30,7 +30,7 @@ jobs:
run: git fetch --prune --unshallow
- name: Install Cosign
uses: sigstore/cosign-installer@v3.10.0
uses: sigstore/cosign-installer@v4.0.0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:

View File

@@ -31,7 +31,7 @@ jobs:
- name: Lint
uses: golangci/golangci-lint-action@v8
with:
version: v2.5.0
version: v2.6.0
args: --timeout=5m --config=.golangci.yml
- name: Unit
run: make unit

View File

@@ -27,7 +27,7 @@ jobs:
with:
version: v3.13.3
- name: Install Cosign
uses: sigstore/cosign-installer@v3.10.0
uses: sigstore/cosign-installer@v4.0.0
- name: Github registry login
uses: docker/login-action@v3

View File

@@ -21,7 +21,7 @@ jobs:
run: git fetch --prune --unshallow
- name: Install Cosign
uses: sigstore/cosign-installer@v3.10.0
uses: sigstore/cosign-installer@v4.0.0
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:

View File

@@ -1,12 +1,12 @@
# syntax = docker/dockerfile:1.18
########################################
FROM --platform=${BUILDPLATFORM} golang:1.25.1-alpine AS builder
FROM --platform=${BUILDPLATFORM} golang:1.25.3-alpine AS builder
RUN apk update && apk add --no-cache make
ENV GO111MODULE=on
WORKDIR /src
COPY go.mod go.sum /src
COPY ["go.mod", "go.sum", "/src/"]
RUN go mod download && go mod verify
COPY . .
@@ -22,7 +22,7 @@ LABEL org.opencontainers.image.source="https://github.com/sergelogvinov/proxmox-
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.description="Proxmox VE CCM for Kubernetes"
COPY --from=gcr.io/distroless/static-debian12:nonroot . .
COPY --from=gcr.io/distroless/static-debian13:nonroot . .
ARG TARGETARCH
COPY --from=builder /src/bin/proxmox-cloud-controller-manager-${TARGETARCH} /bin/proxmox-cloud-controller-manager

5
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/sergelogvinov/proxmox-cloud-controller-manager
go 1.25.1
go 1.25.3
// replace github.com/sergelogvinov/go-proxmox => ../proxmox/go-proxmox
@@ -8,7 +8,8 @@ require (
github.com/jarcoal/httpmock v1.4.1
github.com/luthermonson/go-proxmox v0.2.4-0.20250923162601-ef332f9e265b
github.com/pkg/errors v0.9.1
github.com/sergelogvinov/go-proxmox v0.0.0-20250920041813-b003ecb58e03
github.com/samber/lo v1.52.0
github.com/sergelogvinov/go-proxmox v0.0.0-20251110010552-654365b267da
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
gopkg.in/yaml.v3 v3.0.1

6
go.sum
View File

@@ -161,8 +161,10 @@ github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUO
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergelogvinov/go-proxmox v0.0.0-20250920041813-b003ecb58e03 h1:VgJ9fgOADXLW72oMAjOM6PAA9qbqibhqnuqQfpB49k0=
github.com/sergelogvinov/go-proxmox v0.0.0-20250920041813-b003ecb58e03/go.mod h1:vSTg/WC771SByc5087tu7uyGaXUv6fS8q3ak2X+xwqk=
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/sergelogvinov/go-proxmox v0.0.0-20251110010552-654365b267da h1:uK/GNZyaU+b1o4Ax8TJ/c99dNtT1S5pM2nj91mj1S6Q=
github.com/sergelogvinov/go-proxmox v0.0.0-20251110010552-654365b267da/go.mod h1:vSTg/WC771SByc5087tu7uyGaXUv6fS8q3ak2X+xwqk=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=

View File

@@ -71,7 +71,7 @@ func init() {
func newCloud(config *ccmConfig.ClustersConfig) (cloudprovider.Interface, error) {
ctx, cancel := context.WithCancel(context.Background())
px, err := pxpool.NewProxmoxPool(ctx, config.Clusters)
px, err := pxpool.NewProxmoxPool(config.Clusters)
if err != nil {
cancel()

View File

@@ -309,7 +309,7 @@ func (i *instances) getInstanceInfo(ctx context.Context, node *v1.Node) (*instan
vmID, region, err = provider.ParseProviderID(providerID)
if err != nil {
if i.provider == providerconfig.ProviderDefault {
return nil, fmt.Errorf("instances.getInstanceInfo() error: %v", err)
klog.V(4).InfoS("instances.getInstanceInfo() failed to parse providerID, trying find by name", "node", klog.KObj(node), "providerID", providerID)
}
vmID, region, err = i.c.pxpool.FindVMByUUID(ctx, node.Status.NodeInfo.SystemUUID)

View File

@@ -18,17 +18,16 @@ package proxmox
import (
"fmt"
"net/http"
"strings"
"testing"
"github.com/jarcoal/httpmock"
proxmox "github.com/luthermonson/go-proxmox"
"github.com/samber/lo"
"github.com/stretchr/testify/suite"
goproxmox "github.com/sergelogvinov/go-proxmox"
providerconfig "github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/config"
"github.com/sergelogvinov/proxmox-cloud-controller-manager/pkg/proxmoxpool"
testcluster "github.com/sergelogvinov/proxmox-cloud-controller-manager/test/cluster"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -43,160 +42,40 @@ type ccmTestSuite struct {
i *instances
}
func (ts *ccmTestSuite) SetupTest() {
cfg, err := providerconfig.ReadCloudConfig(strings.NewReader(`
clusters:
- url: https://127.0.0.1:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-1
- url: https://127.0.0.2:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-2
`))
type configTestCase struct {
name string
config string
}
func getTestConfigs() []configTestCase {
return []configTestCase{
{
name: "DefaultProvider",
config: "../../test/config/cluster-config-1.yaml",
},
{
name: "CapMoxProvider",
config: "../../test/config/cluster-config-2.yaml",
},
}
}
// configuredTestSuite wraps the base suite with a specific configuration
type configuredTestSuite struct {
*ccmTestSuite
configCase configTestCase
}
func (ts *configuredTestSuite) SetupTest() {
testcluster.SetupMockResponders()
cfg, err := providerconfig.ReadCloudConfigFromFile(ts.configCase.config)
if err != nil {
ts.T().Fatalf("failed to read config: %v", err)
}
httpmock.RegisterResponder(http.MethodGet, `=~/cluster/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.NodeStatuses{{Name: "pve-1"}, {Name: "pve-2"}, {Name: "pve-3"}},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Node{},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Node{},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Node{},
})
})
httpmock.RegisterResponderWithQuery("GET", "https://127.0.0.1:8006/api2/json/cluster/resources", "type=vm",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]interface{}{
"data": []interface{}{
map[string]interface{}{
"node": "pve-1",
"type": "qemu",
"vmid": 100,
"name": "cluster-1-node-1",
"maxcpu": 4,
"maxmem": 10 * 1024 * 1024 * 1024,
},
map[string]interface{}{
"node": "pve-2",
"type": "qemu",
"vmid": 101,
"name": "cluster-1-node-2",
"maxcpu": 2,
"maxmem": 5 * 1024 * 1024 * 1024,
},
},
})
},
)
httpmock.RegisterResponderWithQuery("GET", "https://127.0.0.2:8006/api2/json/cluster/resources", "type=vm",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]interface{}{
"data": []interface{}{
map[string]interface{}{
"node": "pve-3",
"type": "qemu",
"vmid": 100,
"name": "cluster-2-node-1",
"maxcpu": 1,
"maxmem": 2 * 1024 * 1024 * 1024,
"status": "stopped",
},
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/qemu/100/status/current`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.VirtualMachine{Node: "pve-1", Name: "cluster-1-node-1", VMID: 100, CPUs: 4, MaxMem: 10 * 1024 * 1024 * 1024, Status: "running"},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/qemu/101/status/current`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.VirtualMachine{Node: "pve-2", Name: "cluster-1-node-2", VMID: 101, CPUs: 2, MaxMem: 5 * 1024 * 1024 * 1024, Status: "running"},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/qemu/100/status/current`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.VirtualMachine{Node: "pve-3", Name: "cluster-2-node-1", VMID: 100, CPUs: 1, MaxMem: 2 * 1024 * 1024 * 1024, Status: "stopped"},
})
})
httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/nodes/pve-1/qemu/100/config",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]interface{}{
"data": map[string]interface{}{
"name": "cluster-1-node-1",
"node": "pve-1",
"type": "qemu",
"vmid": 100,
"cores": 4,
"memory": "10240",
"smbios1": "uuid=8af7110d-bfad-407a-a663-9527d10a6583",
},
})
},
)
httpmock.RegisterResponder("GET", "https://127.0.0.1:8006/api2/json/nodes/pve-2/qemu/101/config",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]interface{}{
"data": map[string]interface{}{
"name": "cluster-1-node-2",
"node": "pve-2",
"type": "qemu",
"vmid": 101,
"cores": 2,
"memory": "5120",
"smbios1": "uuid=5d04cb23-ea78-40a3-af2e-dd54798dc887",
},
})
},
)
httpmock.RegisterResponder("GET", "https://127.0.0.2:8006/api2/json/nodes/pve-3/qemu/100/config",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]interface{}{
"data": map[string]interface{}{
"name": "cluster-2-node-1",
"node": "pve-3",
"type": "qemu",
"vmid": 100,
"cores": 1,
"memory": "2048",
"smbios1": "uuid=3d3db687-89dd-473e-8463-6599f25b36a8,sku=YzEubWVkaXVt",
},
})
},
)
px, err := proxmoxpool.NewProxmoxPool(ts.T().Context(), cfg.Clusters, proxmox.WithHTTPClient(&http.Client{}))
px, err := proxmoxpool.NewProxmoxPool(cfg.Clusters)
if err != nil {
ts.T().Fatalf("failed to create cluster client: %v", err)
}
@@ -207,24 +86,31 @@ clusters:
}
features := providerconfig.ClustersFeatures{
Provider: providerconfig.ProviderDefault,
Provider: cfg.Features.Provider,
Network: providerconfig.NetworkOpts{},
}
ts.i = newInstances(client, features)
}
func (ts *ccmTestSuite) TearDownTest() {
}
func TestSuiteCCM(t *testing.T) {
suite.Run(t, new(ccmTestSuite))
configs := getTestConfigs()
for _, cfg := range configs {
// Create a new test suite for each configuration
ts := &ccmTestSuite{}
// Run the suite with the current configuration
suite.Run(t, &configuredTestSuite{
ccmTestSuite: ts,
configCase: cfg,
})
}
}
// nolint:dupl
func (ts *ccmTestSuite) TestInstanceExists() {
func (ts *configuredTestSuite) TestInstanceExists() {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
defer httpmock.DeactivateAndReset() //nolint: wsl_v5
tests := []struct {
msg string
@@ -276,11 +162,11 @@ func (ts *ccmTestSuite) TestInstanceExists() {
Name: "cluster-1-node-1",
},
Spec: v1.NodeSpec{
ProviderID: "proxmox://cluster-1/100",
ProviderID: lo.Ternary(ts.i.provider == providerconfig.ProviderCapmox, "proxmox://11833f4c-341f-4bd3-aad7-f7abed000000", "proxmox://cluster-1/100"),
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
SystemUUID: "8af7110d-bfad-407a-a663-9527d10a6583",
SystemUUID: "11833f4c-341f-4bd3-aad7-f7abed000000",
},
},
},
@@ -293,11 +179,11 @@ func (ts *ccmTestSuite) TestInstanceExists() {
Name: "cluster-1-node-3",
},
Spec: v1.NodeSpec{
ProviderID: "proxmox://cluster-1/100",
ProviderID: lo.Ternary(ts.i.provider == providerconfig.ProviderCapmox, "proxmox://11833f4c-341f-4bd3-aad7-f7abed000000", "proxmox://cluster-1/100"),
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
SystemUUID: "8af7110d-bfad-407a-a663-9527d10a6583",
SystemUUID: "11833f4c-341f-4bd3-aad7-f7abed000000",
},
},
},
@@ -310,7 +196,7 @@ func (ts *ccmTestSuite) TestInstanceExists() {
Name: "cluster-1-node-1",
},
Spec: v1.NodeSpec{
ProviderID: "proxmox://cluster-1/100",
ProviderID: lo.Ternary(ts.i.provider == providerconfig.ProviderCapmox, "proxmox://8af7110d-0000-0000-0000-9527d10a6583", "proxmox://cluster-1/100"),
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
@@ -337,61 +223,6 @@ func (ts *ccmTestSuite) TestInstanceExists() {
},
expected: false,
},
}
for _, testCase := range tests {
ts.Run(fmt.Sprint(testCase.msg), func() {
exists, err := ts.i.InstanceExists(ts.T().Context(), testCase.node)
if testCase.expectedError != "" {
ts.Require().Error(err)
ts.Require().False(exists)
ts.Require().Contains(err.Error(), testCase.expectedError)
} else {
ts.Require().NoError(err)
ts.Require().Equal(testCase.expected, exists)
}
})
}
}
func (ts *ccmTestSuite) TestInstanceExistsCAPMox() {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
// Set up a CAPMox provider instance for this test
cfg, err := providerconfig.ReadCloudConfig(strings.NewReader(`
features:
provider: 'capmox'
clusters:
- url: https://127.0.0.1:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-1
`))
if err != nil {
ts.T().Fatalf("failed to read config: %v", err)
}
px, err := proxmoxpool.NewProxmoxPool(ts.T().Context(), cfg.Clusters, proxmox.WithHTTPClient(&http.Client{}))
if err != nil {
ts.T().Fatalf("failed to create cluster client: %v", err)
}
client := &client{
pxpool: px,
kclient: fake.NewSimpleClientset(),
}
capmoxInstance := newInstances(client, cfg.Features)
tests := []struct {
msg string
node *v1.Node
expectedError string
expected bool
}{
{
msg: "NodeUUIDNotFoundCAPMox",
node: &v1.Node{
@@ -433,11 +264,11 @@ clusters:
Name: "cluster-1-node-1",
},
Spec: v1.NodeSpec{
ProviderID: "proxmox://8af7110d-bfad-407a-a663-9527d10a6583",
ProviderID: "proxmox://11833f4c-341f-4bd3-aad7-f7abed000000",
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
SystemUUID: "8af7110d-bfad-407a-a663-9527d10a6583",
SystemUUID: "11833f4c-341f-4bd3-aad7-f7abed000000",
},
},
},
@@ -447,7 +278,7 @@ clusters:
for _, testCase := range tests {
ts.Run(fmt.Sprint(testCase.msg), func() {
exists, err := capmoxInstance.InstanceExists(ts.T().Context(), testCase.node)
exists, err := ts.i.InstanceExists(ts.T().Context(), testCase.node)
if testCase.expectedError != "" {
ts.Require().Error(err)
@@ -462,9 +293,9 @@ clusters:
}
// nolint:dupl
func (ts *ccmTestSuite) TestInstanceShutdown() {
func (ts *configuredTestSuite) TestInstanceShutdown() {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
defer httpmock.DeactivateAndReset() //nolint: wsl_v5
tests := []struct {
msg string
@@ -533,7 +364,7 @@ func (ts *ccmTestSuite) TestInstanceShutdown() {
Name: "cluster-2-node-1",
},
Spec: v1.NodeSpec{
ProviderID: "proxmox://cluster-2/100",
ProviderID: "proxmox://cluster-2/103",
},
},
expected: true,
@@ -607,9 +438,9 @@ func (ts *ccmTestSuite) TestInstanceShutdown() {
}
}
func (ts *ccmTestSuite) TestInstanceMetadata() {
func (ts *configuredTestSuite) TestInstanceMetadata() {
httpmock.Activate()
defer httpmock.DeactivateAndReset()
defer httpmock.DeactivateAndReset() //nolint: wsl_v5
tests := []struct {
msg string
@@ -684,7 +515,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
SystemUUID: "8af7110d-bfad-407a-a663-9527d10a6583",
SystemUUID: "11833f4c-341f-4bd3-aad7-f7abed000000",
},
},
Spec: v1.NodeSpec{
@@ -698,7 +529,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
},
},
expected: &cloudprovider.InstanceMetadata{
ProviderID: "proxmox://cluster-1/100",
ProviderID: lo.Ternary(ts.i.provider == providerconfig.ProviderCapmox, "proxmox://11833f4c-341f-4bd3-aad7-f7abed000000", "proxmox://cluster-1/100"),
NodeAddresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
@@ -729,7 +560,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
SystemUUID: "8af7110d-bfad-407a-a663-9527d10a6583",
SystemUUID: "11833f4c-341f-4bd3-aad7-f7abed000000",
},
},
Spec: v1.NodeSpec{
@@ -743,7 +574,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
},
},
expected: &cloudprovider.InstanceMetadata{
ProviderID: "proxmox://cluster-1/100",
ProviderID: lo.Ternary(ts.i.provider == providerconfig.ProviderCapmox, "proxmox://11833f4c-341f-4bd3-aad7-f7abed000000", "proxmox://cluster-1/100"),
NodeAddresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
@@ -778,7 +609,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
},
Status: v1.NodeStatus{
NodeInfo: v1.NodeSystemInfo{
SystemUUID: "3d3db687-89dd-473e-8463-6599f25b36a8",
SystemUUID: "11833f4c-341f-4bd3-aad7-f7abea000000",
},
},
Spec: v1.NodeSpec{
@@ -792,7 +623,7 @@ func (ts *ccmTestSuite) TestInstanceMetadata() {
},
},
expected: &cloudprovider.InstanceMetadata{
ProviderID: "proxmox://cluster-2/100",
ProviderID: lo.Ternary(ts.i.provider == providerconfig.ProviderCapmox, "proxmox://11833f4c-341f-4bd3-aad7-f7abea000000", "proxmox://cluster-2/103"),
NodeAddresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,

View File

@@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"net"
"strings"
"unicode"
@@ -59,12 +60,7 @@ func ParseCIDRRuleset(cidrList string) (allowList, ignoreList []*net.IPNet, err
}
for _, item := range cidrlist {
isIgnore := false
if strings.HasPrefix(item, "!") {
item = strings.TrimPrefix(item, "!")
isIgnore = true
}
item, isIgnore := strings.CutPrefix(item, "!")
_, cidr, err := net.ParseCIDR(item)
if err != nil {
@@ -148,9 +144,7 @@ func syncNodeAnnotations(ctx context.Context, kclient clientkubernetes.Interface
newNode.Annotations = make(map[string]string)
}
for k, v := range annotationsToUpdate {
newNode.Annotations[k] = v
}
maps.Copy(newNode.Annotations, annotationsToUpdate)
newData, err := json.Marshal(newNode)
if err != nil {

View File

@@ -34,35 +34,35 @@ func TestParseCIDRRuleset(t *testing.T) {
cidrs string
expectedAllowList []*net.IPNet
expectedIgnoreList []*net.IPNet
expectedError []interface{}
expectedError []any
}{
{
msg: "Empty CIDR ruleset",
cidrs: "",
expectedAllowList: []*net.IPNet{},
expectedIgnoreList: []*net.IPNet{},
expectedError: []interface{}{},
expectedError: []any{},
},
{
msg: "Conflicting CIDRs",
cidrs: "192.168.0.1/16,!192.168.0.1/24",
expectedAllowList: []*net.IPNet{},
expectedIgnoreList: []*net.IPNet{},
expectedError: []interface{}{"192.168.0.0/16", "192.168.0.0/24"},
expectedError: []any{"192.168.0.0/16", "192.168.0.0/24"},
},
{
msg: "Ignores invalid CIDRs",
cidrs: "722.887.0.1/16,!588.0.1/24",
expectedAllowList: []*net.IPNet{},
expectedIgnoreList: []*net.IPNet{},
expectedError: []interface{}{},
expectedError: []any{},
},
{
msg: "Valid CIDRs with ignore",
cidrs: "192.168.0.1/16,!10.0.0.5/8,144.0.0.7/16,!13.0.0.9/8",
expectedAllowList: []*net.IPNet{mustParseCIDR("192.168.0.0/16"), mustParseCIDR("144.0.0.0/16")},
expectedIgnoreList: []*net.IPNet{mustParseCIDR("10.0.0.0/8"), mustParseCIDR("13.0.0.0/8")},
expectedError: []interface{}{},
expectedError: []any{},
},
}

View File

@@ -25,6 +25,8 @@ var (
ErrHAGroupNotFound = errors.New("ha-group not found")
// ErrRegionNotFound is returned when a region is not found in the Proxmox
ErrRegionNotFound = errors.New("region not found")
// ErrZoneNotFound is returned when a zone is not found in the Proxmox
ErrZoneNotFound = errors.New("zone not found")
// ErrInstanceNotFound is returned when an instance is not found in the Proxmox
ErrInstanceNotFound = errors.New("instance not found")
)

View File

@@ -21,6 +21,7 @@ import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"net/http"
"os"
@@ -53,20 +54,24 @@ type ProxmoxPool struct {
}
// NewProxmoxPool creates a new Proxmox cluster client.
func NewProxmoxPool(ctx context.Context, config []*ProxmoxCluster, options ...proxmox.Option) (*ProxmoxPool, error) {
func NewProxmoxPool(config []*ProxmoxCluster, options ...proxmox.Option) (*ProxmoxPool, error) {
clusters := len(config)
if clusters > 0 {
clients := make(map[string]*goproxmox.APIClient, clusters)
for _, cfg := range config {
options = append(options, proxmox.WithUserAgent("ProxmoxCCM v1.0"))
opts := []proxmox.Option{proxmox.WithUserAgent("ProxmoxCCM/1.0")}
opts = append(opts, options...)
if cfg.Insecure {
httpTr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
MinVersion: tls.VersionTLS12,
},
}
options = append(options, proxmox.WithHTTPClient(&http.Client{Transport: httpTr}))
opts = append(opts, proxmox.WithHTTPClient(&http.Client{Transport: httpTr}))
}
if cfg.TokenID == "" && cfg.TokenIDFile != "" {
@@ -88,15 +93,15 @@ func NewProxmoxPool(ctx context.Context, config []*ProxmoxCluster, options ...pr
}
if cfg.Username != "" && cfg.Password != "" {
options = append(options, proxmox.WithCredentials(&proxmox.Credentials{
opts = append(opts, proxmox.WithCredentials(&proxmox.Credentials{
Username: cfg.Username,
Password: cfg.Password,
}))
} else if cfg.TokenID != "" && cfg.TokenSecret != "" {
options = append(options, proxmox.WithAPIToken(cfg.TokenID, cfg.TokenSecret))
opts = append(opts, proxmox.WithAPIToken(cfg.TokenID, cfg.TokenSecret))
}
pxClient, err := goproxmox.NewAPIClient(ctx, cfg.URL, options...)
pxClient, err := goproxmox.NewAPIClient(cfg.URL, opts...)
if err != nil {
return nil, err
}
@@ -126,7 +131,8 @@ func (c *ProxmoxPool) GetRegions() []string {
// CheckClusters checks if the Proxmox connection is working.
func (c *ProxmoxPool) CheckClusters(ctx context.Context) error {
for region, pxClient := range c.clients {
if _, err := pxClient.Version(ctx); err != nil {
info, err := pxClient.Version(ctx)
if err != nil {
return fmt.Errorf("failed to initialized proxmox client in region %s, error: %v", region, err)
}
@@ -142,7 +148,7 @@ func (c *ProxmoxPool) CheckClusters(ctx context.Context) error {
}
if len(vms) > 0 {
klog.V(4).InfoS("Proxmox cluster has VMs", "region", region, "count", len(vms))
klog.V(4).InfoS("Proxmox cluster information", "region", region, "version", info.Version, "vms", len(vms))
} else {
klog.InfoS("Proxmox cluster has no VMs, or check the account permission", "region", region)
}
@@ -202,7 +208,7 @@ func (c *ProxmoxPool) GetNodeGroup(ctx context.Context, region string, node stri
continue
}
for _, n := range strings.Split(g.Nodes, ",") {
for n := range strings.SplitSeq(g.Nodes, ",") {
if node == strings.Split(n, ":")[0] {
return g.Group, nil
}
@@ -286,7 +292,11 @@ func (c *ProxmoxPool) FindVMByUUID(ctx context.Context, uuid string) (vmID int,
return false, nil
})
if err != nil {
return 0, "", ErrInstanceNotFound
if errors.Is(err, goproxmox.ErrVirtualMachineNotFound) {
continue
}
return 0, "", err
}
return vmid, region, nil

View File

@@ -64,11 +64,11 @@ func TestNewClient(t *testing.T) {
cfg := newClusterEnv()
assert.NotNil(t, cfg)
pxClient, err := pxpool.NewProxmoxPool(t.Context(), []*pxpool.ProxmoxCluster{})
pxClient, err := pxpool.NewProxmoxPool([]*pxpool.ProxmoxCluster{})
assert.NotNil(t, err)
assert.Nil(t, pxClient)
pxClient, err = pxpool.NewProxmoxPool(t.Context(), cfg)
pxClient, err = pxpool.NewProxmoxPool(cfg)
assert.Nil(t, err)
assert.NotNil(t, pxClient)
}
@@ -89,7 +89,7 @@ func TestNewClientWithCredentialsFromFile(t *testing.T) {
cfg := newClusterEnvWithFiles(tokenIDFile.Name(), tokenSecretFile.Name())
pxClient, err := pxpool.NewProxmoxPool(t.Context(), cfg)
pxClient, err := pxpool.NewProxmoxPool(cfg)
assert.Nil(t, err)
assert.NotNil(t, pxClient)
assert.Equal(t, "user!token-id", cfg[0].TokenID)
@@ -100,7 +100,7 @@ func TestCheckClusters(t *testing.T) {
cfg := newClusterEnv()
assert.NotNil(t, cfg)
pxClient, err := pxpool.NewProxmoxPool(t.Context(), cfg)
pxClient, err := pxpool.NewProxmoxPool(cfg)
assert.Nil(t, err)
assert.NotNil(t, pxClient)

427
test/cluster/cluster.go Normal file
View File

@@ -0,0 +1,427 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster
import (
"fmt"
"net/http"
"github.com/jarcoal/httpmock"
"github.com/luthermonson/go-proxmox"
)
// SetupMockResponders sets up the HTTP mock responders for Proxmox API calls.
func SetupMockResponders() {
// API version probe; answered for every cluster URL (regexp match).
httpmock.RegisterResponder(http.MethodGet, `=~/version$`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Version{Version: "8.4"},
})
})
// Cluster membership: three hypervisor nodes shared by both mock clusters.
httpmock.RegisterResponder(http.MethodGet, `=~/cluster/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.NodeStatuses{{Name: "pve-1"}, {Name: "pve-2"}, {Name: "pve-3"}},
})
})
// Exact-URL responder for the second cluster (127.0.0.2): a single stopped VM.
// NOTE(review): relies on httpmock preferring this exact-URL match over the
// generic `=~/cluster/resources` regexp responder registered below — confirm
// against httpmock's matching rules if responders are reordered.
httpmock.RegisterResponder(http.MethodGet, "https://127.0.0.2:8006/api2/json/cluster/resources",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.ClusterResources{
&proxmox.ClusterResource{
Node: "pve-3",
Type: "qemu",
VMID: 103,
Name: "cluster-2-node-1",
MaxCPU: 2,
MaxMem: 5 * 1024 * 1024 * 1024,
Status: "stopped",
},
},
})
},
)
// Fallback resources for any other cluster URL (the first cluster, 127.0.0.1):
// two running VMs plus an assortment of storages (cifs, dir, zfspool, lvm)
// so both CCM and storage-related code paths have data to enumerate.
httpmock.RegisterResponder(http.MethodGet, "=~/cluster/resources",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.ClusterResources{
&proxmox.ClusterResource{
Node: "pve-1",
Type: "qemu",
VMID: 100,
Name: "cluster-1-node-1",
MaxCPU: 4,
MaxMem: 10 * 1024 * 1024 * 1024,
Status: "running",
},
&proxmox.ClusterResource{
Node: "pve-2",
Type: "qemu",
VMID: 101,
Name: "cluster-1-node-2",
MaxCPU: 2,
MaxMem: 5 * 1024 * 1024 * 1024,
Status: "running",
},
&proxmox.ClusterResource{
ID: "storage/smb",
Type: "storage",
PluginType: "cifs",
Node: "pve-1",
Storage: "smb",
Content: "rootdir,images",
Shared: 1,
Status: "available",
},
&proxmox.ClusterResource{
ID: "storage/rbd",
Type: "storage",
PluginType: "dir",
Node: "pve-1",
Storage: "rbd",
Content: "images",
Shared: 1,
Status: "available",
},
&proxmox.ClusterResource{
ID: "storage/zfs",
Type: "storage",
PluginType: "zfspool",
Node: "pve-1",
Storage: "zfs",
Content: "images",
Status: "available",
},
&proxmox.ClusterResource{
ID: "storage/zfs",
Type: "storage",
PluginType: "zfspool",
Node: "pve-2",
Storage: "zfs",
Content: "images",
Status: "available",
},
&proxmox.ClusterResource{
ID: "storage/lvm",
Type: "storage",
PluginType: "lvm",
Node: "pve-1",
Storage: "local-lvm",
Content: "images",
Status: "available",
},
&proxmox.ClusterResource{
ID: "storage/lvm",
Type: "storage",
PluginType: "lvm",
Node: "pve-2",
Storage: "local-lvm",
Content: "images",
Status: "available",
},
},
})
},
)
// Per-node status: empty Node payloads are enough for callers that only
// need a 200 response.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Node{},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Node{},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Node{},
})
})
// Node listing (`GET /nodes`): all three nodes online. The `$` anchor keeps
// this from shadowing the /nodes/<name>/... responders above.
httpmock.RegisterResponder(http.MethodGet, "=~/nodes$",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": []proxmox.NodeStatus{
{
Node: "pve-1",
Status: "online",
},
{
Node: "pve-2",
Status: "online",
},
{
Node: "pve-3",
Status: "online",
},
},
})
})
// Storage status endpoints, any node (`\S+`): rbd is shared, zfs and
// local-lvm are node-local; all report 100G total / 50G used.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/\S+/storage/rbd/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Storage{
Type: "dir",
Enabled: 1,
Active: 1,
Shared: 1,
Content: "images",
Total: 100 * 1024 * 1024 * 1024,
Used: 50 * 1024 * 1024 * 1024,
Avail: 50 * 1024 * 1024 * 1024,
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/\S+/storage/zfs/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Storage{
Type: "zfspool",
Enabled: 1,
Active: 1,
Content: "images",
Total: 100 * 1024 * 1024 * 1024,
Used: 50 * 1024 * 1024 * 1024,
Avail: 50 * 1024 * 1024 * 1024,
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/\S+/storage/local-lvm/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.Storage{
Type: "lvmthin",
Enabled: 1,
Active: 1,
Content: "images",
Total: 100 * 1024 * 1024 * 1024,
Used: 50 * 1024 * 1024 * 1024,
Avail: 50 * 1024 * 1024 * 1024,
},
})
},
)
// Any other storage name: Proxmox-style 400 "no such storage" error so tests
// can exercise the not-found path.
// NOTE(review): registered after the named storages above — confirm httpmock
// picks regexp responders in registration order before reordering.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/\S+/storage/\S+/status`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(400, map[string]any{
"data": nil,
"message": "Parameter verification failed",
"errors": map[string]string{
"storage": "No such storage.",
},
})
},
)
// Storage content listings: one pre-existing volume each on smb and rbd.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/\S+/storage/smb/content`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": []proxmox.StorageContent{
{
Format: "raw",
Volid: "smb:9999/vm-9999-volume-smb.raw",
VMID: 9999,
Size: 1024 * 1024 * 1024,
},
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/\S+/storage/rbd/content`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": []proxmox.StorageContent{
{
Format: "raw",
Volid: "rbd:9999/vm-9999-volume-rbd.raw",
VMID: 9999,
Size: 1024 * 1024 * 1024,
},
},
})
},
)
// Per-node VM listings: VM 100 on pve-1, 101 on pve-2 (running), 103 on
// pve-3 (stopped). `$`-anchored so /qemu/<vmid>/... routes are not captured.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/qemu$`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": []proxmox.VirtualMachine{
{
VMID: 100,
Status: "running",
Name: "cluster-1-node-1",
Node: "pve-1",
},
},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/qemu$`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": []proxmox.VirtualMachine{
{
VMID: 101,
Status: "running",
Name: "cluster-1-node-2",
Node: "pve-2",
},
},
})
})
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/qemu$`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": []proxmox.VirtualMachine{
{
VMID: 103,
Status: "stopped",
Name: "cluster-2-node-1",
Node: "pve-3",
},
},
})
})
// Per-VM status + config pairs. The smbios1 "uuid=..." strings are what the
// CCM parses to resolve a node's SystemUUID to a VM (FindVMByUUID), and the
// base64 "sku=..." suffix on VM 103 carries an instance-type hint.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/qemu/100/status/current`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.VirtualMachine{
VMID: 100,
Name: "cluster-1-node-1",
Node: "pve-1",
CPUs: 4,
MaxMem: 10 * 1024 * 1024 * 1024,
Status: "running",
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-1/qemu/100/config`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": map[string]any{
"vmid": 100,
"cores": 4,
"memory": "10240",
"scsi0": "local-lvm:vm-100-disk-0,size=10G",
"scsi1": "local-lvm:vm-9999-pvc-123,backup=0,iothread=1,wwn=0x5056432d49443031",
"smbios1": "uuid=11833f4c-341f-4bd3-aad7-f7abed000000",
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/qemu/101/status/current`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.VirtualMachine{
VMID: 101,
Name: "cluster-1-node-2",
Node: "pve-2",
CPUs: 2,
MaxMem: 5 * 1024 * 1024 * 1024,
Status: "running",
},
})
},
)
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-2/qemu/101/config`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": map[string]any{
"vmid": 101,
"scsi0": "local-lvm:vm-101-disk-0,size=10G",
"scsi1": "local-lvm:vm-101-disk-1,size=1G",
"scsi3": "local-lvm:vm-101-disk-2,size=1G",
"smbios1": "uuid=11833f4c-341f-4bd3-aad7-f7abed000001",
},
})
},
)
// VM 103 reports "running" here even though the cluster-resources listing
// above says "stopped" — presumably intentional so status- vs resource-based
// code paths can be distinguished in tests; verify against the test cases.
httpmock.RegisterResponder(http.MethodGet, `=~/nodes/pve-3/qemu/103/status/current`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": proxmox.VirtualMachine{
VMID: 103,
Name: "cluster-2-node-1",
Node: "pve-3",
CPUs: 1,
MaxMem: 2 * 1024 * 1024 * 1024,
Status: "running",
},
})
},
)
httpmock.RegisterResponder("GET", `=~/nodes/pve-3/qemu/103/config`,
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": map[string]any{
"vmid": 103,
"smbios1": "uuid=11833f4c-341f-4bd3-aad7-f7abea000000,sku=YzEubWVkaXVt",
},
})
},
)
// Disk resize always succeeds with an empty task payload.
httpmock.RegisterResponder("PUT", "https://127.0.0.1:8006/api2/json/nodes/pve-1/qemu/100/resize",
func(_ *http.Request) (*http.Response, error) {
return httpmock.NewJsonResponse(200, map[string]any{
"data": "",
})
},
)
// Canned async tasks: one completed delete and one failed delete, used by
// the volume-deletion responders below to drive success/error paths.
task := &proxmox.Task{
UPID: "UPID:pve-1:003B4235:1DF4ABCA:667C1C45:csi:103:root@pam:",
Type: "delete",
User: "root",
Status: "completed",
Node: "pve-1",
IsRunning: false,
}
taskErr := &proxmox.Task{
UPID: "UPID:pve-1:003B4235:1DF4ABCA:667C1C45:csi:104:root@pam:",
Type: "delete",
User: "root",
Status: "stopped",
ExitStatus: "ERROR",
Node: "pve-1",
IsRunning: false,
}
httpmock.RegisterResponder(http.MethodGet, fmt.Sprintf(`=~/nodes/%s/tasks/%s/status`, "pve-1", string(task.UPID)),
httpmock.NewJsonResponderOrPanic(200, map[string]any{"data": task}))
httpmock.RegisterResponder(http.MethodGet, fmt.Sprintf(`=~/nodes/%s/tasks/%s/status`, "pve-1", string(taskErr.UPID)),
httpmock.NewJsonResponderOrPanic(200, map[string]any{"data": taskErr}))
// Volume deletion: .Times(1) makes each DELETE single-shot, so a repeated
// delete of the same volume falls through (exercising retry/idempotency).
httpmock.RegisterResponder(http.MethodDelete, `=~/nodes/pve-1/storage/local-lvm/content/vm-9999-pvc-123`,
httpmock.NewJsonResponderOrPanic(200, map[string]any{"data": task.UPID}).Times(1))
httpmock.RegisterResponder(http.MethodDelete, `=~/nodes/pve-1/storage/local-lvm/content/vm-9999-pvc-error`,
httpmock.NewJsonResponderOrPanic(200, map[string]any{"data": taskErr.UPID}).Times(1))
}

18
test/cluster/docs.go Normal file
View File

@@ -0,0 +1,18 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cluster implements the http mock server for testing purposes.
package cluster

View File

@@ -0,0 +1,13 @@
features:
provider: default
clusters:
- url: https://127.0.0.1:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-1
- url: https://127.0.0.2:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-2

View File

@@ -0,0 +1,13 @@
features:
provider: capmox
clusters:
- url: https://127.0.0.1:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-1
- url: https://127.0.0.2:8006/api2/json
insecure: false
token_id: "user!token-id"
token_secret: "secret"
region: cluster-2