test: fix CLI nodes discovery without provisioner data

When integration tests run without data from the Talos provisioner (e.g.
against AWS/GCP), node discovery should work with only `talosconfig` as an input.

This specific flow did not fill out `infoWrapper` properly.

Clean up things a bit by reducing code duplication.

Signed-off-by: Andrey Smirnov <andrey.smirnov@talos-systems.com>
This commit is contained in:
Andrey Smirnov
2022-06-20 22:55:49 +04:00
parent 916a306829
commit a167a54021
11 changed files with 353 additions and 128 deletions

View File

@@ -95,9 +95,6 @@ func (hr *healthReporter) Update(condition conditions.Condition) {
} }
type clusterState struct { type clusterState struct {
controlPlaneNodes []string
workerNodes []string
nodeInfos []cluster.NodeInfo nodeInfos []cluster.NodeInfo
nodeInfosByType map[machine.Type][]cluster.NodeInfo nodeInfosByType map[machine.Type][]cluster.NodeInfo
} }
@@ -111,7 +108,13 @@ func (cl *clusterState) NodesByType(t machine.Type) []cluster.NodeInfo {
} }
func (cl *clusterState) String() string { func (cl *clusterState) String() string {
return fmt.Sprintf("control plane: %q, worker: %q", cl.controlPlaneNodes, cl.workerNodes) return fmt.Sprintf("control plane: %q, worker: %q",
slices.Map(cl.nodeInfosByType[machine.TypeControlPlane], func(info cluster.NodeInfo) string {
return info.InternalIP.String()
}),
slices.Map(cl.nodeInfosByType[machine.TypeWorker], func(info cluster.NodeInfo) string {
return info.InternalIP.String()
}))
} }
//nolint:gocyclo,cyclop //nolint:gocyclo,cyclop
@@ -136,9 +139,7 @@ func buildClusterInfo(ctx context.Context,
} }
return &clusterState{ return &clusterState{
controlPlaneNodes: controlPlaneNodes, nodeInfos: append(append([]cluster.NodeInfo(nil), controlPlaneNodeInfos...), workerNodeInfos...),
workerNodes: workerNodes,
nodeInfos: append(controlPlaneNodeInfos, workerNodeInfos...),
nodeInfosByType: map[machine.Type][]cluster.NodeInfo{ nodeInfosByType: map[machine.Type][]cluster.NodeInfo{
machine.TypeControlPlane: controlPlaneNodeInfos, machine.TypeControlPlane: controlPlaneNodeInfos,
machine.TypeWorker: workerNodeInfos, machine.TypeWorker: workerNodeInfos,

View File

@@ -32,7 +32,6 @@ import (
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime" "github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime/v1alpha1/bootloader" "github.com/talos-systems/talos/internal/app/machined/pkg/runtime/v1alpha1/bootloader"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime/v1alpha1/bootloader/adv" "github.com/talos-systems/talos/internal/app/machined/pkg/runtime/v1alpha1/bootloader/adv"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime/v1alpha1/platform"
"github.com/talos-systems/talos/internal/app/machined/pkg/system/events" "github.com/talos-systems/talos/internal/app/machined/pkg/system/events"
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health" "github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner" "github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
@@ -461,14 +460,9 @@ func (e *Etcd) argsForInit(ctx context.Context, r runtime.Runtime) error {
return err return err
} }
p, err := platform.CurrentPlatform()
if err != nil {
return err
}
var upgraded bool var upgraded bool
if p.Mode() != runtime.ModeContainer { if r.State().Platform().Mode() != runtime.ModeContainer {
var meta *bootloader.Meta var meta *bootloader.Meta
if meta, err = bootloader.NewMeta(); err != nil { if meta, err = bootloader.NewMeta(); err != nil {

View File

@@ -95,10 +95,13 @@ func (cliSuite *CLISuite) discoverKubectl() cluster.Info {
"-o", "jsonpath={.items[*].status.addresses[?(@.type==\"InternalIP\")].address}", fmt.Sprintf("--selector=!%s", constants.LabelNodeRoleMaster)) "-o", "jsonpath={.items[*].status.addresses[?(@.type==\"InternalIP\")].address}", fmt.Sprintf("--selector=!%s", constants.LabelNodeRoleMaster))
cliSuite.Require().NoError(err) cliSuite.Require().NoError(err)
return &infoWrapper{ nodeInfo, err := newNodeInfo(
masterNodes: strings.Fields(strings.TrimSpace(masterNodes)), strings.Fields(strings.TrimSpace(masterNodes)),
workerNodes: strings.Fields(strings.TrimSpace(workerNodes)), strings.Fields(strings.TrimSpace(workerNodes)),
} )
cliSuite.Require().NoError(err)
return nodeInfo
} }
// buildCLICmd builds exec.Cmd from TalosSuite and args. // buildCLICmd builds exec.Cmd from TalosSuite and args.

View File

@@ -13,13 +13,30 @@ import (
) )
type infoWrapper struct { type infoWrapper struct {
masterNodes []string
workerNodes []string
nodeInfos []cluster.NodeInfo nodeInfos []cluster.NodeInfo
nodeInfosByType map[machine.Type][]cluster.NodeInfo nodeInfosByType map[machine.Type][]cluster.NodeInfo
} }
// newNodeInfo builds an infoWrapper from master (control plane) and worker node IP lists,
// parsing each IP string via cluster.IPsToNodeInfos and indexing the results by machine type.
func newNodeInfo(masterNodes, workerNodes []string) (*infoWrapper, error) {
controlPlaneNodeInfos, err := cluster.IPsToNodeInfos(masterNodes)
if err != nil {
return nil, err
}
workerNodeInfos, err := cluster.IPsToNodeInfos(workerNodes)
if err != nil {
return nil, err
}
return &infoWrapper{
// append into a fresh nil slice so nodeInfos gets its own backing array and
// doesn't alias controlPlaneNodeInfos (which is also stored in nodeInfosByType)
nodeInfos: append(append([]cluster.NodeInfo(nil), controlPlaneNodeInfos...), workerNodeInfos...),
nodeInfosByType: map[machine.Type][]cluster.NodeInfo{
machine.TypeControlPlane: controlPlaneNodeInfos,
machine.TypeWorker: workerNodeInfos,
},
}, nil
}
func (wrapper *infoWrapper) Nodes() []cluster.NodeInfo { func (wrapper *infoWrapper) Nodes() []cluster.NodeInfo {
return wrapper.nodeInfos return wrapper.nodeInfos
} }

View File

@@ -19,7 +19,6 @@ import (
"github.com/talos-systems/talos/pkg/cluster" "github.com/talos-systems/talos/pkg/cluster"
"github.com/talos-systems/talos/pkg/machinery/client" "github.com/talos-systems/talos/pkg/machinery/client"
"github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/machine"
"github.com/talos-systems/talos/pkg/machinery/constants" "github.com/talos-systems/talos/pkg/machinery/constants"
) )
@@ -53,7 +52,7 @@ func discoverNodesK8s(ctx context.Context, client *client.Client, suite *TalosSu
return nil, err return nil, err
} }
result := &infoWrapper{} var masterNodes, workerNodes []string
for _, node := range nodes.Items { for _, node := range nodes.Items {
var address string var address string
@@ -71,31 +70,11 @@ func discoverNodesK8s(ctx context.Context, client *client.Client, suite *TalosSu
} }
if _, ok := node.Labels[constants.LabelNodeRoleMaster]; ok { if _, ok := node.Labels[constants.LabelNodeRoleMaster]; ok {
result.masterNodes = append(result.masterNodes, address) masterNodes = append(masterNodes, address)
} else { } else {
result.workerNodes = append(result.workerNodes, address) workerNodes = append(workerNodes, address)
} }
} }
controlPlaneNodeInfos, err := cluster.IPsToNodeInfos(result.masterNodes) return newNodeInfo(masterNodes, workerNodes)
if err != nil {
return nil, err
}
workerNodeInfos, err := cluster.IPsToNodeInfos(result.workerNodes)
if err != nil {
return nil, err
}
var allNodeInfos []cluster.NodeInfo
allNodeInfos = append(allNodeInfos, controlPlaneNodeInfos...)
allNodeInfos = append(allNodeInfos, workerNodeInfos...)
result.nodeInfos = allNodeInfos
result.nodeInfosByType = map[machine.Type][]cluster.NodeInfo{
machine.TypeControlPlane: controlPlaneNodeInfos,
machine.TypeWorker: workerNodeInfos,
}
return result, nil
} }

View File

@@ -8,6 +8,7 @@
package cli package cli
import ( import (
"context"
"fmt" "fmt"
"regexp" "regexp"
"strings" "strings"
@@ -33,33 +34,13 @@ func (suite *HealthSuite) SuiteName() string {
// //
//nolint:gocyclo //nolint:gocyclo
func (suite *HealthSuite) TestClientSideWithExplicitNodes() { func (suite *HealthSuite) TestClientSideWithExplicitNodes() {
bootstrapAPIIsUsed := true info := suite.DiscoverNodes(context.TODO())
for _, node := range suite.Cluster.Info().Nodes {
if node.Type == machine.TypeInit {
bootstrapAPIIsUsed = false
}
}
var args []string var args []string
if bootstrapAPIIsUsed { for _, machineType := range []machine.Type{machine.TypeInit, machine.TypeControlPlane, machine.TypeWorker} {
for _, node := range suite.Cluster.Info().Nodes { for _, node := range info.NodesByType(machineType) {
switch node.Type { switch machineType {
case machine.TypeControlPlane:
args = append(args, "--control-plane-nodes", node.IPs[0].String())
case machine.TypeWorker:
args = append(args, "--worker-nodes", node.IPs[0].String())
// todo: add more
case machine.TypeInit, machine.TypeUnknown:
fallthrough
default:
panic(fmt.Sprintf("unexpected machine type %v", node.Type))
}
}
} else {
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case machine.TypeInit: case machine.TypeInit:
args = append(args, "--init-node", node.IPs[0].String()) args = append(args, "--init-node", node.IPs[0].String())
case machine.TypeControlPlane: case machine.TypeControlPlane:
@@ -67,9 +48,9 @@ func (suite *HealthSuite) TestClientSideWithExplicitNodes() {
case machine.TypeWorker: case machine.TypeWorker:
args = append(args, "--worker-nodes", node.IPs[0].String()) args = append(args, "--worker-nodes", node.IPs[0].String())
case machine.TypeUnknown: case machine.TypeUnknown:
fallthrough // skip it
default: default:
panic(fmt.Sprintf("unexpected machine type %v", node.Type)) panic(fmt.Sprintf("unexpected machine type: %v", machineType))
} }
} }
} }
@@ -102,10 +83,6 @@ func (suite *HealthSuite) TestServerSide() {
} }
func (suite *HealthSuite) testClientSide(extraArgs ...string) { func (suite *HealthSuite) testClientSide(extraArgs ...string) {
if suite.Cluster == nil {
suite.T().Skip("Cluster is not available, skipping test")
}
args := append([]string{"--server=false"}, extraArgs...) args := append([]string{"--server=false"}, extraArgs...)
if suite.K8sEndpoint != "" { if suite.K8sEndpoint != "" {

View File

@@ -9,7 +9,6 @@ import (
"context" "context"
"fmt" "fmt"
"net/netip" "net/netip"
"sort"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -17,8 +16,6 @@ import (
"github.com/talos-systems/talos/pkg/cluster" "github.com/talos-systems/talos/pkg/cluster"
"github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/machine" "github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/machine"
"github.com/talos-systems/talos/pkg/machinery/constants" "github.com/talos-systems/talos/pkg/machinery/constants"
"github.com/talos-systems/talos/pkg/machinery/generic/maps"
"github.com/talos-systems/talos/pkg/machinery/generic/slices"
) )
// K8sAllNodesReportedAssertion checks whether all the nodes show up in node list. // K8sAllNodesReportedAssertion checks whether all the nodes show up in node list.
@@ -69,7 +66,7 @@ func K8sAllNodesReportedAssertion(ctx context.Context, cl ClusterInfo) error {
actualNodeInfos = append(actualNodeInfos, actualNodeInfo) actualNodeInfos = append(actualNodeInfos, actualNodeInfo)
} }
return assertNodes(expectedNodeInfos, actualNodeInfos) return cluster.NodesMatch(expectedNodeInfos, actualNodeInfos)
} }
// K8sFullControlPlaneAssertion checks whether all the master nodes are k8s master nodes. // K8sFullControlPlaneAssertion checks whether all the master nodes are k8s master nodes.
@@ -127,7 +124,7 @@ func K8sFullControlPlaneAssertion(ctx context.Context, cl ClusterInfo) error {
} }
} }
err = assertNodes(expectedNodes, actualNodes) err = cluster.NodesMatch(expectedNodes, actualNodes)
if err != nil { if err != nil {
return err return err
} }
@@ -381,44 +378,3 @@ func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespa
return len(rss.Items) > 0, nil return len(rss.Items) > 0, nil
} }
// assertNodes asserts that the provided expected set of nodes match the actual set of nodes.
// For the assertion to pass, the actual set of nodes
// must be equal to the expected set of nodes, compared by their internal IPs.
//
// Additionally, for each node, the IPs of the actual node must be a *subset* of the IPs of the expected node.
func assertNodes(expected []cluster.NodeInfo, actual []cluster.NodeInfo) error {
toMapFunc := func(t cluster.NodeInfo) (string, []string) {
return t.InternalIP.String(), mapIPsToStrings(t.IPs)
}
expectedNodeInternalIPToNodeIPs := slices.ToMap(expected, toMapFunc)
actualNodeInternalIPToNodeIPs := slices.ToMap(actual, toMapFunc)
if len(expectedNodeInternalIPToNodeIPs) != len(actualNodeInternalIPToNodeIPs) {
expectedNodeInternalIPs := maps.Keys(expectedNodeInternalIPToNodeIPs)
sort.Strings(expectedNodeInternalIPs)
actualNodeInternalIPs := maps.Keys(actualNodeInternalIPToNodeIPs)
sort.Strings(actualNodeInternalIPs)
return fmt.Errorf("expected node internal IPs %q but got %q",
expectedNodeInternalIPs, actualNodeInternalIPs)
}
for internalIP, ips := range expectedNodeInternalIPToNodeIPs {
actualIPs, found := actualNodeInternalIPToNodeIPs[internalIP]
if !found {
return fmt.Errorf("couldn't find expected node with internal IP: %v", internalIP)
}
sort.Strings(actualIPs)
sort.Strings(ips)
if !maps.Contains(slices.ToSet(ips), actualIPs) {
return fmt.Errorf("expected IPs %q for node but got %q", ips, actualIPs)
}
}
return nil
}

View File

@@ -7,8 +7,10 @@ package cluster
import ( import (
"context" "context"
"fmt"
"io" "io"
"net/netip" "net/netip"
"sort"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
@@ -16,6 +18,8 @@ import (
k8s "github.com/talos-systems/talos/pkg/kubernetes" k8s "github.com/talos-systems/talos/pkg/kubernetes"
"github.com/talos-systems/talos/pkg/machinery/client" "github.com/talos-systems/talos/pkg/machinery/client"
"github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/machine" "github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/machine"
"github.com/talos-systems/talos/pkg/machinery/generic/maps"
"github.com/talos-systems/talos/pkg/machinery/generic/slices"
) )
// ClientProvider builds Talos client by endpoint. // ClientProvider builds Talos client by endpoint.
@@ -90,3 +94,39 @@ func IPToNodeInfo(ip string) (*NodeInfo, error) {
IPs: []netip.Addr{parsed}, IPs: []netip.Addr{parsed},
}, nil }, nil
} }
// NodesMatch asserts that the provided expected set of nodes match the actual set of nodes.
//
// Each expectedNode IPs should have a non-empty intersection with actualNode IPs.
// Matching is one-to-one: an actual node, once claimed by an expected node, cannot
// match another expected node. Returns nil only if every expected node is matched
// and no actual nodes are left over.
func NodesMatch(expected, actual []NodeInfo) error {
// key by pointer so matched actual nodes can be deleted ("claimed") as we go
actualNodes := slices.ToMap(actual, func(n NodeInfo) (*NodeInfo, struct{}) { return &n, struct{}{} })
for _, expectedNodeInfo := range expected {
found := false
// NOTE(review): if an expected node's IPs intersect more than one remaining actual
// node, which one gets claimed depends on map iteration order — confirm callers
// never produce such overlapping node sets.
for actualNodeInfo := range actualNodes {
// expectedNodeInfo.IPs intersection with actualNodeInfo.IPs is not empty
if len(maps.Intersect(slices.ToSet(actualNodeInfo.IPs), slices.ToSet(expectedNodeInfo.IPs))) > 0 {
// claim this actual node so it can't satisfy another expected node
delete(actualNodes, actualNodeInfo)
found = true
break
}
}
if !found {
return fmt.Errorf("can't find expected node with IPs %q", expectedNodeInfo.IPs)
}
}
if len(actualNodes) > 0 {
// report leftover (unexpected) nodes; sort IPs for a deterministic error message
unexpectedIPs := slices.FlatMap(maps.Keys(actualNodes), func(n *NodeInfo) []netip.Addr { return n.IPs })
sort.Slice(unexpectedIPs, func(i, j int) bool { return unexpectedIPs[i].Less(unexpectedIPs[j]) })
return fmt.Errorf("unexpected nodes with IPs %q", unexpectedIPs)
}
return nil
}

View File

@@ -4,11 +4,138 @@
package cluster_test package cluster_test
import "testing" import (
"net/netip"
"testing"
func TestEmpty(t *testing.T) { "github.com/stretchr/testify/assert"
// added for accurate coverage estimation
// "github.com/talos-systems/talos/pkg/cluster"
// please remove it once any unit-test is added )
// for this package
func TestAssertNodes(t *testing.T) {
for _, tt := range []struct {
name string
expectedNodes []cluster.NodeInfo
actualNodes []cluster.NodeInfo
expectedError string
}{
{
name: "aws+discovery",
expectedNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("5.6.7.8"), netip.MustParseAddr("172.23.1.3")},
},
},
actualNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("5.6.7.8"), netip.MustParseAddr("172.23.1.3")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("172.23.1.2")},
},
},
},
{
name: "aws+private",
expectedNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.3")},
},
},
actualNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("5.6.7.8"), netip.MustParseAddr("172.23.1.3")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("172.23.1.2")},
},
},
},
{
name: "more internal IPs",
expectedNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("ff::1"), netip.MustParseAddr("172.23.1.3")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("ff::2"), netip.MustParseAddr("172.23.1.2")},
},
},
actualNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.3")},
},
},
},
{
name: "extra node expected",
expectedNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.3")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.4")},
},
},
actualNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("5.6.7.8"), netip.MustParseAddr("172.23.1.3")},
},
},
expectedError: `can't find expected node with IPs ["172.23.1.4"]`,
},
{
name: "extra node actual",
expectedNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.3")},
},
},
actualNodes: []cluster.NodeInfo{
{
IPs: []netip.Addr{netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("172.23.1.2")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("5.6.7.8"), netip.MustParseAddr("172.23.1.3")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.4")},
},
{
IPs: []netip.Addr{netip.MustParseAddr("172.23.1.5"), netip.MustParseAddr("9.10.11.12")},
},
},
expectedError: "unexpected nodes with IPs [\"9.10.11.12\" \"172.23.1.4\" \"172.23.1.5\"]",
},
} {
t.Run(tt.name, func(t *testing.T) {
err := cluster.NodesMatch(tt.expectedNodes, tt.actualNodes)
if tt.expectedError == "" {
assert.NoError(t, err)
} else {
assert.EqualError(t, err, tt.expectedError)
}
})
}
} }

View File

@@ -113,6 +113,33 @@ func Contains[K comparable](m map[K]struct{}, slc []K) bool {
return true return true
} }
// Intersect returns a list of keys contained in both maps.
//
// With no maps it returns nil; with one map it returns all of its keys.
// The result order follows map iteration order of the first map and is
// therefore not deterministic — callers should sort if they need stable output.
func Intersect[K comparable](maps ...map[K]struct{}) []K {
var intersection []K
if len(maps) == 0 {
return intersection
}
// a key is in the intersection iff it is present in every map;
// iterating the first map suffices since any common key must be in it
for k := range maps[0] {
containedInAll := true
for _, m := range maps[1:] {
if _, ok := m[k]; !ok {
containedInAll = false
break
}
}
if containedInAll {
intersection = append(intersection, k)
}
}
return intersection
}
// Filter returns a map containing all the elements of m that satisfy fn. // Filter returns a map containing all the elements of m that satisfy fn.
func Filter[M ~map[K]V, K comparable, V any](m M, fn func(K, V) bool) M { func Filter[M ~map[K]V, K comparable, V any](m M, fn func(K, V) bool) M {
// NOTE(DmitriyMV): We use type parameter M here to return exactly the same type as the input map. // NOTE(DmitriyMV): We use type parameter M here to return exactly the same type as the input map.

View File

@@ -316,3 +316,107 @@ func TestValuesFunc(t *testing.T) {
}) })
} }
} }
// TestIntersection covers maps.Intersect edge cases: no maps, a single empty map,
// a single non-empty map, an empty first map, multiple overlapping maps, and maps
// with an empty common subset. Results are sorted before comparison because
// Intersect's output order follows map iteration order.
func TestIntersection(t *testing.T) {
t.Parallel()
type args struct {
maps []map[int]struct{}
}
tests := map[string]struct {
args args
want []int
}{
"nil": {
args: args{
maps: nil,
},
want: nil,
},
"empty": {
args: args{
maps: []map[int]struct{}{
{},
},
},
want: nil,
},
"single": {
args: args{
maps: []map[int]struct{}{
{
1: {},
2: {},
},
},
},
want: []int{1, 2},
},
"first empty": {
args: args{
maps: []map[int]struct{}{
{},
{
1: {},
2: {},
},
},
},
want: nil,
},
"multiple": {
args: args{
maps: []map[int]struct{}{
{
1: {},
2: {},
4: {},
},
{
3: {},
2: {},
4: {},
},
{
2: {},
4: {},
},
},
},
want: []int{2, 4},
},
"empty intersection": {
args: args{
maps: []map[int]struct{}{
{
4: {},
5: {},
6: {},
},
{
5: {},
6: {},
},
{
1: {},
2: {},
3: {},
},
},
},
want: nil,
},
}
for name, tt := range tests {
tt := tt
t.Run(name, func(t *testing.T) {
t.Parallel()
got := maps.Intersect(tt.args.maps...)
// sort to make the nondeterministic map-order result comparable
sort.Ints(got)
assert.Equal(t, tt.want, got)
})
}
}