test: improve the reset integration tests

Provide a trace for each step of the reset sequence taken, so that if
one of those steps fails, the integration test produces a meaningful
message instead of proceeding and failing somewhere else.

More cleanup/refactoring; the change should be functionally equivalent.

Fixes #8635

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
Author: Andrey Smirnov
Date:   2024-04-22 18:39:24 +04:00
parent 8cdf0f7cb0
commit 05fd042bb3
3 changed files with 173 additions and 226 deletions
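Note: the hunks below replace the inlined reset-and-verify logic with a single suite.ResetNode helper. Its definition lives in one of the other changed files and is not shown in this view; the following is a rough sketch of what such a helper plausibly looks like, pieced together from the call sites and the commit message. The receiver type, signature, and step ordering are assumptions, not the actual implementation; only the helper names (ReadBootID, HashKubeletCert, AssertBootIDChanged, ClearConnectionRefused, WaitForBootDone, AssertClusterHealthy, base.IgnoreGRPCUnavailable) are taken from code visible in this diff.

    // Hypothetical reconstruction of the new helper: it traces every step of
    // the reset sequence, so a failure is reported at the step that caused it
    // instead of somewhere further down the test.
    func (apiSuite *APISuite) ResetNode(ctx context.Context, node string, resetSpec *machineapi.ResetRequest, runHealthChecks bool) {
        apiSuite.T().Log("resetting node", node)

        nodeCtx := client.WithNode(ctx, node)

        // capture state that lets us later prove the reset actually happened
        bootIDBefore, err := apiSuite.ReadBootID(nodeCtx)
        apiSuite.Require().NoError(err, "failed to read boot ID")

        preReset, err := apiSuite.HashKubeletCert(ctx, node)
        apiSuite.Require().NoError(err, "failed to hash kubelet cert")

        // issue the reset; the node goes down, so a gRPC "unavailable" error is expected
        apiSuite.Require().NoError(
            base.IgnoreGRPCUnavailable(apiSuite.Client.ResetGeneric(nodeCtx, resetSpec)),
            "failed to reset node",
        )

        // detect the reboot via the boot ID change, then wait for the API to come back
        apiSuite.AssertBootIDChanged(nodeCtx, bootIDBefore, node, 5*time.Minute)
        apiSuite.ClearConnectionRefused(ctx, node)

        // the kubelet cert lives on EPHEMERAL: it must be regenerated when
        // EPHEMERAL is wiped (explicitly, or implicitly by a full reset),
        // and must survive otherwise
        ephemeralWiped := len(resetSpec.SystemPartitionsToWipe) == 0
        for _, spec := range resetSpec.SystemPartitionsToWipe {
            if spec.Label == constants.EphemeralPartitionLabel && spec.Wipe {
                ephemeralWiped = true
            }
        }

        postReset, err := apiSuite.HashKubeletCert(ctx, node)
        apiSuite.Require().NoError(err, "failed to hash kubelet cert")

        if ephemeralWiped {
            apiSuite.Assert().NotEqual(preReset, postReset, "reset should lead to new kubelet cert being generated")
        } else {
            apiSuite.Assert().Equal(preReset, postReset, "kubelet cert should not change")
        }

        if runHealthChecks {
            apiSuite.WaitForBootDone(ctx)
            apiSuite.AssertClusterHealthy(ctx)
        }
    }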


@@ -13,7 +13,6 @@ import (
     "time"
 
     "github.com/siderolabs/gen/xslices"
-    "github.com/siderolabs/go-retry/retry"
 
     "github.com/siderolabs/talos/internal/integration/base"
     machineapi "github.com/siderolabs/talos/pkg/machinery/api/machine"
@@ -68,48 +67,16 @@ func (suite *ResetSuite) TestResetNodeByNode() {
         suite.T().Skip("skipping as talos is explicitly trusted booted")
     }
 
-    initNodeAddress := ""
-
-    for _, node := range suite.Cluster.Info().Nodes {
-        if node.Type == machine.TypeInit {
-            initNodeAddress = node.IPs[0].String()
-
-            break
-        }
-    }
-
     nodes := suite.DiscoverNodeInternalIPs(suite.ctx)
     suite.Require().NotEmpty(nodes)
 
     sort.Strings(nodes)
 
     for _, node := range nodes {
-        if node == initNodeAddress {
-            // due to the bug with etcd cluster build for the init node after Reset(), skip resetting first node
-            // there's no problem if bootstrap API was used, so this check only protects legacy init nodes
-            suite.T().Log("Skipping init node", node, "due to known issue with etcd")
-
-            continue
-        }
-
         suite.T().Log("Resetting node", node)
 
-        preReset, err := suite.HashKubeletCert(suite.ctx, node)
-        suite.Require().NoError(err)
-
-        suite.AssertRebooted(
-            suite.ctx, node, func(nodeCtx context.Context) error {
-                // force reboot after reset, as this is the only mode we can test
-                return base.IgnoreGRPCUnavailable(suite.Client.Reset(nodeCtx, true, true))
-            }, 10*time.Minute,
-        )
-
-        suite.ClearConnectionRefused(suite.ctx, node)
-
-        postReset, err := suite.HashKubeletCert(suite.ctx, node)
-        suite.Require().NoError(err)
-
-        suite.Assert().NotEqual(preReset, postReset, "reset should lead to new kubelet cert being generated")
+        suite.ResetNode(suite.ctx, node, &machineapi.ResetRequest{
+            Reboot:   true,
+            Graceful: true,
+        }, true)
     }
 }
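For reference: the removed code above used the legacy two-boolean Reset client call, while ResetNode takes a full ResetRequest. From the way the graceful and !graceful tests call it — (nodeCtx, true, true) here and (nodeCtx, false, true) below, both forcing a reboot — the booleans map onto the request fields like this (a sketch of the correspondence, derived from those call sites):

    // legacy convenience form: graceful first, then reboot
    err := suite.Client.Reset(nodeCtx, true, true)

    // equivalent generic form, as used via ResetNode after this refactor
    err = suite.Client.ResetGeneric(nodeCtx, &machineapi.ResetRequest{
        Graceful: true,
        Reboot:   true,
    })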
@@ -121,24 +88,10 @@ func (suite *ResetSuite) testResetNoGraceful(nodeType machine.Type) {
     node := suite.RandomDiscoveredNodeInternalIP(nodeType)
 
     suite.T().Logf("Resetting %s node !graceful %s", nodeType, node)
 
-    preReset, err := suite.HashKubeletCert(suite.ctx, node)
-    suite.Require().NoError(err)
-
-    suite.AssertRebooted(
-        suite.ctx, node, func(nodeCtx context.Context) error {
-            // force reboot after reset, as this is the only mode we can test
-            return base.IgnoreGRPCUnavailable(suite.Client.Reset(nodeCtx, false, true))
-        }, 5*time.Minute,
-    )
-
-    suite.ClearConnectionRefused(suite.ctx, node)
-
-    postReset, err := suite.HashKubeletCert(suite.ctx, node)
-    suite.Require().NoError(err)
-
-    suite.Assert().NotEqual(preReset, postReset, "reset should lead to new kubelet cert being generated")
+    suite.ResetNode(suite.ctx, node, &machineapi.ResetRequest{
+        Reboot:   true,
+        Graceful: false,
+    }, true)
 }
 
 // TestResetNoGracefulWorker resets a worker in !graceful mode.
@@ -157,37 +110,16 @@ func (suite *ResetSuite) TestResetNoGracefulControlplane() {
 func (suite *ResetSuite) TestResetWithSpecEphemeral() {
     node := suite.RandomDiscoveredNodeInternalIP()
 
     suite.T().Log("Resetting node with spec=[EPHEMERAL]", node)
 
-    preReset, err := suite.HashKubeletCert(suite.ctx, node)
-    suite.Require().NoError(err)
-
-    suite.AssertRebooted(
-        suite.ctx, node, func(nodeCtx context.Context) error {
-            // force reboot after reset, as this is the only mode we can test
-            return base.IgnoreGRPCUnavailable(
-                suite.Client.ResetGeneric(
-                    nodeCtx, &machineapi.ResetRequest{
-                        Reboot:   true,
-                        Graceful: true,
-                        SystemPartitionsToWipe: []*machineapi.ResetPartitionSpec{
-                            {
-                                Label: constants.EphemeralPartitionLabel,
-                                Wipe:  true,
-                            },
-                        },
-                    },
-                ),
-            )
-        }, 5*time.Minute,
-    )
-
-    suite.ClearConnectionRefused(suite.ctx, node)
-
-    postReset, err := suite.HashKubeletCert(suite.ctx, node)
-    suite.Require().NoError(err)
-
-    suite.Assert().NotEqual(preReset, postReset, "reset should lead to new kubelet cert being generated")
+    suite.ResetNode(suite.ctx, node, &machineapi.ResetRequest{
+        Reboot:   true,
+        Graceful: true,
+        SystemPartitionsToWipe: []*machineapi.ResetPartitionSpec{
+            {
+                Label: constants.EphemeralPartitionLabel,
+                Wipe:  true,
+            },
+        },
+    }, true)
 }
 
 // TestResetWithSpecState resets only state partition on the node.
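The kubelet-cert hash taken before and after a reset is the signal these spec tests key off: EPHEMERAL is the partition mounted at /var, where the kubelet keeps its certificates, so wiping it must yield a new cert, while a STATE-only wipe must leave it untouched. These are the two assertions (quoted from the removed code) that the refactor folds into ResetNode:

    // after a wipe that includes EPHEMERAL:
    suite.Assert().NotEqual(preReset, postReset, "reset should lead to new kubelet cert being generated")

    // after a STATE-only wipe (EPHEMERAL left intact):
    suite.Assert().Equal(preReset, postReset, "ephemeral partition was not reset")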
@@ -201,12 +133,7 @@ func (suite *ResetSuite) TestResetWithSpecState() {
     node := suite.RandomDiscoveredNodeInternalIP()
 
     suite.T().Log("Resetting node with spec=[STATE]", node)
 
-    preReset, err := suite.HashKubeletCert(suite.ctx, node)
-    suite.Require().NoError(err)
-
-    disks, err := suite.Client.Disks(client.WithNodes(suite.ctx, node))
+    disks, err := suite.Client.Disks(client.WithNode(suite.ctx, node))
     suite.Require().NoError(err)
     suite.Require().NotEmpty(disks.Messages)
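The hunk above also switches the Disks query from client.WithNodes to client.WithNode. Both helpers exist in the machinery client: WithNodes attaches a list of target nodes to the context (responses then come back tagged per node), while WithNode targets exactly one node, which is the precise form for a single-node query like this one. Roughly (node1/node2 below are placeholders):

    // fan a request out to several nodes
    ctx := client.WithNodes(suite.ctx, node1, node2)

    // target exactly one node
    ctx = client.WithNode(suite.ctx, node)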
@@ -219,73 +146,45 @@
         },
     )
 
-    suite.AssertRebooted(
-        suite.ctx, node, func(nodeCtx context.Context) error {
-            // force reboot after reset, as this is the only mode we can test
-            return base.IgnoreGRPCUnavailable(
-                suite.Client.ResetGeneric(
-                    nodeCtx, &machineapi.ResetRequest{
-                        Reboot:   true,
-                        Graceful: true,
-                        SystemPartitionsToWipe: []*machineapi.ResetPartitionSpec{
-                            {
-                                Label: constants.StatePartitionLabel,
-                                Wipe:  true,
-                            },
-                        },
-                        UserDisksToWipe: userDisksToWipe,
-                    },
-                ),
-            )
-        }, 5*time.Minute,
-    )
-
-    suite.ClearConnectionRefused(suite.ctx, node)
-
-    postReset, err := suite.HashKubeletCert(suite.ctx, node)
-    suite.Require().NoError(err)
-
-    suite.Assert().Equal(preReset, postReset, "ephemeral partition was not reset")
+    suite.ResetNode(suite.ctx, node, &machineapi.ResetRequest{
+        Reboot:   true,
+        Graceful: true,
+        SystemPartitionsToWipe: []*machineapi.ResetPartitionSpec{
+            {
+                Label: constants.StatePartitionLabel,
+                Wipe:  true,
+            },
+        },
+        UserDisksToWipe: userDisksToWipe,
+    }, true)
 }
 
-// TestResetDuringBoot resets the node multiple times, second reset is done
-// before boot sequence is complete.
+// TestResetDuringBoot resets the node while it is in boot sequence.
 func (suite *ResetSuite) TestResetDuringBoot() {
     node := suite.RandomDiscoveredNodeInternalIP()
     nodeCtx := client.WithNodes(suite.ctx, node)
 
-    suite.T().Log("Resetting node", node)
+    suite.T().Log("rebooting node", node)
 
-    for range 2 {
-        bootID := suite.ReadBootIDWithRetry(nodeCtx, time.Minute*5)
+    bootIDBefore, err := suite.ReadBootID(nodeCtx)
+    suite.Require().NoError(err)
 
-        err := retry.Constant(5*time.Minute, retry.WithUnits(time.Millisecond*1000)).Retry(
-            func() error {
-                // force reboot after reset, as this is the only mode we can test
-                return retry.ExpectedError(
-                    suite.Client.ResetGeneric(
-                        client.WithNodes(suite.ctx, node), &machineapi.ResetRequest{
-                            Reboot:   true,
-                            Graceful: true,
-                            SystemPartitionsToWipe: []*machineapi.ResetPartitionSpec{
-                                {
-                                    Label: constants.EphemeralPartitionLabel,
-                                    Wipe:  true,
-                                },
-                            },
-                        },
-                    ),
-                )
-            },
-        )
-        suite.Require().NoError(err)
+    suite.Require().NoError(suite.Client.Reboot(nodeCtx))
 
-        suite.AssertBootIDChanged(nodeCtx, bootID, node, time.Minute*5)
-    }
+    suite.AssertBootIDChanged(nodeCtx, bootIDBefore, node, 3*time.Minute)
 
-    suite.WaitForBootDone(suite.ctx)
+    suite.ClearConnectionRefused(suite.ctx, node)
 
-    suite.AssertClusterHealthy(suite.ctx)
+    suite.ResetNode(suite.ctx, node, &machineapi.ResetRequest{
+        Reboot:   true,
+        Graceful: true,
+        SystemPartitionsToWipe: []*machineapi.ResetPartitionSpec{
+            {
+                Label: constants.EphemeralPartitionLabel,
+                Wipe:  true,
+            },
+        },
+    }, true)
 }
 
 func init() {
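Finally, the reboot detection that replaces the old retry loop in TestResetDuringBoot: the kernel generates a fresh random boot ID on every boot (exposed as /proc/sys/kernel/random/boot_id, which is presumably what the suite's ReadBootID fetches through the machine API), so comparing the value before and after is a reliable proof that a reboot happened. The new sequence reads:

    // read the current boot ID (a random UUID, regenerated on every boot)
    bootIDBefore, err := suite.ReadBootID(nodeCtx)
    suite.Require().NoError(err)

    // trigger the reboot...
    suite.Require().NoError(suite.Client.Reboot(nodeCtx))

    // ...then poll until the node reports a different boot ID, i.e. it really rebooted
    suite.AssertBootIDChanged(nodeCtx, bootIDBefore, node, 3*time.Minute)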