test: fix and improve reboot/reset tests

These tests rely on node uptime checks, and those checks have proven to
be quite flaky.

The following fixes were applied:

* the check code was refactored into a common method shared between the
reset and reboot tests (the reboot-all-nodes test does its checks in a
different way, so it wasn't updated); see the sketch further below

* each request to read uptime now times out after 5 seconds, so that the
checks don't wait forever when a node is down (or the connection is
aborted)

* a node's uptime at the start of the test can be low, so after a reboot
its uptime may climb back above the pre-reboot value while the check is
still retrying; to account for this, the time elapsed since the reboot
request is added to the check condition

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
Author: Andrey Smirnov
Date: 2020-06-29 23:14:39 +03:00
Committed by: talos-bot
Parent: 51112a1d86
Commit: 6fb55229a2
3 changed files with 80 additions and 81 deletions
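
The shared helper itself lives in the third changed file (presumably the common integration suite in the base package), which is not shown on this page. Judging only from the call sites visible in the diffs below, namely AssertRebooted(ctx, node, rebootFunc, timeout) and a ReadUptime method returning a time.Duration, it could look roughly like the following sketch. The package, the receiver type name APISuite, and the placement of the cluster-health check are assumptions; the retry loop, the 5-second per-request timeout, and the elapsed-time allowance follow the behaviour described in the commit message.

package base

import (
	"context"
	"fmt"
	"time"

	"github.com/talos-systems/talos/pkg/client"
	"github.com/talos-systems/talos/pkg/retry"
)

// AssertRebooted issues a reboot via rebootFunc and then polls uptime until it
// drops. APISuite stands in for the shared suite type, whose definition is not
// shown on this page.
func (apiSuite *APISuite) AssertRebooted(ctx context.Context, node string, rebootFunc func(nodeCtx context.Context) error, timeout time.Duration) {
	// measurement slack, mirroring the `offset` constant added to the all-nodes test
	const offset = 2 * time.Second

	// overall timeout for the reboot-and-check sequence on this node
	ctx, ctxCancel := context.WithTimeout(ctx, timeout)
	defer ctxCancel()

	nodeCtx := client.WithNodes(ctx, node)

	// record uptime before the reboot so a drop can be detected afterwards
	uptimeBefore, err := apiSuite.ReadUptime(nodeCtx)
	apiSuite.Require().NoError(err)

	rebootTimestamp := time.Now()

	apiSuite.Assert().NoError(rebootFunc(nodeCtx))

	apiSuite.Require().NoError(retry.Constant(timeout).Retry(func() error {
		// give each uptime request its own short deadline so a dead node
		// (or an aborted connection) cannot stall the retry loop
		requestCtx, requestCtxCancel := context.WithTimeout(nodeCtx, 5*time.Second)
		defer requestCtxCancel()

		// until the node actually goes down, its uptime keeps growing linearly,
		// so it is safe to compare against uptimeBefore plus the elapsed time
		elapsed := time.Since(rebootTimestamp) - offset

		uptimeAfter, err := apiSuite.ReadUptime(requestCtx)
		if err != nil {
			// API might be unresponsive during reboot
			return retry.ExpectedError(err)
		}

		if uptimeAfter >= uptimeBefore+elapsed {
			// uptime should go down after reboot
			return retry.ExpectedError(fmt.Errorf("uptime didn't go down: before %s + %s, after %s", uptimeBefore, elapsed, uptimeAfter))
		}

		return nil
	}))

	if apiSuite.Cluster != nil {
		// without cluster state deep checks are impossible, but the basic reboot check still works
		apiSuite.AssertClusterHealthy(ctx)
	}
}

With a helper along these lines, each call site in the diffs below reduces to a single AssertRebooted call wrapping the node-specific reboot or reset request.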


@@ -57,43 +57,9 @@ func (suite *RebootSuite) TestRebootNodeByNode() {
for _, node := range nodes {
suite.T().Log("rebooting node", node)
func(node string) {
// timeout for single node reboot
ctx, ctxCancel := context.WithTimeout(suite.ctx, 10*time.Minute)
defer ctxCancel()
nodeCtx := client.WithNodes(ctx, node)
// read uptime before reboot
uptimeBefore, err := suite.ReadUptime(nodeCtx)
suite.Require().NoError(err)
suite.Assert().NoError(suite.Client.Reboot(nodeCtx))
var uptimeAfter float64
suite.Require().NoError(retry.Constant(10 * time.Minute).Retry(func() error {
uptimeAfter, err = suite.ReadUptime(nodeCtx)
if err != nil {
// API might be unresponsive during reboot
return retry.ExpectedError(err)
}
if uptimeAfter >= uptimeBefore {
// uptime should go down after reboot
return retry.ExpectedError(fmt.Errorf("uptime didn't go down: before %f, after %f", uptimeBefore, uptimeAfter))
}
return nil
}))
if suite.Cluster != nil {
// without cluster state we can't do deep checks, but basic reboot test still works
// NB: using `ctx` here to have client talking to init node by default
suite.AssertClusterHealthy(ctx)
}
}(node)
suite.AssertRebooted(suite.ctx, node, func(nodeCtx context.Context) error {
return suite.Client.Reboot(nodeCtx)
}, 10*time.Minute)
}
}
@@ -103,6 +69,9 @@ func (suite *RebootSuite) TestRebootAllNodes() {
suite.T().Skip("cluster doesn't support reboots")
}
// offset to account for uptime measurement inaccuracy
const offset = 2 * time.Second
nodes := suite.DiscoverNodes()
suite.Require().NotEmpty(nodes)
@@ -131,6 +100,8 @@ func (suite *RebootSuite) TestRebootAllNodes() {
suite.Require().NoError(<-errCh)
}
rebootTimestamp := time.Now()
allNodesCtx := client.WithNodes(suite.ctx, nodes...)
suite.Require().NoError(suite.Client.Reboot(allNodesCtx))
@@ -143,20 +114,27 @@ func (suite *RebootSuite) TestRebootAllNodes() {
return fmt.Errorf("uptime record not found for %q", node)
}
uptimeBefore := uptimeBeforeInterface.(float64) //nolint: errcheck
uptimeBefore := uptimeBeforeInterface.(time.Duration) //nolint: errcheck
nodeCtx := client.WithNodes(suite.ctx, node)
return retry.Constant(10 * time.Minute).Retry(func() error {
uptimeAfter, err := suite.ReadUptime(nodeCtx)
requestCtx, requestCtxCancel := context.WithTimeout(nodeCtx, 5*time.Second)
defer requestCtxCancel()
elapsed := time.Since(rebootTimestamp) - offset
uptimeAfter, err := suite.ReadUptime(requestCtx)
if err != nil {
// API might be unresponsive during reboot
return retry.ExpectedError(fmt.Errorf("error reading uptime for node %q: %w", node, err))
}
if uptimeAfter >= uptimeBefore {
// uptime of the node before it actually reboots still goes up linearly
// so we can safely add elapsed time here
if uptimeAfter >= uptimeBefore+elapsed {
// uptime should go down after reboot
return retry.ExpectedError(fmt.Errorf("uptime didn't go down for node %q: before %f, after %f", node, uptimeBefore, uptimeAfter))
return retry.ExpectedError(fmt.Errorf("uptime didn't go down for node %q: before %s + %s, after %s", node, uptimeBefore, elapsed, uptimeAfter))
}
return nil
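
To make the new condition concrete: until the node actually goes down, its uptime grows in lockstep with wall-clock time, so adding the elapsed time (minus the 2-second slack) to the pre-reboot uptime should never hide a missed reboot, while a node that did reboot stays below the threshold even after its fresh uptime climbs past the old value, which is the case that made the previous check flaky for nodes with low initial uptime. A small standalone illustration follows; the function name, the package, and the sample numbers are invented for this example.

package main

import (
	"fmt"
	"time"
)

// offset mirrors the 2-second measurement slack from the diff above.
const offset = 2 * time.Second

// rebootConfirmed reports whether the observed uptime proves the node went
// down at some point after the reboot was requested.
func rebootConfirmed(uptimeBefore, uptimeAfter, sinceRequest time.Duration) bool {
	elapsed := sinceRequest - offset

	// a node that never went down would report roughly uptimeBefore + sinceRequest,
	// which is always at least uptimeBefore + elapsed
	return uptimeAfter < uptimeBefore+elapsed
}

func main() {
	uptimeBefore := 60 * time.Second // node was booted shortly before the test

	// 30s after the request the node is still up and reports ~90s of uptime
	fmt.Println(rebootConfirmed(uptimeBefore, 90*time.Second, 30*time.Second)) // false -> keep retrying

	// 5 minutes later the node has rebooted and been back up for 200s;
	// its uptime is already above the pre-reboot value, yet the check passes
	fmt.Println(rebootConfirmed(uptimeBefore, 200*time.Second, 5*time.Minute)) // true -> reboot detected
}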


@@ -8,14 +8,11 @@ package api
import (
"context"
"fmt"
"sort"
"testing"
"time"
"github.com/talos-systems/talos/internal/integration/base"
"github.com/talos-systems/talos/pkg/client"
"github.com/talos-systems/talos/pkg/retry"
)
type ResetSuite struct {
@@ -70,43 +67,13 @@ func (suite *ResetSuite) TestResetNodeByNode() {
suite.T().Log("Resetting node", node)
func(node string) {
// timeout for single node Reset
ctx, ctxCancel := context.WithTimeout(suite.ctx, 5*time.Minute)
defer ctxCancel()
nodeCtx := client.WithNodes(ctx, node)
// read uptime before Reset
uptimeBefore, err := suite.ReadUptime(nodeCtx)
suite.Require().NoError(err)
// uptime should go down after Reset, as it reboots the node
suite.AssertRebooted(suite.ctx, node, func(nodeCtx context.Context) error {
// force reboot after reset, as this is the only mode we can test
suite.Assert().NoError(suite.Client.Reset(nodeCtx, true, true))
var uptimeAfter float64
suite.Require().NoError(retry.Constant(10 * time.Minute).Retry(func() error {
uptimeAfter, err = suite.ReadUptime(nodeCtx)
if err != nil {
// API might be unresponsive during reboot
return retry.ExpectedError(err)
}
if uptimeAfter >= uptimeBefore {
// uptime should go down after Reset, as it reboots the node
return retry.ExpectedError(fmt.Errorf("uptime didn't go down: before %f, after %f", uptimeBefore, uptimeAfter))
}
return nil
}))
// TODO: there is no good way to assert that node was reset and disk contents were really wiped
// NB: using `ctx` here to have client talking to init node by default
suite.AssertClusterHealthy(ctx)
}(node)
return suite.Client.Reset(nodeCtx, true, true)
}, 10*time.Minute)
// TODO: there is no good way to assert that node was reset and disk contents were really wiped
}
}