chore: code cleanup

More usage of the slices package, less usage of the sort package.

Signed-off-by: Dmitriy Matrenichev <dmitry.matrenichev@siderolabs.com>
Dmitriy Matrenichev
2024-11-07 20:30:32 +03:00
parent 43fe3807a8
commit e26d0043e0
62 changed files with 189 additions and 204 deletions

View File

@@ -837,7 +837,7 @@ func create(ctx context.Context) error {
     types := []machine.Type{machine.TypeControlPlane, machine.TypeWorker}
     if withInitNode {
-        types = append([]machine.Type{machine.TypeInit}, types...)
+        types = slices.Insert(types, 0, machine.TypeInit)
     }
     if err = configBundle.Write(".", encoder.CommentsAll, types...); err != nil {
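For reference on the pattern above: slices.Insert(s, 0, v) prepends without allocating the throwaway one-element slice that the append form needs. A minimal standalone sketch (illustrative names, not from this commit):

package main

import (
    "fmt"
    "slices"
)

func main() {
    types := []string{"controlplane", "worker"}

    // Equivalent to: types = append([]string{"init"}, types...),
    // but without the throwaway single-element slice.
    types = slices.Insert(types, 0, "init")

    fmt.Println(types) // [init controlplane worker]
}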

View File

@@ -5,11 +5,12 @@
 package cluster
 import (
+    "cmp"
     "context"
     "fmt"
     "net/netip"
     "os"
-    "sort"
+    "slices"
     "strings"
     "text/tabwriter"
@@ -76,7 +77,7 @@ func showCluster(cluster provision.Cluster) error {
     fmt.Fprintf(w, "NAME\tTYPE\tIP\tCPU\tRAM\tDISK\n")
     nodes := cluster.Info().Nodes
-    sort.Slice(nodes, func(i, j int) bool { return nodes[i].Name < nodes[j].Name })
+    slices.SortFunc(nodes, func(a, b provision.NodeInfo) int { return cmp.Compare(a.Name, b.Name) })
     for _, node := range nodes {
         cpus := "-"
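sort.Slice takes a less(i, j) bool over indices, while slices.SortFunc takes a three-way cmp(a, b) int over the elements themselves; cmp.Compare supplies that three-way result for any ordered type. A self-contained sketch of the conversion (the node type here is illustrative):

package main

import (
    "cmp"
    "fmt"
    "slices"
)

type node struct{ Name string }

func main() {
    nodes := []node{{"worker-2"}, {"controlplane-1"}}

    // The comparator returns negative, zero, or positive instead of a bool,
    // and receives elements rather than indices.
    slices.SortFunc(nodes, func(a, b node) int { return cmp.Compare(a.Name, b.Name) })

    fmt.Println(nodes) // [{controlplane-1} {worker-2}]
}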

View File

@@ -14,7 +14,7 @@ import (
     "errors"
     "fmt"
     "os"
-    "sort"
+    "slices"
     "strings"
     "text/tabwriter"
     "text/template"
@@ -279,7 +279,7 @@ var configRemoveCmd = &cobra.Command{
 }
 func sortInPlace(slc []string) []string {
-    sort.Slice(slc, func(i, j int) bool { return slc[i] < slc[j] })
+    slices.Sort(slc)
     return slc
 }
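For any element type satisfying cmp.Ordered, slices.Sort subsumes both sort.Strings and a hand-written sort.Slice comparator, and being generic it avoids the sort.Interface indirection, so it is typically faster. A trivial sketch:

package main

import (
    "fmt"
    "slices"
)

func main() {
    slc := []string{"b", "c", "a"}
    slices.Sort(slc) // works for any cmp.Ordered element type, not just strings
    fmt.Println(slc) // [a b c]
}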
@@ -326,7 +326,7 @@ var configGetContextsCmd = &cobra.Command{
     }
     keys := maps.Keys(c.Contexts)
-    sort.Strings(keys)
+    slices.Sort(keys)
     w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
     fmt.Fprintln(w, "CURRENT\tNAME\tENDPOINTS\tNODES")
@@ -587,7 +587,7 @@ func CompleteConfigContext(*cobra.Command, []string, string) ([]string, cobra.Sh
     }
     contextnames := maps.Keys(c.Contexts)
-    sort.Strings(contextnames)
+    slices.Sort(contextnames)
     return contextnames, cobra.ShellCompDirectiveNoFileComp
 }

View File

@@ -8,7 +8,7 @@ import (
     "context"
     "fmt"
     "os"
-    "sort"
+    "slices"
     "strings"
     "text/tabwriter"
@@ -68,10 +68,7 @@ func containerRender(remotePeer *peer.Peer, resp *machineapi.ContainersResponse)
     defaultNode := client.AddrFromPeer(remotePeer)
     for _, msg := range resp.Messages {
-        sort.Slice(msg.Containers,
-            func(i, j int) bool {
-                return strings.Compare(msg.Containers[i].Id, msg.Containers[j].Id) < 0
-            })
+        slices.SortFunc(msg.Containers, func(a, b *machineapi.ContainerInfo) int { return strings.Compare(a.Id, b.Id) })
         for _, p := range msg.Containers {
             display := p.Id

View File

@@ -9,6 +9,7 @@ import (
     "errors"
     "fmt"
     "os"
+    "slices"
     "strconv"
     "text/tabwriter"
@@ -109,7 +110,7 @@ var duCmd = &cobra.Command{
     if multipleNodes {
         pattern = "%s\t%s\t%s\n"
-        args = append([]any{node}, args...)
+        args = slices.Insert(args, 0, any(node))
     }
     fmt.Fprintf(w, pattern, args...)

View File

@@ -10,6 +10,7 @@ import (
     "fmt"
     "io"
     "os"
+    "slices"
     "strings"
     "text/tabwriter"
@@ -72,7 +73,7 @@ func displayAlarms(messages []alarmMessage) error {
         alarm.GetAlarm().String(),
     }
     if node != "" {
-        args = append([]any{node}, args...)
+        args = slices.Insert(args, 0, any(node))
     }
     fmt.Fprintf(w, pattern, args...)
@@ -241,7 +242,7 @@ var etcdMemberListCmd = &cobra.Command{
         member.IsLearner,
     }
     if node != "" {
-        args = append([]any{node}, args...)
+        args = slices.Insert(args, 0, any(node))
     }
     fmt.Fprintf(w, pattern, args...)
@@ -305,7 +306,7 @@ var etcdStatusCmd = &cobra.Command{
         strings.Join(message.GetMemberStatus().GetErrors(), ", "),
     }
     if node != "" {
-        args = append([]any{node}, args...)
+        args = slices.Insert(args, 0, any(node))
     }
     fmt.Fprintf(w, pattern, args...)

View File

@@ -9,6 +9,7 @@ import (
     "fmt"
     "io"
     "os"
+    "slices"
     "time"
     "github.com/cosi-project/runtime/pkg/safe"
@@ -62,11 +63,7 @@ func (cl *clusterNodes) InitNodeInfos() error {
     nodesByType[machine.TypeWorker] = workerNodeInfos
     cl.nodesByType = nodesByType
-    nodes := make([]cluster.NodeInfo, 0, len(initNodeInfos)+len(controlPlaneNodeInfos)+len(workerNodeInfos))
-    nodes = append(nodes, initNodeInfos...)
-    nodes = append(nodes, controlPlaneNodeInfos...)
-    nodes = append(nodes, workerNodeInfos...)
-    cl.nodes = nodes
+    cl.nodes = slices.Concat(initNodeInfos, controlPlaneNodeInfos, workerNodeInfos)
     return nil
 }
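slices.Concat (Go 1.22) makes a single allocation sized for all of its inputs and copies them in, replacing the make-then-append chain. A standalone sketch with illustrative names:

package main

import (
    "fmt"
    "slices"
)

func main() {
    initNodes := []string{"init-1"}
    cpNodes := []string{"cp-1", "cp-2"}
    workerNodes := []string{"w-1"}

    // One allocation sized len(initNodes)+len(cpNodes)+len(workerNodes);
    // the inputs are copied, so the result never aliases their arrays.
    nodes := slices.Concat(initNodes, cpNodes, workerNodes)

    fmt.Println(nodes) // [init-1 cp-1 cp-2 w-1]
}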

View File

@@ -8,7 +8,7 @@ import (
     "context"
     "errors"
     "fmt"
-    "sort"
+    "slices"
     "strings"
     "github.com/siderolabs/gen/maps"
@@ -63,7 +63,7 @@ func (m *WipeMode) Set(value string) error {
 // Type implements Flag interface.
 func (m *WipeMode) Type() string {
     options := maps.Keys(wipeOptions)
-    sort.Strings(options)
+    slices.Sort(options)
     return strings.Join(options, ", ")
 }

View File

@@ -8,7 +8,7 @@ import (
     "context"
     "fmt"
     "os"
-    "sort"
+    "slices"
    "strings"
     "text/tabwriter"
@@ -68,10 +68,7 @@ func statsRender(remotePeer *peer.Peer, resp *machineapi.StatsResponse) error {
     defaultNode := client.AddrFromPeer(remotePeer)
     for _, msg := range resp.Messages {
-        sort.Slice(msg.Stats,
-            func(i, j int) bool {
-                return strings.Compare(msg.Stats[i].Id, msg.Stats[j].Id) < 0
-            })
+        slices.SortFunc(msg.Stats, func(a, b *machineapi.Stat) int { return strings.Compare(a.Id, b.Id) })
         for _, s := range msg.Stats {
             display := s.Id

View File

@@ -5,11 +5,12 @@
 package talos
 import (
+    "cmp"
     "context"
     "errors"
     "fmt"
     "os"
-    "sort"
+    "slices"
     "strings"
     "text/tabwriter"
     "time"
@@ -146,8 +147,8 @@ func upgradeGetActorID(ctx context.Context, c *client.Client, opts []client.Upgr
 func init() {
     rebootModes := maps.Keys(machine.UpgradeRequest_RebootMode_value)
-    sort.Slice(rebootModes, func(i, j int) bool {
-        return machine.UpgradeRequest_RebootMode_value[rebootModes[i]] < machine.UpgradeRequest_RebootMode_value[rebootModes[j]]
+    slices.SortFunc(rebootModes, func(a, b string) int {
+        return cmp.Compare(machine.UpgradeRequest_RebootMode_value[a], machine.UpgradeRequest_RebootMode_value[b])
     })
     rebootModes = xslices.Map(rebootModes, strings.ToLower)
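The detail worth noting above is sorting map keys by their mapped value rather than lexically. maps.Keys from siderolabs/gen returns a slice directly; the stdlib maps.Keys (Go 1.23) returns an iterator instead. A sketch using only the standard library (the map literal is a hypothetical stand-in for the generated enum table):

package main

import (
    "cmp"
    "fmt"
    "maps"
    "slices"
)

func main() {
    rebootModeValues := map[string]int32{"POWERCYCLE": 1, "DEFAULT": 0}

    // stdlib maps.Keys yields an iterator; collect it into a slice.
    modes := slices.Collect(maps.Keys(rebootModeValues))

    // Order the keys by their mapped enum value rather than alphabetically.
    slices.SortFunc(modes, func(a, b string) int {
        return cmp.Compare(rebootModeValues[a], rebootModeValues[b])
    })

    fmt.Println(modes) // [DEFAULT POWERCYCLE]
}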

View File

@@ -7,7 +7,7 @@ package helpers
 import (
     "fmt"
     "os"
-    "sort"
+    "slices"
     "strings"
     "github.com/siderolabs/gen/maps"
@@ -61,7 +61,7 @@ func (m *Mode) Set(value string) error {
 // Type implements Flag interface.
 func (m *Mode) Type() string {
     options := maps.Keys(m.options)
-    sort.Strings(options)
+    slices.Sort(options)
     return strings.Join(options, ", ")
 }

View File

@@ -82,12 +82,6 @@ func main() {
     }
 }
-type noOPCloser struct {
-    io.ReadSeeker
-}
-
-func (noOPCloser) Close() error { return nil }
-
 func parseModuleInput(module string) (io.ReadSeekCloser, error) {
     if module == "-" {
         moduleData, err := io.ReadAll(os.Stdin)
@@ -95,7 +89,13 @@ func parseModuleInput(module string) (io.ReadSeekCloser, error) {
             return nil, fmt.Errorf("failed to read module from stdin: %w", err)
         }
-        return noOPCloser{bytes.NewReader(moduleData)}, nil
+        return struct {
+            io.ReadSeeker
+            io.Closer
+        }{
+            bytes.NewReader(moduleData),
+            io.NopCloser(nil),
+        }, nil
     }
     moduleData, err := os.Open(module)

View File

@@ -8,6 +8,7 @@ import (
     "context"
     "crypto/tls"
     "fmt"
+    "slices"
     "sync"
     "time"
@@ -212,9 +213,8 @@ func (a *APID) AppendInfo(streaming bool, resp []byte) ([]byte, error) {
         protowire.AppendVarint(nil, (metadataField<<3)|metadataType),
         uint64(len(resp)+len(payload)),
     )
-    resp = append(prefix, resp...)
-
-    return append(resp, payload...), err
+    return slices.Concat(prefix, resp, payload), err
 }
 // BuildError is called to convert error from upstream into response field.

View File

@@ -140,7 +140,7 @@ func buildClusterInfo(ctx context.Context,
     }
     return &clusterState{
-        nodeInfos: append(slices.Clone(controlPlaneNodeInfos), workerNodeInfos...),
+        nodeInfos: slices.Concat(controlPlaneNodeInfos, workerNodeInfos),
         nodeInfosByType: map[machine.Type][]cluster.NodeInfo{
             machine.TypeControlPlane: controlPlaneNodeInfos,
             machine.TypeWorker:       workerNodeInfos,
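The Clone in the removed line was load-bearing: append(a, b...) may write into a's backing array when spare capacity exists, silently aliasing the result with other slices. slices.Concat always allocates a fresh array, so the defensive Clone can go. A runnable demonstration of the hazard:

package main

import (
    "fmt"
    "slices"
)

func main() {
    backing := make([]int, 2, 4) // spare capacity
    a := backing[:2]

    bad := append(a, 9, 9)                // reuses a's backing array
    good := slices.Concat(a, []int{9, 9}) // always a fresh array

    backing = append(backing[:2], 7, 7) // writes through the shared array
    fmt.Println(bad)  // [0 0 7 7] — clobbered via aliasing
    fmt.Println(good) // [0 0 9 9] — unaffected
}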

View File

@@ -676,36 +676,16 @@ func (a nftablesRule) Compile() (*NfTablesCompiled, error) {
         result.Rules = [][]expr.Any{append(rulePre, rulePost...)}
     case rule4 != nil && rule6 == nil:
         result.Rules = [][]expr.Any{
-            append(rulePre,
-                append(
-                    append(matchV4, rule4...),
-                    rulePost...,
-                )...,
-            ),
+            slices.Concat(rulePre, matchV4, rule4, rulePost),
         }
     case rule4 == nil && rule6 != nil:
         result.Rules = [][]expr.Any{
-            append(rulePre,
-                append(
-                    append(matchV6, rule6...),
-                    rulePost...,
-                )...,
-            ),
+            slices.Concat(rulePre, matchV6, rule6, rulePost),
         }
     case rule4 != nil && rule6 != nil:
         result.Rules = [][]expr.Any{
-            append(slices.Clone(rulePre),
-                append(
-                    append(matchV4, rule4...),
-                    rulePost...,
-                )...,
-            ),
-            append(slices.Clone(rulePre),
-                append(
-                    append(matchV6, rule6...),
-                    rulePost...,
-                )...,
-            ),
+            slices.Concat(rulePre, matchV4, rule4, rulePost),
+            slices.Concat(rulePre, matchV6, rule6, rulePost),
         }
     }

View File

@@ -9,7 +9,6 @@ import (
     "fmt"
     "net/netip"
     "slices"
-    "sort"
     "github.com/cosi-project/runtime/pkg/controller"
     "github.com/cosi-project/runtime/pkg/safe"
@@ -75,7 +74,7 @@ func (ctrl *EndpointController) Run(ctx context.Context, r controller.Runtime, l
         endpoints = append(endpoints, memberSpec.Addresses...)
     }
-    sort.Slice(endpoints, func(i, j int) bool { return endpoints[i].Compare(endpoints[j]) < 0 })
+    slices.SortFunc(endpoints, func(a, b netip.Addr) int { return a.Compare(b) })
     if err := safe.WriterModify(
         ctx,

View File

@@ -6,7 +6,7 @@ package ctest
 import (
     "fmt"
-    "sort"
+    "slices"
     "strings"
     "github.com/siderolabs/go-retry/retry"
@@ -46,7 +46,7 @@ func (agg *assertionAggregator) Error() error {
         lines = append(lines, " * "+errorString)
     }
-    sort.Strings(lines)
+    slices.Sort(lines)
     return fmt.Errorf("%s", strings.Join(lines, "\n"))
 }

View File

@@ -8,6 +8,7 @@ import (
     "context"
     "fmt"
     "net/netip"
+    "slices"
     "github.com/cosi-project/runtime/pkg/controller"
     "github.com/cosi-project/runtime/pkg/resource"
@@ -144,13 +145,15 @@ func (ctrl *SpecController) Run(ctx context.Context, r controller.Runtime, _ *za
         advertiseValidSubnets = []string{"0.0.0.0/0", "::/0"}
     }
-    advertisedCIDRs := make([]string, 0, len(advertiseValidSubnets)+len(etcdConfig.TypedSpec().AdvertiseExcludeSubnets))
-    advertisedCIDRs = append(advertisedCIDRs, advertiseValidSubnets...)
-    advertisedCIDRs = append(advertisedCIDRs, xslices.Map(etcdConfig.TypedSpec().AdvertiseExcludeSubnets, func(cidr string) string { return "!" + cidr })...)
+    advertisedCIDRs := slices.Concat(
+        advertiseValidSubnets,
+        xslices.Map(etcdConfig.TypedSpec().AdvertiseExcludeSubnets, func(cidr string) string { return "!" + cidr }),
+    )
-    listenCIDRs := make([]string, 0, len(etcdConfig.TypedSpec().ListenValidSubnets)+len(etcdConfig.TypedSpec().ListenExcludeSubnets))
-    listenCIDRs = append(listenCIDRs, etcdConfig.TypedSpec().ListenValidSubnets...)
-    listenCIDRs = append(listenCIDRs, xslices.Map(etcdConfig.TypedSpec().ListenExcludeSubnets, func(cidr string) string { return "!" + cidr })...)
+    listenCIDRs := slices.Concat(
+        etcdConfig.TypedSpec().ListenValidSubnets,
+        xslices.Map(etcdConfig.TypedSpec().ListenExcludeSubnets, func(cidr string) string { return "!" + cidr }),
+    )
     defaultListenAddress := netip.AddrFrom4([4]byte{0, 0, 0, 0})
     loopbackAddress := netip.AddrFrom4([4]byte{127, 0, 0, 1})

View File

@@ -8,7 +8,7 @@ import (
     "context"
     "fmt"
     "path/filepath"
-    "sort"
+    "slices"
     "github.com/cosi-project/runtime/pkg/controller"
     "github.com/cosi-project/runtime/pkg/safe"
@@ -70,7 +70,7 @@ func (ctrl *CRIConfigPartsController) Run(ctx context.Context, r controller.Runt
         return err
     }
-    sort.Strings(parts)
+    slices.Sort(parts)
     out, err := toml.Merge(parts)
     if err != nil {

View File

@@ -97,7 +97,7 @@ func (ctrl *AddressFilterController) Run(ctx context.Context, r controller.Runti
     }
     if err = safe.WriterModify(ctx, r, network.NewNodeAddressFilter(network.NamespaceName, k8s.NodeAddressFilterNoK8s), func(r *network.NodeAddressFilter) error {
-        r.TypedSpec().ExcludeSubnets = append(slices.Clone(podCIDRs), serviceCIDRs...)
+        r.TypedSpec().ExcludeSubnets = slices.Concat(podCIDRs, serviceCIDRs)
         return nil
     }); err != nil {
@@ -105,7 +105,7 @@ func (ctrl *AddressFilterController) Run(ctx context.Context, r controller.Runti
     }
     if err = safe.WriterModify(ctx, r, network.NewNodeAddressFilter(network.NamespaceName, k8s.NodeAddressFilterOnlyK8s), func(r *network.NodeAddressFilter) error {
-        r.TypedSpec().IncludeSubnets = append(slices.Clone(podCIDRs), serviceCIDRs...)
+        r.TypedSpec().IncludeSubnets = slices.Concat(podCIDRs, serviceCIDRs)
         return nil
     }); err != nil {

View File

@@ -8,6 +8,7 @@ import (
     "context"
     "fmt"
     "net/netip"
+    "slices"
     "github.com/cosi-project/runtime/pkg/controller"
     "github.com/cosi-project/runtime/pkg/resource"
@@ -99,10 +100,10 @@ func (ctrl *NodeIPController) Run(ctx context.Context, r controller.Runtime, log
     }
     addrs := nodeAddrs.TypedSpec().IPs()
-    cidrs := make([]string, 0, len(cfgSpec.ValidSubnets)+len(cfgSpec.ExcludeSubnets))
-    cidrs = append(cidrs, cfgSpec.ValidSubnets...)
-    cidrs = append(cidrs, xslices.Map(cfgSpec.ExcludeSubnets, func(cidr string) string { return "!" + cidr })...)
+    cidrs := slices.Concat(
+        cfgSpec.ValidSubnets,
+        xslices.Map(cfgSpec.ExcludeSubnets, func(cidr string) string { return "!" + cidr }),
+    )
     ips, err := net.FilterIPs(addrs, cidrs)
     if err != nil {

View File

@@ -13,7 +13,6 @@ import (
     "errors"
     "fmt"
     "slices"
-    "sort"
     "sync"
     "time"
@@ -584,8 +583,8 @@ func (t *CRDController) needsUpdate(secret *corev1.Secret, desiredRoles []string
     actualRoles := certificate.Subject.Organization
-    sort.Strings(actualRoles)
-    sort.Strings(desiredRoles)
+    slices.Sort(actualRoles)
+    slices.Sort(desiredRoles)
     if !slices.Equal(actualRoles, desiredRoles) {
         t.logger.Debug("roles in certificate do not match desired roles",

View File

@@ -7,7 +7,6 @@ package kubespan_test
 import (
     "context"
     "slices"
-    "sort"
     "sync"
     "time"
@@ -64,7 +63,7 @@ func (suite *KubeSpanSuite) assertResourceIDs(md resource.Metadata, expectedIDs
     actualIDs := xslices.Map(l.Items, func(r resource.Resource) string { return r.Metadata().ID() })
-    sort.Strings(expectedIDs)
+    slices.Sort(expectedIDs)
     if !slices.Equal(actualIDs, expectedIDs) {
         return retry.ExpectedErrorf("ids do no match expected %v != actual %v", expectedIDs, actualIDs)

View File

@@ -5,11 +5,12 @@
 package network_test
 import (
+    "cmp"
     "context"
     "fmt"
     "net"
     "net/url"
-    "sort"
+    "slices"
     "sync"
     "testing"
     "time"
@@ -126,7 +127,7 @@ func (suite *AddressConfigSuite) TestCmdlineNoNetmask() {
     ifaces, _ := net.Interfaces() //nolint:errcheck // ignoring error here as ifaces will be empty
-    sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].Name < ifaces[j].Name })
+    slices.SortFunc(ifaces, func(a, b net.Interface) int { return cmp.Compare(a.Name, b.Name) })
     ifaceName := ""

View File

@@ -5,11 +5,12 @@
 package network
 import (
+    "cmp"
     "errors"
     "fmt"
     "net"
     "net/netip"
-    "sort"
+    "slices"
     "strconv"
     "strings"
@@ -230,7 +231,7 @@ func ParseCmdlineNetwork(cmdline *procfs.Cmdline) (CmdlineNetworking, error) {
     if linkConfig.LinkName == "" {
         ifaces, _ := net.Interfaces() //nolint:errcheck // ignoring error here as ifaces will be empty
-        sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].Name < ifaces[j].Name })
+        slices.SortFunc(ifaces, func(a, b net.Interface) int { return cmp.Compare(a.Name, b.Name) })
         for _, iface := range ifaces {
             if iface.Flags&net.FlagLoopback != 0 {

View File

@@ -5,10 +5,11 @@
 package network_test
 import (
+    "cmp"
     "fmt"
     "net"
     "net/netip"
-    "sort"
+    "slices"
     "testing"
     "github.com/siderolabs/go-procfs/procfs"
@@ -26,7 +27,7 @@ type CmdlineSuite struct {
 func (suite *CmdlineSuite) TestParse() {
     ifaces, _ := net.Interfaces() //nolint:errcheck // ignoring error here as ifaces will be empty
-    sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].Name < ifaces[j].Name })
+    slices.SortFunc(ifaces, func(a, b net.Interface) int { return cmp.Compare(a.Name, b.Name) })
     defaultIfaceName := ""

View File

@@ -6,9 +6,9 @@
 package network
 import (
+    "cmp"
     "context"
     "fmt"
-    "sort"
     "github.com/cosi-project/runtime/pkg/controller"
     "github.com/cosi-project/runtime/pkg/resource"
@@ -65,32 +65,24 @@ func (ctrl *LinkMergeController) Run(ctx context.Context, r controller.Runtime,
     }
     // list source network configuration resources
-    list, err := r.List(ctx, resource.NewMetadata(network.ConfigNamespaceName, network.LinkSpecType, "", resource.VersionUndefined))
+    list, err := safe.ReaderList[*network.LinkSpec](ctx, r, resource.NewMetadata(network.ConfigNamespaceName, network.LinkSpecType, "", resource.VersionUndefined))
     if err != nil {
         return fmt.Errorf("error listing source network routes: %w", err)
     }
     // sort by link name, configuration layer
-    sort.Slice(list.Items, func(i, j int) bool {
-        left := list.Items[i].(*network.LinkSpec)  //nolint:forcetypeassert
-        right := list.Items[j].(*network.LinkSpec) //nolint:forcetypeassert
-
-        if left.TypedSpec().Name < right.TypedSpec().Name {
-            return false
-        }
-
-        if left.TypedSpec().Name == right.TypedSpec().Name {
-            return left.TypedSpec().ConfigLayer < right.TypedSpec().ConfigLayer
-        }
-
-        return true
+    list.SortFunc(func(left, right *network.LinkSpec) int {
+        if res := cmp.Compare(left.TypedSpec().Name, right.TypedSpec().Name); res != 0 {
+            return res
+        }
+
+        return cmp.Compare(left.TypedSpec().ConfigLayer, right.TypedSpec().ConfigLayer)
     })
     // build final link definition merging multiple layers
-    links := map[string]*network.LinkSpecSpec{}
+    links := make(map[string]*network.LinkSpecSpec, list.Len())
-    for _, res := range list.Items {
-        link := res.(*network.LinkSpec) //nolint:forcetypeassert
-
+    for link := range list.All() {
         id := network.LinkID(link.TypedSpec().Name)
         existing, ok := links[id]
@@ -124,12 +116,12 @@ func (ctrl *LinkMergeController) Run(ctx context.Context, r controller.Runtime,
     }
     // list link for cleanup
-    list, err = r.List(ctx, resource.NewMetadata(network.NamespaceName, network.LinkSpecType, "", resource.VersionUndefined))
+    list, err = safe.ReaderList[*network.LinkSpec](ctx, r, resource.NewMetadata(network.NamespaceName, network.LinkSpecType, "", resource.VersionUndefined))
     if err != nil {
         return fmt.Errorf("error listing resources: %w", err)
     }
-    for _, res := range list.Items {
+    for res := range list.All() {
         if _, ok := links[res.Metadata().ID()]; !ok {
             var okToDestroy bool
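safe.ReaderList returns a typed list, so the per-item type assertions disappear, and list.All() is a Go 1.23 range-over-func iterator. The sketch below shows the general shape of such an API; it is a hand-rolled illustration, not the actual COSI types:

package main

import (
    "fmt"
    "iter"
)

// typedList sketches the shape safe.ReaderList appears to return: a typed
// wrapper whose All method yields elements as an iter.Seq.
type typedList[T any] struct{ items []T }

func (l typedList[T]) Len() int { return len(l.items) }

func (l typedList[T]) All() iter.Seq[T] {
    return func(yield func(T) bool) {
        for _, item := range l.items {
            if !yield(item) { // stop early if the consumer breaks
                return
            }
        }
    }
}

func main() {
    list := typedList[string]{items: []string{"eth0", "eth1"}}

    for name := range list.All() { // range-over-func, Go 1.23+
        fmt.Println(name)
    }
}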

View File

@@ -192,13 +192,13 @@ func (ctrl *NfTablesChainConfigController) Run(ctx context.Context, r controller
     network.NfTablesRule{
         MatchSourceAddress: &network.NfTablesAddressMatch{
             IncludeSubnets: xslices.Map(
-                append(slices.Clone(cfg.Config().Cluster().Network().PodCIDRs()), cfg.Config().Cluster().Network().ServiceCIDRs()...),
+                slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
                 netip.MustParsePrefix,
             ),
         },
         MatchDestinationAddress: &network.NfTablesAddressMatch{
             IncludeSubnets: xslices.Map(
-                append(slices.Clone(cfg.Config().Cluster().Network().PodCIDRs()), cfg.Config().Cluster().Network().ServiceCIDRs()...),
+                slices.Concat(cfg.Config().Cluster().Network().PodCIDRs(), cfg.Config().Cluster().Network().ServiceCIDRs()),
                 netip.MustParsePrefix,
             ),
         },

View File

@@ -320,16 +320,16 @@ func updateAccumulativeAddresses(ctx context.Context, r controller.Runtime, id r
     for _, ip := range accumulative {
         // find insert position using binary search
-        i := sort.Search(len(spec.Addresses), func(j int) bool {
-            return !spec.Addresses[j].Addr().Less(ip.Addr())
+        pos, _ := slices.BinarySearchFunc(spec.Addresses, ip.Addr(), func(prefix netip.Prefix, addr netip.Addr) int {
+            return prefix.Addr().Compare(ip.Addr())
         })
-        if i < len(spec.Addresses) && spec.Addresses[i].Addr().Compare(ip.Addr()) == 0 {
+        if pos < len(spec.Addresses) && spec.Addresses[pos].Addr().Compare(ip.Addr()) == 0 {
             continue
         }
         // insert at position i
-        spec.Addresses = slices.Insert(spec.Addresses, i, ip)
+        spec.Addresses = slices.Insert(spec.Addresses, pos, ip)
     }
     return nil
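slices.BinarySearchFunc returns both the insertion point and whether an equal element was found, which the old sort.Search idiom had to re-derive with a follow-up comparison. A self-contained sketch of the sorted-insert-with-dedup pattern used above:

package main

import (
    "fmt"
    "net/netip"
    "slices"
)

func main() {
    addrs := []netip.Addr{
        netip.MustParseAddr("10.0.0.1"),
        netip.MustParseAddr("10.0.0.5"),
    }

    ip := netip.MustParseAddr("10.0.0.3")

    // BinarySearchFunc reports the insertion point and whether an equal
    // element already exists; the method expression serves as comparator.
    pos, found := slices.BinarySearchFunc(addrs, ip, netip.Addr.Compare)
    if !found {
        addrs = slices.Insert(addrs, pos, ip)
    }

    fmt.Println(addrs) // [10.0.0.1 10.0.0.3 10.0.0.5]
}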

View File

@@ -7,7 +7,7 @@ package network_test
 import (
     "net/netip"
-    "sort"
+    "slices"
     "strings"
     "testing"
     "time"
@@ -51,9 +51,10 @@ func (suite *NodeAddressSuite) TestDefaults() {
     suite.T().Logf("id %q val %s", r.Metadata().ID(), addrs)
     asrt.True(
-        sort.SliceIsSorted(
-            addrs, func(i, j int) bool {
-                return addrs[i].Addr().Compare(addrs[j].Addr()) < 0
+        slices.IsSortedFunc(
+            addrs,
+            func(a, b netip.Prefix) int {
+                return a.Addr().Compare(b.Addr())
             },
         ), "addresses %s", addrs,
     )

View File

@@ -7,7 +7,6 @@ package runtime_test
 import (
     "context"
     "slices"
-    "sort"
     "sync"
     "testing"
     "time"
@@ -288,8 +287,8 @@ func TestBuildExpectedImageNames(t *testing.T) {
     expectedImageNames := maps.Keys(expectedImages)
-    sort.Strings(test.expectedImageNames)
-    sort.Strings(expectedImageNames)
+    slices.Sort(test.expectedImageNames)
+    slices.Sort(expectedImageNames)
     assert.Equal(t, test.expectedImageNames, expectedImageNames)
 })

View File

@@ -7,6 +7,7 @@ package runtime
 import (
     "context"
     "errors"
+    "slices"
     "sync"
 )
@@ -98,7 +99,7 @@ func (s *DrainSubscription) Cancel() {
     for i, sub := range s.drainer.subscriptions {
         if sub == s {
-            s.drainer.subscriptions = append(s.drainer.subscriptions[:i], s.drainer.subscriptions[i+1:]...)
+            s.drainer.subscriptions = slices.Delete(s.drainer.subscriptions, i, i+1)
             break
         }
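slices.Delete(s, i, i+1) replaces the append-splice idiom; since Go 1.22 it also zeroes the vacated tail slots, so removed pointer elements become collectible. A minimal sketch:

package main

import (
    "fmt"
    "slices"
)

func main() {
    subs := []string{"a", "b", "c", "d"}

    i := 1 // index of the element to drop

    // Equivalent to append(subs[:i], subs[i+1:]...), but the vacated tail
    // slot is zeroed (Go 1.22+), so pointer elements don't leak.
    subs = slices.Delete(subs, i, i+1)

    fmt.Println(subs) // [a c d]
}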

View File

@@ -14,7 +14,7 @@ import (
     "net/url"
     "os"
     "path/filepath"
-    "sort"
+    "slices"
     "strings"
     "github.com/cosi-project/runtime/pkg/safe"
@@ -577,7 +577,7 @@ func (n *Nocloud) applyNetworkConfigV2(config *NetworkConfig, st state.State, ne
     }
     ethernetNames := maps.Keys(config.Ethernets)
-    sort.Strings(ethernetNames)
+    slices.Sort(ethernetNames)
     for _, name := range ethernetNames {
         eth := config.Ethernets[name]

View File

@@ -5,11 +5,12 @@
 package system
 import (
+    "cmp"
     "context"
     "errors"
     "fmt"
     "log"
-    "sort"
+    "slices"
     "strings"
     "sync"
     "time"
@@ -386,7 +387,7 @@ func (s *singleton) List() (result []*ServiceRunner) {
     // TODO: results should be sorted properly with topological sort on dependencies
     // but, we don't have dependencies yet, so sort by service id for now to get stable order
-    sort.Slice(result, func(i, j int) bool { return result[i].id < result[j].id })
+    slices.SortFunc(result, func(a, b *ServiceRunner) int { return cmp.Compare(a.id, b.id) })
     return
 }

View File

@@ -8,7 +8,7 @@ package api
 import (
     "context"
-    "sort"
+    "slices"
     "testing"
     "time"
@@ -73,7 +73,7 @@ func (suite *ResetSuite) TestResetNodeByNode() {
     nodes := suite.DiscoverNodeInternalIPs(suite.ctx)
     suite.Require().NotEmpty(nodes)
-    sort.Strings(nodes)
+    slices.Sort(nodes)
     for _, node := range nodes {
         suite.ResetNode(suite.ctx, node, &machineapi.ResetRequest{

View File

@@ -30,7 +30,7 @@ func newNodeInfo(masterNodes, workerNodes []string) (*infoWrapper, error) {
     }
     return &infoWrapper{
-        nodeInfos: append(slices.Clone(controlPlaneNodeInfos), workerNodeInfos...),
+        nodeInfos: slices.Concat(controlPlaneNodeInfos, workerNodeInfos),
         nodeInfosByType: map[machine.Type][]cluster.NodeInfo{
             machine.TypeControlPlane: controlPlaneNodeInfos,
             machine.TypeWorker:       workerNodeInfos,

View File

@@ -6,7 +6,12 @@
 package base
-import "strings"
+import (
+    "slices"
+    "strings"
+
+    "github.com/siderolabs/gen/xiter/xstrings"
+)
 // StringList implements flag.Value for list of strings.
 type StringList []string
@@ -18,7 +23,7 @@ func (l *StringList) String() string {
 // Set implements flag.Value.
 func (l *StringList) Set(value string) error {
-    *l = append(*l, strings.Split(value, ",")...)
+    *l = slices.AppendSeq(*l, xstrings.SplitSeq(value, ","))
     return nil
 }
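slices.AppendSeq (Go 1.23) appends everything an iter.Seq yields. xstrings.SplitSeq from siderolabs/gen presumably yields the split substrings lazily; the standard-library counterpart is strings.SplitSeq (Go 1.24), which the sketch below uses instead, so no intermediate []string is built the way strings.Split would:

package main

import (
    "fmt"
    "slices"
    "strings"
)

func main() {
    var l []string

    // SplitSeq yields each substring lazily; AppendSeq appends every
    // yielded value without an intermediate slice allocation.
    l = slices.AppendSeq(l, strings.SplitSeq("10.5.0.2,10.5.0.3", ","))

    fmt.Println(l) // [10.5.0.2 10.5.0.3]
}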

View File

@@ -6,7 +6,7 @@ package components
 import (
     "fmt"
-    "sort"
+    "slices"
     "strings"
     "github.com/gdamore/tcell/v2"
@@ -109,7 +109,7 @@ func (widget *Footer) nodesText() string {
 func (widget *Footer) screensText() string {
     screenKeys := maps.Keys(widget.screenKeyToName)
-    sort.Strings(screenKeys)
+    slices.Sort(screenKeys)
     screenTexts := make([]string, 0, len(widget.screenKeyToName))

View File

@@ -6,7 +6,7 @@ package components
 import (
     "net/netip"
-    "sort"
+    "slices"
     "strings"
     "github.com/cosi-project/runtime/pkg/resource"
@@ -197,11 +197,9 @@ func (widget *NetworkInfo) setAddresses(data resourcedata.Data, nodeAddress *net
         return notAvailable
     }
-    strs := xslices.Map(res.TypedSpec().Addresses, func(prefix netip.Prefix) string {
-        return prefix.String()
-    })
+    strs := xslices.Map(res.TypedSpec().Addresses, netip.Prefix.String)

-    sort.Strings(strs)
+    slices.Sort(strs)
     return strings.Join(strs, ", ")
 }
@@ -238,8 +236,8 @@ func (widget *NetworkInfo) gateway(statuses []*network.RouteStatus) string {
         return notAvailable
     }
-    sort.Strings(gatewaysV4)
-    sort.Strings(gatewaysV6)
+    slices.Sort(gatewaysV4)
+    slices.Sort(gatewaysV6)
     return strings.Join(append(gatewaysV4, gatewaysV6...), ", ")
 }

View File

@@ -7,7 +7,7 @@ package dashboard
 import (
     "context"
     "fmt"
-    "sort"
+    "slices"
     "strings"
     "github.com/gdamore/tcell/v2"
@@ -348,7 +348,7 @@ func (widget *NetworkConfigGrid) updateNodeData(data resourcedata.Data) {
     links := maps.Keys(nodeData.linkSet)
-    sort.Strings(links)
+    slices.Sort(links)
     allLinks := append([]string{interfaceNone}, links...)

View File

@@ -6,11 +6,12 @@
 package encryption
 import (
+    "cmp"
     "context"
     "encoding/json"
     "errors"
     "fmt"
-    "sort"
+    "slices"
     "strconv"
     "time"
@@ -66,7 +67,7 @@ func NewHandler(encryptionConfig block.EncryptionSpec, volumeID string, getSyste
     }
     //nolint:scopelint
-    sort.Slice(keyHandlers, func(i, j int) bool { return keyHandlers[i].Slot() < keyHandlers[j].Slot() })
+    slices.SortFunc(keyHandlers, func(a, b keys.Handler) int { return cmp.Compare(a.Slot(), b.Slot()) })
     provider := luks.New(
         cipher,

View File

@@ -5,10 +5,11 @@
 package extensions
 import (
+    "cmp"
     "fmt"
     "os"
     "path/filepath"
-    "sort"
+    "slices"
     "github.com/siderolabs/talos/pkg/machinery/extensions"
 )
@@ -28,7 +29,7 @@ func List(rootPath string) ([]*Extension, error) {
         return nil, nil
     }
-    sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() })
+    slices.SortFunc(items, func(a, b os.DirEntry) int { return cmp.Compare(a.Name(), b.Name()) })
     result := make([]*Extension, 0, len(items))

View File

@@ -7,7 +7,6 @@ package talos_test
 import (
     "bytes"
     "slices"
-    "sort"
     "testing"
     "github.com/stretchr/testify/assert"
@@ -86,7 +85,7 @@ func TestMarshalUnmarshal(t *testing.T) {
         assert.Equal(t, val3, val)
         tags := a.ListTags()
-        sort.Slice(tags, func(i, j int) bool { return tags[i] < tags[j] })
+        slices.Sort(tags)
         assert.Equal(t, []uint8{adv.Reserved1, adv.Reserved2, adv.Reserved3}, tags)
     }
 }

View File

@@ -8,6 +8,7 @@ import (
     "bytes"
     "os"
     "path/filepath"
+    "slices"
     "github.com/stretchr/testify/suite"
 )
@@ -74,8 +75,10 @@ func (suite *CommonSuite) SetupSuite() {
     var contents []byte
     if file.Size > 0 {
-        contents = bytes.Repeat(file.Contents, file.Size/len(file.Contents))
-        contents = append(contents, file.Contents[:file.Size-file.Size/len(file.Contents)*len(file.Contents)]...)
+        contents = slices.Concat(
+            bytes.Repeat(file.Contents, file.Size/len(file.Contents)),
+            file.Contents[:file.Size-file.Size/len(file.Contents)*len(file.Contents)],
+        )
     } else {
         contents = file.Contents
     }
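The two Concat arguments above produce exactly file.Size bytes: size/len(c) whole repetitions, then a prefix holding the remainder (written as size - size/len(c)*len(c)). A small check of the arithmetic:

package main

import (
    "bytes"
    "fmt"
    "slices"
)

func main() {
    contents := []byte("abc")
    size := 8

    // size/len whole copies (8/3 = 2 -> "abcabc"), then the remainder
    // prefix (8 - 2*3 = 2 -> "ab"); together exactly `size` bytes.
    padded := slices.Concat(
        bytes.Repeat(contents, size/len(contents)),
        contents[:size-size/len(contents)*len(contents)],
    )

    fmt.Printf("%s (%d bytes)\n", padded, len(padded)) // abcabcab (8 bytes)
}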

View File

@@ -6,7 +6,7 @@ package argsbuilder
 import (
     "fmt"
-    "sort"
+    "slices"
     "strings"
     "github.com/siderolabs/gen/maps"
@@ -92,7 +92,7 @@ func (a Args) Set(k, v Key) ArgsBuilder {
 // Args implements the ArgsBuilder interface.
 func (a Args) Args() []string {
     keys := maps.Keys(a)
-    sort.Strings(keys)
+    slices.Sort(keys)
     args := make([]string, 0, len(a))

View File

@@ -9,7 +9,7 @@ import (
     "errors"
     "fmt"
     "io"
-    "sort"
+    "slices"
     "strings"
     "time"
@@ -45,9 +45,7 @@ func (s *APIBootstrapper) Bootstrap(ctx context.Context, out io.Writer) error {
         return errors.New("no control plane nodes to bootstrap")
     }
-    sort.Slice(controlPlaneNodes, func(i, j int) bool {
-        return controlPlaneNodes[i].IPs[0].String() < controlPlaneNodes[j].IPs[0].String()
-    })
+    slices.SortFunc(controlPlaneNodes, func(a, b NodeInfo) int { return strings.Compare(a.IPs[0].String(), b.IPs[0].String()) })
     nodeIP := controlPlaneNodes[0].IPs[0]
     nodeCtx := client.WithNodes(ctx, nodeIP.String())

View File

@@ -5,11 +5,12 @@
 package check
 import (
+    "cmp"
     "context"
     "errors"
     "fmt"
     "net/url"
-    "sort"
+    "slices"
     "github.com/siderolabs/gen/maps"
     "github.com/siderolabs/gen/xslices"
@@ -55,8 +56,8 @@ func EtcdConsistentAssertion(ctx context.Context, cl ClusterInfo) error {
         return errors.New("no messages returned")
     }
-    sort.Slice(messages, func(i, j int) bool {
-        return messages[i].GetMetadata().GetHostname() < messages[j].GetMetadata().GetHostname()
+    slices.SortFunc(messages, func(a, b *machineapi.EtcdMembers) int {
+        return cmp.Compare(a.GetMetadata().GetHostname(), b.GetMetadata().GetHostname())
     })
     for i, message := range messages {

View File

@@ -6,10 +6,11 @@
 package check
 import (
+    "cmp"
     "context"
     "errors"
     "fmt"
-    "sort"
+    "slices"
     "github.com/hashicorp/go-multierror"
@@ -111,8 +112,8 @@ func ServiceHealthAssertion(ctx context.Context, cl ClusterInfo, service string,
     var multiErr *multierror.Error
     // sort service info list so that errors returned are consistent
-    sort.Slice(servicesInfo, func(i, j int) bool {
-        return servicesInfo[i].Metadata.GetHostname() < servicesInfo[j].Metadata.GetHostname()
+    slices.SortFunc(servicesInfo, func(a, b client.ServiceInfo) int {
+        return cmp.Compare(a.Metadata.GetHostname(), b.Metadata.GetHostname())
     })
     for _, serviceInfo := range servicesInfo {

View File

@@ -10,7 +10,7 @@ import (
     "fmt"
     "io"
     "net/netip"
-    "sort"
+    "slices"
     "github.com/siderolabs/gen/maps"
     "github.com/siderolabs/gen/xslices"
@@ -123,7 +123,7 @@ func NodesMatch(expected, actual []NodeInfo) error {
     if len(actualNodes) > 0 {
         unexpectedIPs := xslices.FlatMap(maps.Keys(actualNodes), func(n *NodeInfo) []netip.Addr { return n.IPs })
-        sort.Slice(unexpectedIPs, func(i, j int) bool { return unexpectedIPs[i].Less(unexpectedIPs[j]) })
+        slices.SortFunc(unexpectedIPs, func(a, b netip.Addr) int { return a.Compare(b) })
         return fmt.Errorf("unexpected nodes with IPs %q", unexpectedIPs)
     }

View File

@@ -15,6 +15,7 @@ import (
     "github.com/cosi-project/runtime/pkg/resource"
     "github.com/cosi-project/runtime/pkg/safe"
     "github.com/cosi-project/runtime/pkg/state"
+    "github.com/siderolabs/gen/xiter"
     "github.com/siderolabs/go-retry/retry"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -37,7 +38,7 @@ func upgradeKubelet(ctx context.Context, cluster UpgradeProvider, options Upgrad
     options.Log("updating kubelet to version %q", options.Path.ToVersion())
-    for _, node := range append(slices.Clone(options.controlPlaneNodes), options.workerNodes...) {
+    for node := range xiter.Concat(slices.Values(options.controlPlaneNodes), slices.Values(options.workerNodes)) {
         if err := upgradeKubeletOnNode(ctx, cluster, options, node); err != nil {
             return fmt.Errorf("error updating node %q: %w", node, err)
         }
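slices.Values (Go 1.23) adapts a slice to an iter.Seq, so concatenating sequences walks both node lists without allocating a combined slice the way append(slices.Clone(...), ...) did. xiter.Concat comes from siderolabs/gen; the helper below is a hand-written sketch of what it presumably does:

package main

import (
    "fmt"
    "iter"
    "slices"
)

// concat sketches what xiter.Concat presumably does: yield every element
// of each sequence in turn, stopping early if the consumer does.
func concat[V any](seqs ...iter.Seq[V]) iter.Seq[V] {
    return func(yield func(V) bool) {
        for _, seq := range seqs {
            for v := range seq {
                if !yield(v) {
                    return
                }
            }
        }
    }
}

func main() {
    controlPlanes := []string{"10.5.0.2"}
    workers := []string{"10.5.0.3", "10.5.0.4"}

    // Visits both slices with no combined allocation.
    for node := range concat(slices.Values(controlPlanes), slices.Values(workers)) {
        fmt.Println(node)
    }
}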

View File

@@ -8,12 +8,14 @@ import (
     "context"
     "errors"
     "fmt"
+    "slices"
     "strings"
     "time"
     "github.com/cosi-project/runtime/pkg/resource"
     "github.com/cosi-project/runtime/pkg/state"
     "github.com/siderolabs/gen/channel"
+    "github.com/siderolabs/gen/xiter"
     "github.com/siderolabs/go-kubernetes/kubernetes/manifests"
     "github.com/siderolabs/go-kubernetes/kubernetes/upgrade"
     "google.golang.org/grpc/codes"
@@ -148,7 +150,7 @@ func prePullImages(ctx context.Context, talosClient *client.Client, options Upgr
     imageRef := fmt.Sprintf("%s:v%s", options.KubeletImage, options.Path.ToVersion())
-    for _, node := range append(append([]string(nil), options.controlPlaneNodes...), options.workerNodes...) {
+    for node := range xiter.Concat(slices.Values(options.controlPlaneNodes), slices.Values(options.workerNodes)) {
         options.Log(" > %q: pre-pulling %s", node, imageRef)
         err := talosClient.ImagePull(client.WithNode(ctx, node), common.ContainerdNamespace_NS_SYSTEM, imageRef)

View File

@@ -8,7 +8,7 @@ package log
 import (
     "context"
     "log"
-    "sort"
+    "slices"
     "strings"
     "time"
@@ -38,7 +38,7 @@ var sensitiveFields = map[string]struct{}{
 func ExtractMetadata(ctx context.Context) string {
     md, _ := metadata.FromIncomingContext(ctx)
     keys := maps.Keys(md)
-    sort.Strings(keys)
+    slices.Sort(keys)
     pairs := make([]string, 0, len(keys))

View File

@@ -7,10 +7,11 @@ package filemap
 import (
     "archive/tar"
+    "cmp"
     "io"
     "os"
     "path/filepath"
-    "sort"
+    "slices"
     v1 "github.com/google/go-containerregistry/pkg/v1"
     "github.com/google/go-containerregistry/pkg/v1/tarball"
@@ -105,9 +106,7 @@ func build(filemap []File) io.ReadCloser {
 //
 // A filemap is a path -> file content map representing a file system.
 func Layer(filemap []File) (v1.Layer, error) {
-    sort.Slice(filemap, func(i, j int) bool {
-        return filemap[i].ImagePath < filemap[j].ImagePath
-    })
+    slices.SortFunc(filemap, func(a, b File) int { return cmp.Compare(a.ImagePath, b.ImagePath) })
     // Return a new copy of the buffer each time it's opened.
     return tarball.LayerFromOpener(func() (io.ReadCloser, error) {

View File

@@ -10,6 +10,7 @@ import (
     "context"
     "errors"
     "fmt"
+    "slices"
     "strings"
     "github.com/cosi-project/runtime/pkg/state"
@@ -122,7 +123,7 @@ func (container *Container) PatchV1Alpha1(patcher func(*v1alpha1.Config) error)
         return !ok
     })
-    return New(append([]config.Document{cfg}, otherDocs...)...)
+    return New(slices.Insert(otherDocs, 0, config.Document(cfg))...)
 }
 // Readonly implements config.Container interface.

View File

@@ -5,9 +5,10 @@
 package encoder
 import (
+    "cmp"
     "encoding"
     "reflect"
-    "sort"
+    "slices"
     "strings"
     yaml "gopkg.in/yaml.v3"
@@ -287,9 +288,7 @@ func toYamlNode(in any, options *Options) (*yaml.Node, error) {
     node.Kind = yaml.MappingNode
     keys := v.MapKeys()
     // always iterate keys in alphabetical order to preserve the same output for maps
-    sort.Slice(keys, func(i, j int) bool {
-        return keys[i].String() < keys[j].String()
-    })
+    slices.SortFunc(keys, func(a, b reflect.Value) int { return cmp.Compare(a.String(), b.String()) })
     for _, k := range keys {
         element := v.MapIndex(k)

View File

@@ -6,7 +6,7 @@ package merge_test
 import (
     "fmt"
-    "sort"
+    "slices"
     "testing"
     "github.com/siderolabs/go-pointer"
@@ -40,7 +40,7 @@ func (s *CustomSlice) Merge(other any) error {
     }
     *s = append(*s, otherSlice...)
-    sort.Strings(*s)
+    slices.Sort(*s)
     return nil
 }

View File

@@ -10,6 +10,7 @@ import (
     "fmt"
     "io"
     "math"
+    "slices"
     "strings"
     "text/tabwriter"
     "time"
@@ -58,7 +59,7 @@ func RenderMounts(resp *machine.MountsResponse, output io.Writer, remotePeer *pe
     if defaultNode != "" {
         format = "%s\t" + format
-        args = append([]any{node}, args...)
+        args = slices.Insert(args, 0, any(node))
     }
     fmt.Fprintf(w, format, args...)

View File

@@ -6,7 +6,7 @@ package k8s
 import (
     "net/netip"
-    "sort"
+    "slices"
     "github.com/cosi-project/runtime/pkg/resource"
     "github.com/cosi-project/runtime/pkg/resource/meta"
@@ -71,13 +71,14 @@ type EndpointList []netip.Addr
 // Merge endpoints from multiple Endpoint resources into a single list.
 func (l EndpointList) Merge(endpoint *Endpoint) EndpointList {
     for _, ip := range endpoint.TypedSpec().Addresses {
-        idx := sort.Search(len(l), func(i int) bool { return !l[i].Less(ip) })
+        idx, _ := slices.BinarySearchFunc(l, ip, func(a netip.Addr, target netip.Addr) int {
+            return a.Compare(target)
+        })
         if idx < len(l) && l[idx].Compare(ip) == 0 {
             continue
         }
-        l = append(l[:idx], append([]netip.Addr{ip}, l[idx:]...)...)
+        l = slices.Insert(l, idx, ip)
     }
     return l

View File

@@ -7,7 +7,7 @@ package secrets
 import (
     "net"
     "net/netip"
-    "sort"
+    "slices"
     "github.com/cosi-project/runtime/pkg/resource"
     "github.com/cosi-project/runtime/pkg/resource/meta"
@@ -126,8 +126,8 @@ func (spec *CertSANSpec) StdIPs() []net.IP {
 // Sort the CertSANs.
 func (spec *CertSANSpec) Sort() {
-    sort.Strings(spec.DNSNames)
-    sort.Slice(spec.IPs, func(i, j int) bool { return spec.IPs[i].Compare(spec.IPs[j]) < 0 })
+    slices.Sort(spec.DNSNames)
+    slices.SortFunc(spec.IPs, func(a, b netip.Addr) int { return a.Compare(b) })
 }

 func init() {

View File

@@ -5,7 +5,7 @@
 package role
 import (
-    "sort"
+    "slices"
     "strings"
     "github.com/siderolabs/gen/maps"
@@ -93,7 +93,7 @@ func Parse(str []string) (Set, []string) {
 // Strings returns a set as a slice of strings.
 func (s Set) Strings() []string {
     res := maps.KeysFunc(s.roles, func(r Role) string { return string(r) })
-    sort.Strings(res)
+    slices.Sort(res)
     return res
 }

View File

@@ -12,6 +12,7 @@ import (
     "os/exec"
     "path/filepath"
     "runtime"
+    "slices"
     "strings"
     "github.com/coreos/go-iptables/iptables"
@@ -52,7 +53,7 @@ type preflightCheckContext struct {
     arch Arch
 }
-func (check *preflightCheckContext) verifyRoot(ctx context.Context) error {
+func (check *preflightCheckContext) verifyRoot(context.Context) error {
     if os.Geteuid() != 0 {
         return errors.New("error: please run as root user (CNI requirement), we recommend running with `sudo -E`")
     }
@@ -60,7 +61,7 @@ func (check *preflightCheckContext) verifyRoot(ctx context.Context) error {
     return nil
 }
-func (check *preflightCheckContext) checkKVM(ctx context.Context) error {
+func (check *preflightCheckContext) checkKVM(context.Context) error {
     f, err := os.OpenFile("/dev/kvm", os.O_RDWR, 0)
     if err != nil {
         return fmt.Errorf("error opening /dev/kvm, please make sure KVM support is enabled in Linux kernel: %w", err)
@@ -69,7 +70,7 @@ func (check *preflightCheckContext) checkKVM(ctx context.Context) error {
     return f.Close()
 }
-func (check *preflightCheckContext) qemuExecutable(ctx context.Context) error {
+func (check *preflightCheckContext) qemuExecutable(context.Context) error {
     if check.arch.QemuExecutable() == "" {
         return fmt.Errorf("QEMU executable (qemu-system-%s or qemu-kvm) not found, please install QEMU with package manager", check.arch.QemuArch())
     }
@@ -77,7 +78,7 @@ func (check *preflightCheckContext) qemuExecutable(ctx context.Context) error {
     return nil
 }
-func (check *preflightCheckContext) checkFlashImages(ctx context.Context) error {
+func (check *preflightCheckContext) checkFlashImages(context.Context) error {
     for _, flashImage := range check.arch.PFlash(check.options.UEFIEnabled, check.options.ExtraUEFISearchPaths) {
         if len(flashImage.SourcePaths) == 0 {
             continue
@@ -103,7 +104,7 @@ func (check *preflightCheckContext) checkFlashImages(ctx context.Context) error
     return nil
 }
-func (check *preflightCheckContext) swtpmExecutable(ctx context.Context) error {
+func (check *preflightCheckContext) swtpmExecutable(context.Context) error {
     if check.options.TPM2Enabled {
         if _, err := exec.LookPath("swtpm"); err != nil {
             return fmt.Errorf("swtpm not found in PATH, please install swtpm-tools with the package manager: %w", err)
@@ -113,8 +114,8 @@ func (check *preflightCheckContext) swtpmExecutable(ctx context.Context) error {
     return nil
 }
-func (check *preflightCheckContext) cniDirectories(ctx context.Context) error {
-    cniDirs := append([]string{}, check.request.Network.CNI.BinPath...)
+func (check *preflightCheckContext) cniDirectories(context.Context) error {
+    cniDirs := slices.Clone(check.request.Network.CNI.BinPath)
     cniDirs = append(cniDirs, check.request.Network.CNI.CacheDir, check.request.Network.CNI.ConfDir)
     for _, cniDir := range cniDirs {
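Dropping the name of an unused parameter (ctx context.Context becomes context.Context) keeps the signature compatible with the check-function type while documenting that the argument is intentionally ignored, and it silences unused-parameter linters. A tiny sketch:

package main

import (
    "context"
    "fmt"
)

// A check that doesn't use its context omits the parameter name; the
// signature still satisfies func(context.Context) error.
func verifySomething(context.Context) error { return nil }

func main() {
    checks := []func(context.Context) error{verifySomething}

    for _, check := range checks {
        fmt.Println(check(context.Background()))
    }
}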

View File

@@ -6,6 +6,7 @@ package vm
 import (
     "fmt"
+    "slices"
     multierror "github.com/hashicorp/go-multierror"
@@ -16,8 +17,7 @@ import (
 func (p *Provisioner) DestroyNodes(cluster provision.ClusterInfo, options *provision.Options) error {
     errCh := make(chan error)
-    nodes := append([]provision.NodeInfo{}, cluster.Nodes...)
-    nodes = append(nodes, cluster.ExtraNodes...)
+    nodes := slices.Concat(cluster.Nodes, cluster.ExtraNodes)
     for _, node := range nodes {
         go func(node provision.NodeInfo) {