chore: enable 'wsl' linter and fix all the issues

I wish there were fewer of them :)

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>

Committed by: Andrey Smirnov
Parent: edc21ea910
Commit: c2cb0f9778
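For context, wsl ("whitespace linter") mostly enforces blank lines around blocks and between unrelated statements, which is why nearly every hunk below only inserts empty lines. A minimal before/after sketch of the pattern (hypothetical countRunning helper and Container type, not taken from this diff):

// Before: wsl flags the return cuddled against the multi-line loop.
func countRunning(containers []Container) int {
    n := 0
    for _, c := range containers {
        if c.Running {
            n++
        }
    }
    return n
}

// After: blank lines separate the loop from the surrounding statements.
func countRunning(containers []Container) int {
    n := 0

    for _, c := range containers {
        if c.Running {
            n++
        }
    }

    return n
}

// Container is a stand-in type for the example only.
type Container struct{ Running bool }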
@@ -77,6 +77,7 @@ var clusterDownCmd = &cobra.Command{
// nolint: gocyclo
func create() (err error) {
    ctx := context.Background()

    cli, err := client.NewEnvClient()
    if err != nil {
        return err

@@ -90,6 +91,7 @@ func create() (err error) {
    if err != nil {
        helpers.Fatalf("error parsing --cpus: %s", err)
    }

    memory := int64(clusterMemory) * 1024 * 1024

    // Ensure the image is present.

@@ -147,6 +149,7 @@ func create() (err error) {
    // Create the worker nodes.

    requests = []*node.Request{}

    for i := 1; i <= workers; i++ {
        r := &node.Request{
            Type: generate.TypeJoin,

@@ -171,6 +174,7 @@ func create() (err error) {
// nolint: gocyclo
func createNodes(requests []*node.Request) (err error) {
    var wg sync.WaitGroup

    wg.Add(len(requests))

    for _, req := range requests {

@@ -199,12 +203,14 @@ func destroy() error {
    filters := filters.NewArgs()
    filters.Add("label", "talos.owned=true")
    filters.Add("label", "talos.cluster.name="+clusterName)

    containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filters})
    if err != nil {
        return err
    }

    var wg sync.WaitGroup

    wg.Add(len(containers))

    for _, container := range containers {

@@ -241,6 +247,7 @@ func ensureImageExists(ctx context.Context, cli *client.Client, image string) er
    // (e.g. domain/repo/image:tag => repo/image:tag).
    familiarName := reference.FamiliarName(ref)
    tag := ""

    if tagged, isTagged := ref.(reference.Tagged); isTagged {
        tag = tagged.Tag()
    }

@@ -255,10 +262,13 @@ func ensureImageExists(ctx context.Context, cli *client.Client, image string) er
    if len(images) == 0 {
        fmt.Println("downloading", image)

        var reader io.ReadCloser

        if reader, err = cli.ImagePull(ctx, image, types.ImagePullOptions{}); err != nil {
            return err
        }

        if _, err = io.Copy(ioutil.Discard, reader); err != nil {
            return err
        }

@@ -303,6 +313,7 @@ func destroyNetwork(cli *client.Client) error {
    }

    var result *multierror.Error

    for _, network := range networks {
        if err := cli.NetworkRemove(context.Background(), network.ID); err != nil {
            result = multierror.Append(result, err)

@@ -329,9 +340,11 @@ func saveConfig(input *generate.Input) (err error) {
    if err != nil {
        return err
    }

    if c.Contexts == nil {
        c.Contexts = map[string]*config.Context{}
    }

    c.Contexts[input.ClusterName] = newConfig.Contexts[input.ClusterName]

    c.Context = input.ClusterName

@@ -344,10 +357,12 @@ func parseCPUShare() (int64, error) {
    if !ok {
        return 0, errors.Errorf("failed to parsing as a rational number: %s", clusterCpus)
    }

    nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
    if !nano.IsInt() {
        return 0, errors.New("value is too precise")
    }

    return nano.Num().Int64(), nil
}
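As a standalone illustration of the NanoCPU math in parseCPUShare above, a minimal sketch (hypothetical program; it assumes the same math/big logic with a literal "1.5" in place of the clusterCpus flag):

package main

import (
    "fmt"
    "math/big"
)

func main() {
    cpu, ok := new(big.Rat).SetString("1.5")
    if !ok {
        panic("failed to parse as a rational number")
    }

    // 1.5 CPUs * 1e9 = 1,500,000,000 NanoCPUs; an input such as
    // "0.0000000001" would fail the IsInt check as too precise.
    nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
    if !nano.IsInt() {
        panic("value is too precise")
    }

    fmt.Println(nano.Num().Int64()) // 1500000000
}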
@@ -87,12 +87,14 @@ func NewNode(clusterName string, req *Request) (err error) {
    case generate.TypeInit:
        var osdPort nat.Port
        osdPort, err = nat.NewPort("tcp", "50000")

        if err != nil {
            return err
        }

        var apiServerPort nat.Port
        apiServerPort, err = nat.NewPort("tcp", "443")

        if err != nil {
            return err
        }

@@ -116,9 +118,11 @@ func NewNode(clusterName string, req *Request) (err error) {
                },
            },
        }

        fallthrough
    case generate.TypeControlPlane:
        containerConfig.Volumes["/var/lib/etcd"] = struct{}{}

        if req.IP == nil {
            return errors.New("an IP address must be provided when creating a master node")
        }

@@ -131,6 +135,7 @@ func NewNode(clusterName string, req *Request) (err error) {
    // Create the container.

    ctx := context.Background()

    cli, err := client.NewEnvClient()
    if err != nil {
        return err
@@ -144,6 +144,7 @@ func genV1Alpha1Config(args []string) {
    if err != nil {
        helpers.Fatalf("failed to generate PKI and tokens: %v", err)
    }

    input.AdditionalSubjectAltNames = additionalSANs
    input.ControlPlaneEndpoint = canonicalControlplaneEndpoint

@@ -153,6 +154,7 @@ func genV1Alpha1Config(args []string) {
    }

    var udType genv1alpha1.Type

    for idx := range strings.Split(args[1], ",") {
        if idx == 0 {
            udType = genv1alpha1.TypeInit

@@ -163,12 +165,14 @@ func genV1Alpha1Config(args []string) {
        if err = writeV1Alpha1Config(input, udType, "master-"+strconv.Itoa(idx+1)); err != nil {
            helpers.Fatalf("failed to generate config for %s: %v", "master-"+strconv.Itoa(idx+1), err)
        }

        fmt.Println("created file", workingDir+"/master-"+strconv.Itoa(idx+1)+".yaml")
    }

    if err = writeV1Alpha1Config(input, genv1alpha1.TypeJoin, "worker"); err != nil {
        helpers.Fatalf("failed to generate config for %s: %v", "worker", err)
    }

    fmt.Println("created file", workingDir+"/worker.yaml")

    newConfig := &config.Config{

@@ -187,6 +191,7 @@ func genV1Alpha1Config(args []string) {
    if err != nil {
        helpers.Fatalf("failed to marshal config: %+v", err)
    }

    if err = ioutil.WriteFile("talosconfig", data, 0644); err != nil {
        helpers.Fatalf("%v", err)
    }

@@ -196,6 +201,7 @@ func genV1Alpha1Config(args []string) {

func writeV1Alpha1Config(input *genv1alpha1.Input, t genv1alpha1.Type, name string) (err error) {
    var data string

    data, err = genv1alpha1.Config(t, input)
    if err != nil {
        return err
@@ -62,14 +62,17 @@ func containerRender(reply *osapi.ContainersReply) {
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
    fmt.Fprintln(w, "NAMESPACE\tID\tIMAGE\tPID\tSTATUS")

    for _, p := range reply.Containers {
        display := p.Id
        if p.Id != p.PodId {
            // container in a sandbox
            display = "└─ " + display
        }

        fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%s\n", p.Namespace, display, p.Image, p.Pid, p.Status)
    }

    helpers.Should(w.Flush())
}

@@ -42,11 +42,13 @@ var interfacesCmd = &cobra.Command{
func intersRender(reply *networkapi.InterfacesReply) {
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
    fmt.Fprintln(w, "INDEX\tNAME\tMAC\tMTU\tADDRESS")

    for _, r := range reply.Interfaces {
        for _, addr := range r.Ipaddress {
            fmt.Fprintf(w, "%d\t%s\t%s\t%d\t%s\n", r.Index, r.Name, r.Hardwareaddr, r.Mtu, addr)
        }
    }

    helpers.Should(w.Flush())
}

@@ -41,11 +41,13 @@ func mountsRender(reply *machineapi.MountsReply, err error) {
        if err != nil {
            helpers.Fatalf("error getting mounts: %s", err)
        }

        return
    }

    w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
    fmt.Fprintln(w, "FILESYSTEM\tSIZE(GB)\tUSED(GB)\tAVAILABLE(GB)\tPERCENT USED\tMOUNTED ON")

    for _, r := range reply.Stats {
        percentAvailable := 100.0 - 100.0*(float64(r.Available)/float64(r.Size))

@@ -55,6 +57,7 @@ func mountsRender(reply *machineapi.MountsReply, err error) {

        fmt.Fprintf(w, "%s\t%.02f\t%.02f\t%.02f\t%.02f%%\t%s\n", r.Filesystem, float64(r.Size)*1e-9, float64(r.Size-r.Available)*1e-9, float64(r.Available)*1e-9, percentAvailable, r.MountedOn)
    }

    helpers.Should(w.Flush())
}

@@ -107,6 +107,7 @@ func processesUI(ctx context.Context, c *client.Client) {
    uiEvents := ui.PollEvents()
    ticker := time.NewTicker(time.Second).C

    for {
        select {
        case <-ctx.Done():

@@ -188,7 +189,9 @@ func processesOutput(ctx context.Context, c *client.Client) (output string, err
    s := make([]string, 0, len(procs))
    s = append(s, "PID | STATE | THREADS | CPU-TIME | VIRTMEM | RESMEM | COMMAND")

    var args string

    for _, p := range procs {
        switch {
        case p.Executable == "":
@@ -54,6 +54,7 @@ var globalCtx = context.Background()
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
    var globalCtxCancel context.CancelFunc

    globalCtx, globalCtxCancel = context.WithCancel(context.Background())
    defer globalCtxCancel()

@@ -84,15 +85,19 @@ func Execute() {
        defaultTalosConfig string
        ok                 bool
    )

    if defaultTalosConfig, ok = os.LookupEnv(constants.TalosConfigEnvVar); !ok {
        home, err := os.UserHomeDir()
        if err != nil {
            return
        }

        defaultTalosConfig = path.Join(home, ".talos", "config")
    }

    rootCmd.PersistentFlags().StringVar(&talosconfig, "talosconfig", defaultTalosConfig, "The path to the Talos configuration file")
    rootCmd.PersistentFlags().StringVarP(&target, "target", "t", "", "target the specificed node")

    if err := rootCmd.Execute(); err != nil {
        helpers.Fatalf("%s", err)
    }

@@ -104,9 +109,11 @@ func setupClient(action func(*client.Client)) {
    if err != nil {
        helpers.Fatalf("error getting client credentials: %s", err)
    }

    if target != "" {
        t = target
    }

    c, err := client.NewClient(creds, t, constants.OsdPort)
    if err != nil {
        helpers.Fatalf("error constructing client: %s", err)
@@ -42,9 +42,11 @@ var routesCmd = &cobra.Command{
func routesRender(reply *networkapi.RoutesReply) {
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
    fmt.Fprintln(w, "INTERFACE\tDESTINATION\tGATEWAY\tMETRIC")

    for _, r := range reply.Routes {
        fmt.Fprintf(w, "%s\t%s\t%s\t%d\n", r.Interface, r.Destination, r.Gateway, r.Metric)
    }

    helpers.Should(w.Flush())
}

@@ -70,10 +70,12 @@ func serviceList(c *client.Client) {
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
    fmt.Fprintln(w, "SERVICE\tSTATE\tHEALTH\tLAST CHANGE\tLAST EVENT")

    for _, s := range reply.Services {
        svc := serviceInfoWrapper{s}
        fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", svc.Id, svc.State, svc.HealthStatus(), svc.LastUpdated(), svc.LastEvent())
    }

    if err := w.Flush(); err != nil {
        helpers.Fatalf("error writing response: %s", err)
    }

@@ -84,6 +86,7 @@ func serviceInfo(c *client.Client, id string) {
    if err != nil {
        helpers.Fatalf("error listing services: %s", err)
    }

    if s == nil {
        helpers.Fatalf("service %q is not registered", id)
    }

@@ -93,10 +96,13 @@ func serviceInfo(c *client.Client, id string) {
    fmt.Fprintf(w, "ID\t%s\n", svc.Id)
    fmt.Fprintf(w, "STATE\t%s\n", svc.State)
    fmt.Fprintf(w, "HEALTH\t%s\n", svc.HealthStatus())

    if svc.Health.LastMessage != "" {
        fmt.Fprintf(w, "LAST HEALTH MESSAGE\t%s\n", svc.Health.LastMessage)
    }

    label := "EVENTS"

    for _, event := range svc.Events.Events {
        // nolint: errcheck
        ts, _ := ptypes.Timestamp(event.Ts)

@@ -61,14 +61,17 @@ func statsRender(reply *osapi.StatsReply) {
    w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
    fmt.Fprintln(w, "NAMESPACE\tID\tMEMORY(MB)\tCPU")

    for _, s := range reply.Stats {
        display := s.Id
        if s.Id != s.PodId {
            // container in a sandbox
            display = "└─ " + display
        }

        fmt.Fprintf(w, "%s\t%s\t%.2f\t%d\n", s.Namespace, display, float64(s.MemoryUsage)*1e-6, s.CpuUsage)
    }

    helpers.Should(w.Flush())
}
@@ -103,14 +103,17 @@ func NewClient(creds *Credentials, target string, port int) (c *Client, err erro
    grpcOpts := []grpc.DialOption{}

    c = &Client{}

    crt, err := tls.X509KeyPair(creds.crt, creds.key)
    if err != nil {
        return nil, fmt.Errorf("could not load client key pair: %s", err)
    }

    certPool := x509.NewCertPool()
    if ok := certPool.AppendCertsFromPEM(creds.ca); !ok {
        return nil, fmt.Errorf("failed to append client certs")
    }

    // TODO(andrewrynhard): Do not parse the address. Pass the IP and port in as separate
    // parameters.
    transportCreds := credentials.NewTLS(&tls.Config{

@@ -122,6 +125,7 @@ func NewClient(creds *Credentials, target string, port int) (c *Client, err erro
    })

    grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds))

    c.conn, err = grpc.Dial(fmt.Sprintf("%s:%d", net.FormatAddress(target), port), grpcOpts...)
    if err != nil {
        return

@@ -146,6 +150,7 @@ func (c *Client) Kubeconfig(ctx context.Context) ([]byte, error) {
    if err != nil {
        return nil, err
    }

    return r.Bytes, nil
}

@@ -155,6 +160,7 @@ func (c *Client) Stats(ctx context.Context, namespace string, driver osapi.Conta
        Namespace: namespace,
        Driver:    driver,
    })

    return
}

@@ -164,6 +170,7 @@ func (c *Client) Containers(ctx context.Context, namespace string, driver osapi.
        Namespace: namespace,
        Driver:    driver,
    })

    return
}

@@ -174,6 +181,7 @@ func (c *Client) Restart(ctx context.Context, namespace string, driver osapi.Con
        Namespace: namespace,
        Driver:    driver,
    })

    return
}

@@ -212,6 +220,7 @@ func (c *Client) Logs(ctx context.Context, namespace string, driver osapi.Contai
        Driver: driver,
        Id:     id,
    })

    return
}

@@ -299,6 +308,7 @@ func (c *Client) Upgrade(ctx context.Context, image string) (string, error) {
    if err != nil {
        return "", err
    }

    return reply.Ack, nil
}
@@ -79,6 +79,7 @@ func ensure(filename string) (err error) {
        Context:  "",
        Contexts: map[string]*Context{},
    }

    return config.Save(filename)
}
@@ -109,7 +109,6 @@ linters:
    - gochecknoglobals
    - gochecknoinits
    - funlen
-   - wsl
    - godox
    - gocognit
  disable-all: false
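This hunk is the heart of the commit: with disable-all: false, golangci-lint only turns off the linters named in this disable list, so deleting the single - wsl entry (the removed line above, per the hunk's -109,7 +109,6 counts) is what enables the whitespace linter for the whole repository.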
@@ -26,6 +26,7 @@ func run() (err error) {
    if err != nil {
        return err
    }

    virtual := manager.NewManager(mountpoints)
    if err = virtual.MountAll(); err != nil {
        return err

@@ -39,10 +40,12 @@ func run() (err error) {
    // Mount the rootfs.
    log.Println("mounting the rootfs")

    mountpoints, err = squashfs.MountPoints(constants.NewRoot)
    if err != nil {
        return err
    }

    squashfs := manager.NewManager(mountpoints)
    if err = squashfs.MountAll(); err != nil {
        return err

@@ -50,6 +53,7 @@ func run() (err error) {
    // Switch into the new rootfs.
    log.Println("entering the rootfs")

    if err = switchroot.Switch(constants.NewRoot, virtual); err != nil {
        return err
    }

@@ -60,6 +64,7 @@ func run() (err error) {
func recovery() {
    if r := recover(); r != nil {
        log.Printf("recovered from: %+v\n", r)

        for i := 10; i >= 0; i-- {
            log.Printf("rebooting in %d seconds\n", i)
            time.Sleep(1 * time.Second)

@@ -26,6 +26,7 @@ func NewService() *Service {
func (s *Service) Main(ctx context.Context, config config.Configurator, logWriter io.Writer) error {
    api := reg.NewRegistrator(config)
    server := factory.NewServer(api)

    listener, err := factory.NewListener(factory.Network("unix"), factory.SocketPath(constants.InitSocketPath))
    if err != nil {
        return err
@@ -74,6 +74,7 @@ func (r *Registrator) Shutdown(ctx context.Context, in *empty.Empty) (reply *mac
// Upgrade initiates a Talos upgrade
func (r *Registrator) Upgrade(ctx context.Context, in *machineapi.UpgradeRequest) (data *machineapi.UpgradeReply, err error) {
    event.Bus().Notify(event.Event{Type: event.Upgrade, Data: in})

    data = &machineapi.UpgradeReply{Ack: "Upgrade request received"}

    return data, err

@@ -117,6 +118,7 @@ func (r *Registrator) ServiceStart(ctx context.Context, in *machineapi.ServiceSt
    }

    reply = &machineapi.ServiceStartReply{Resp: fmt.Sprintf("Service %q started", in.Id)}

    return reply, err
}

@@ -124,6 +126,7 @@ func (r *Registrator) ServiceStart(ctx context.Context, in *machineapi.ServiceSt
//nolint: staticcheck
func (r *Registrator) Start(ctx context.Context, in *machineapi.StartRequest) (reply *machineapi.StartReply, err error) {
    var rep *machineapi.ServiceStartReply

    rep, err = r.ServiceStart(ctx, &machineapi.ServiceStartRequest{Id: in.Id})
    if rep != nil {
        reply = &machineapi.StartReply{

@@ -138,6 +141,7 @@ func (r *Registrator) Start(ctx context.Context, in *machineapi.StartRequest) (r
//nolint: staticcheck
func (r *Registrator) Stop(ctx context.Context, in *machineapi.StopRequest) (reply *machineapi.StopReply, err error) {
    var rep *machineapi.ServiceStopReply

    rep, err = r.ServiceStop(ctx, &machineapi.ServiceStopRequest{Id: in.Id})
    if rep != nil {
        reply = &machineapi.StopReply{

@@ -156,6 +160,7 @@ func (r *Registrator) ServiceStop(ctx context.Context, in *machineapi.ServiceSto
    }

    reply = &machineapi.ServiceStopReply{Resp: fmt.Sprintf("Service %q stopped", in.Id)}

    return reply, err
}

@@ -167,6 +172,7 @@ func (r *Registrator) ServiceRestart(ctx context.Context, in *machineapi.Service
    }

    reply = &machineapi.ServiceRestartReply{Resp: fmt.Sprintf("Service %q restarted", in.Id)}

    return reply, err
}

@@ -215,16 +221,19 @@ func (r *Registrator) LS(req *machineapi.LSRequest, s machineapi.Machine_LSServe
    if req == nil {
        req = new(machineapi.LSRequest)
    }

    if !strings.HasPrefix(req.Root, OSPathSeparator) {
        // Make sure we use complete paths
        req.Root = OSPathSeparator + req.Root
    }

    req.Root = strings.TrimSuffix(req.Root, OSPathSeparator)
    if req.Root == "" {
        req.Root = "/"
    }

    var maxDepth int

    if req.Recurse {
        if req.RecursionDepth == 0 {
            maxDepth = -1

@@ -256,6 +265,7 @@ func (r *Registrator) LS(req *machineapi.LSRequest, s machineapi.Machine_LSServe
            Link: fi.Link,
        })
    }

    if err != nil {
        return err
    }

@@ -280,6 +290,7 @@ func (r *Registrator) Mounts(ctx context.Context, in *empty.Empty) (reply *machi
    stats := []*machineapi.MountStat{}
    scanner := bufio.NewScanner(file)

    for scanner.Scan() {
        fields := strings.Fields(scanner.Text())
@@ -50,7 +50,6 @@ const (
func listenForPowerButton() (err error) {
    // Get the acpi_event family.

    conn, err := genetlink.Dial(nil)
    if err != nil {
        return err

@@ -64,11 +63,13 @@ func listenForPowerButton() (err error) {
    }

    var id uint32

    for _, group := range f.Groups {
        if group.Name == acpiGenlMcastGroupName {
            id = group.ID
        }
    }

    if err = conn.JoinGroup(id); err != nil {
        // nolint: errcheck
        conn.Close()
@@ -27,6 +27,7 @@ func (task *Task) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *Task) standard(args *phase.RuntimeArgs) (err error) {
    var b []byte

    if b, err = args.Platform().Configuration(); err != nil {
        return err
    }

@@ -30,6 +30,7 @@ func (task *ExtraDevices) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *ExtraDevices) runtime(args *phase.RuntimeArgs) (err error) {
    mountpoints := mount.NewMountPoints()

    for _, extra := range args.Config().Machine().Install().ExtraDisks() {
        for i, part := range extra.Partitions {
            devname := fmt.Sprintf("%s%d", extra.Device, i+1)

@@ -36,6 +36,7 @@ func (task *ExtraFiles) runtime(args *phase.RuntimeArgs) (err error) {
    if err = os.MkdirAll(filepath.Dir(p), os.ModeDir); err != nil {
        result = multierror.Append(result, err)
    }

    if err = ioutil.WriteFile(p, []byte(f.Contents), f.Permissions); err != nil {
        result = multierror.Append(result, err)
    }

@@ -35,6 +35,7 @@ func (task *ResetDisk) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *ResetDisk) standard() (err error) {
    var bd *blockdevice.BlockDevice

    if bd, err = blockdevice.Open(task.devname); err != nil {
        return err
    }

@@ -42,9 +43,11 @@ func (task *ResetDisk) standard() (err error) {
    defer bd.Close()

    var pt table.PartitionTable

    if pt, err = bd.PartitionTable(true); err != nil {
        return err
    }

    for _, p := range pt.Partitions() {
        if err = pt.Delete(p); err != nil {
            return errors.Wrap(err, "failed to delete partition")

@@ -30,11 +30,13 @@ func (task *CordonAndDrain) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *CordonAndDrain) standard() (err error) {
    var hostname string

    if hostname, err = os.Hostname(); err != nil {
        return err
    }

    var kubeHelper *kubernetes.Helper

    if kubeHelper, err = kubernetes.NewHelper(); err != nil {
        return err
    }

@@ -50,6 +50,7 @@ func (task *KillKubernetesTasks) standard() (err error) {
    s := client.TaskService()

    ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

    response, err := s.List(ctx, &tasks.ListTasksRequest{})
    if err != nil {
        return err

@@ -42,6 +42,7 @@ func (task *UserDefinedNetwork) runtime(args *phase.RuntimeArgs) (err error) {
    // Convert links to nic
    log.Println("discovering local network interfaces")

    netconf, err := nwd.Discover()
    if err != nil {
        return err

@@ -49,7 +50,9 @@ func (task *UserDefinedNetwork) runtime(args *phase.RuntimeArgs) (err error) {
    // Configure specified interface
    netIfaces := make([]*nic.NetworkInterface, 0, len(netconf))

    var iface *nic.NetworkInterface

    for link, opts := range netconf {
        iface, err = nic.Create(link, opts...)
        if err != nil {
@@ -101,6 +101,7 @@ func (r *Runner) runPhase(phase *Phase) error {
    errCh := make(chan error)

    start := time.Now()

    log.Printf("[phase]: %s", phase.description)

    for _, task := range phase.tasks {

@@ -114,6 +115,7 @@ func (r *Runner) runPhase(phase *Phase) error {
        if err != nil {
            log.Printf("[phase]: %s error running task: %s", phase.description, err)
        }

        result = multierror.Append(result, err)
    }

@@ -154,6 +156,7 @@ func (r *Runner) Add(phase ...*Phase) {
// NewPhase initializes and returns a Phase.
func NewPhase(description string, tasks ...Task) *Phase {
    tasks = append([]Task{}, tasks...)

    return &Phase{
        description: description,
        tasks:       tasks,

@@ -70,6 +70,7 @@ func (suite *PhaseSuite) TestRunSuccess() {
    r.Add(phase.NewPhase("phase2", &regularTask{errCh: taskErr}, &nilTask{}))

    errCh := make(chan error)

    go func() {
        errCh <- r.Run()
    }()

@@ -28,6 +28,7 @@ func (task *Platform) runtime(args *phase.RuntimeArgs) (err error) {
    if err != nil {
        return err
    }

    if err = i.Initialize(args.Platform(), args.Config().Machine().Install()); err != nil {
        return err
    }
@@ -63,8 +63,11 @@ func Hosts(hostname string) (err error) {
    if err != nil {
        return
    }

    var buf []byte

    writer := bytes.NewBuffer(buf)

    err = tmpl.Execute(writer, data)
    if err != nil {
        return

@@ -85,10 +88,13 @@ func Hosts(hostname string) (err error) {
// root.
func ResolvConf() (err error) {
    target := "/run/system/etc/resolv.conf"

    var f *os.File

    if f, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0644); err != nil {
        return err
    }

    // nolint: errcheck
    defer f.Close()

@@ -103,12 +109,14 @@ func ResolvConf() (err error) {
// node's OS Image field is reported by the node from /etc/os-release.
func OSRelease() (err error) {
    var v string

    switch version.Tag {
    case "none":
        v = version.SHA
    default:
        v = version.Tag
    }

    data := struct {
        Name string
        ID   string

@@ -123,8 +131,11 @@ func OSRelease() (err error) {
    if err != nil {
        return
    }

    var buf []byte

    writer := bytes.NewBuffer(buf)

    err = tmpl.Execute(writer, data)
    if err != nil {
        return

@@ -146,6 +157,7 @@ func ip() string {
    if err != nil {
        return ""
    }

    for _, address := range addrs {
        if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
            if ipnet.IP.To4() != nil {

@@ -43,12 +43,14 @@ func (task *Hostname) runtime(args *phase.RuntimeArgs) (err error) {
    kernelHostname := kernel.ProcCmdline().Get(constants.KernelParamHostname).First()

    var platformHostname []byte

    platformHostname, err = args.Platform().Hostname()
    if err != nil {
        return err
    }

    configHostname := args.Config().Machine().Network().Hostname()

    switch {
    case configHostname != "":
        log.Printf("using hostname from config: %s\n", configHostname)

@@ -58,10 +60,11 @@ func (task *Hostname) runtime(args *phase.RuntimeArgs) (err error) {
    case platformHostname != nil:
        args.Config().Machine().Network().SetHostname(string(platformHostname))
        log.Printf("using hostname provided via platform: %s\n", string(platformHostname))

        // case data.Networking.OS.Hostname != "":
        // d.Networking.OS.Hostname = data.Networking.OS.Hostname
        // log.Printf("dhcp hostname %s:", data.Networking.OS.Hostname)
-   }
+   } //nolint: wsl

    return etc.Hosts(args.Config().Machine().Network().Hostname())
}
@@ -32,6 +32,7 @@ func (task *MountBPFFS) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *MountBPFFS) runtime(args *phase.RuntimeArgs) (err error) {
    var mountpoints *mount.Points

    mountpoints, err = bpffs.MountPoints()
    if err != nil {
        return err

@@ -47,6 +47,7 @@ func (task *MountCgroups) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *MountCgroups) runtime(args *phase.RuntimeArgs) (err error) {
    var mountpoints *mount.Points

    mountpoints, err = cgroups.MountPoints()
    if err != nil {
        return err

@@ -32,6 +32,7 @@ func (task *MountOverlay) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *MountOverlay) standard(args *phase.RuntimeArgs) (err error) {
    var mountpoints *mount.Points

    mountpoints, err = overlay.MountPoints()
    if err != nil {
        return err

@@ -34,6 +34,7 @@ func (task *MountSubDevices) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *MountSubDevices) runtime(args *phase.RuntimeArgs) (err error) {
    var mountpoints *mount.Points

    mountpoints, err = virtual.SubMountPoints()
    if err != nil {
        return err

@@ -32,6 +32,7 @@ func (task *UnmountOverlay) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *UnmountOverlay) standard(args *phase.RuntimeArgs) (err error) {
    var mountpoints *mount.Points

    mountpoints, err = overlay.MountPoints()
    if err != nil {
        return err

@@ -39,6 +39,7 @@ func (task *UnmountPodMounts) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc {
func (task *UnmountPodMounts) standard(args *phase.RuntimeArgs) (err error) {
    var b []byte

    if b, err = ioutil.ReadFile("/proc/self/mounts"); err != nil {
        return err
    }

@@ -56,11 +57,13 @@ func (task *UnmountPodMounts) standard(args *phase.RuntimeArgs) (err error) {
        mountpoint := fields[1]
        if strings.HasPrefix(mountpoint, constants.EphemeralMountPoint+"/") {
            log.Printf("unmounting %s\n", mountpoint)

            if err = unix.Unmount(mountpoint, 0); err != nil {
                return errors.Errorf("error unmounting %s: %v", mountpoint, err)
            }
        }
    }

    if err = scanner.Err(); err != nil {
        return err
    }

@@ -36,6 +36,7 @@ func (task *UnmountSystemDisks) RuntimeFunc(mode runtime.Mode) phase.RuntimeFunc
func (task *UnmountSystemDisks) standard(args *phase.RuntimeArgs) (err error) {
    var mountpoints *mount.Points

    mountpoints, err = owned.MountPointsForDevice(task.devname)
    if err != nil {
        return err
@@ -32,6 +32,7 @@ func (task *Security) runtime(args *phase.RuntimeArgs) (err error) {
    if err = kspp.EnforceKSPPKernelParameters(); err != nil {
        return err
    }

    if err = kspp.EnforceKSPPSysctls(); err != nil {
        return err
    }

@@ -36,6 +36,7 @@ func (task *LabelNodeAsMaster) standard(args *phase.RuntimeArgs) (err error) {
    }

    endpoint := net.ParseIP(args.Config().Cluster().IPs()[0])

    h, err := kubernetes.NewTemporaryClientFromPKI(args.Config().Cluster().CA().Crt, args.Config().Cluster().CA().Key, endpoint.String(), "6443")
    if err != nil {
        return err

@@ -50,6 +51,7 @@ func (task *LabelNodeAsMaster) standard(args *phase.RuntimeArgs) (err error) {
        if err = h.LabelNodeAsMaster(hostname); err == nil {
            return nil
        }

        time.Sleep(3 * time.Second)
    }

@@ -73,6 +73,7 @@ func (task *StartServices) loadKubernetesServices(args *phase.RuntimeArgs) {
    svcs.Load(
        &services.Kubelet{},
    )

    if args.Config().Machine().Type() == machine.Bootstrap {
        svcs.Load(
            &services.Bootkube{},

@@ -36,6 +36,7 @@ func (task *StopNonCrucialServices) standard(args *phase.RuntimeArgs) (err error
    if args.Config().Machine().Type() == machine.Bootstrap || args.Config().Machine().Type() == machine.ControlPlane {
        services = append(services, "trustd", "proxyd")
    }

    for _, service := range services {
        if err = system.Services(nil).Stop(ctx, service); err != nil {
            return err

@@ -32,9 +32,11 @@ func (task *Task) runtime(args *phase.RuntimeArgs) error {
    if err := sysctl.WriteSystemProperty(&sysctl.SystemProperty{Key: "net.ipv4.ip_forward", Value: "1"}); err != nil {
        multiErr = multierror.Append(multiErr, errors.Wrapf(err, "failed to set IPv4 forwarding"))
    }

    if err := sysctl.WriteSystemProperty(&sysctl.SystemProperty{Key: "net.ipv6.conf.default.forwarding", Value: "1"}); err != nil {
        multiErr = multierror.Append(multiErr, errors.Wrap(err, "failed to set IPv6 forwarding"))
    }

    if err := sysctl.WriteSystemProperty(&sysctl.SystemProperty{Key: "kernel.pid_max", Value: "262144"}); err != nil {
        multiErr = multierror.Append(multiErr, errors.Wrap(err, "failed to set pid_max"))
    }
@@ -38,19 +38,23 @@ func (task *LeaveEtcd) standard(args *phase.RuntimeArgs) (err error) {
    if args.Config().Machine().Type() == machine.Worker {
        return nil
    }

    hostname, err := os.Hostname()
    if err != nil {
        return err
    }

    tlsInfo := transport.TLSInfo{
        CertFile:      constants.KubernetesEtcdPeerCert,
        KeyFile:       constants.KubernetesEtcdPeerKey,
        TrustedCAFile: constants.KubernetesEtcdCACert,
    }

    tlsConfig, err := tlsInfo.ClientConfig()
    if err != nil {
        return err
    }

    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"127.0.0.1:2379"},
        DialTimeout: 5 * time.Second,

@@ -59,6 +63,7 @@ func (task *LeaveEtcd) standard(args *phase.RuntimeArgs) (err error) {
    if err != nil {
        return err
    }

    // nolint: errcheck
    defer cli.Close()

@@ -68,19 +73,23 @@ func (task *LeaveEtcd) standard(args *phase.RuntimeArgs) (err error) {
    }

    var id *uint64

    for _, member := range resp.Members {
        if member.Name == hostname {
            id = &member.ID
        }
    }

    if id == nil {
        return errors.Errorf("failed to find %q in list of etcd members", hostname)
    }

    log.Println("leaving etcd cluster")

    _, err = cli.MemberRemove(context.Background(), *id)
    if err != nil {
        return err
    }

    return nil
}

@@ -44,6 +44,7 @@ func (task *Upgrade) standard(args *phase.RuntimeArgs) (err error) {
    if config = kernel.ProcCmdline().Get(constants.KernelParamConfig).First(); config == nil {
        return errors.Errorf("no config option was found")
    }

    if err = install.Install(task.ref, task.devname, strings.ToLower(args.Platform().Name())); err != nil {
        return err
    }
@@ -67,6 +67,7 @@ func (d *Sequencer) Boot() error {
    if err != nil {
        return err
    }

    config, err := config.New(content)
    if err != nil {
        return err

@@ -173,12 +174,14 @@ func (d *Sequencer) Upgrade(req *machineapi.UpgradeRequest) error {
    }

    var dev *probe.ProbedBlockDevice

    dev, err = probe.GetDevWithFileSystemLabel(constants.EphemeralPartitionLabel)
    if err != nil {
        return err
    }

    devname := dev.BlockDevice.Device().Name()

    if err := dev.Close(); err != nil {
        return err
    }

@@ -28,10 +28,12 @@ type EventBusObserver struct {
func recovery() {
    if r := recover(); r != nil {
        log.Printf("%+v\n", r)

        for i := 10; i >= 0; i-- {
            log.Printf("rebooting in %d seconds\n", i)
            time.Sleep(1 * time.Second)
        }

        if unix.Reboot(unix.LINUX_REBOOT_CMD_RESTART) == nil {
            select {}
        }

@@ -56,6 +58,7 @@ func sync() {
            return
        case <-time.After(time.Second):
        }

        if i != 0 {
            log.Printf("waiting %d more seconds for sync to finish", i)
        }

@@ -84,7 +87,9 @@ func main() {
    // Subscribe to events.
    init := EventBusObserver{&event.Embeddable{}}
    defer close(init.Channel())

    event.Bus().Register(init)

    defer event.Bus().Unregister(init)

    // Initialize process reaper.

@@ -137,13 +142,16 @@ func main() {
            req *machineapi.UpgradeRequest
            ok  bool
        )

        if req, ok = e.Data.(*machineapi.UpgradeRequest); !ok {
            log.Println("cannot perform upgrade, unexpected data type")
            continue
        }

        if err := seq.Upgrade(req); err != nil {
            panic(errors.Wrap(err, "upgrade failed"))
        }

        event.Bus().Notify(event.Event{Type: event.Reboot})
    }
}
@@ -78,6 +78,7 @@ func (a *all) String() string {
// WaitForAll creates a condition which waits for all the conditions to be successful
func WaitForAll(conditions ...Condition) Condition {
    res := &all{}

    for _, c := range conditions {
        if multi, ok := c.(*all); ok {
            // flatten lists

@@ -54,6 +54,7 @@ func (suite *AllSuite) TestString() {
    waiter := conditions.WaitForAll(conds...)

    done := make(chan error)

    go func() {
        done <- waiter.Wait(context.Background())
    }()

@@ -86,6 +87,7 @@ func (suite *AllSuite) TestFlatten() {
    defer ctxCancel()

    done := make(chan error)

    go func() {
        done <- waiter.Wait(ctx)
    }()

@@ -48,5 +48,6 @@ func WaitForFilesToExist(filenames ...string) Condition {
    for i := range filenames {
        conditions[i] = WaitForFileToExist(filenames[i])
    }

    return WaitForAll(conditions...)
}

@@ -74,6 +74,7 @@ func (suite *FilesSuite) TestWaitForFileToExist() {
    suite.Require().NoError(<-errCh)

    suite.Require().NoError(os.Remove(path))

    ctx, ctxCancel := context.WithCancel(context.Background())

    go func() {

@@ -122,6 +123,7 @@ func (suite *FilesSuite) TestWaitForAllFilesToExist() {
    suite.Require().NoError(os.Remove(pathA))
    suite.Require().NoError(os.Remove(pathB))

    ctx, ctxCancel := context.WithCancel(context.Background())

    go func() {
@@ -77,6 +77,7 @@ func (events *ServiceEvents) Push(event ServiceEvent) {
        // overwriting some entry
        events.discarded++
    }

    events.events[events.pos] = event
    events.pos = (events.pos + 1) % len(events.events)
}

@@ -48,6 +48,7 @@ func Run(ctx context.Context, settings *Settings, state *State, check Check) err
    healthy = err == nil
    message = ""

    if !healthy {
        message = err.Error()
    }

@@ -98,6 +98,7 @@ func (suite *CheckSuite) TestHealthChange() {
        if state.Get().Healthy != nil {
            break
        }

        time.Sleep(50 * time.Millisecond)
    }

@@ -108,6 +109,7 @@ func (suite *CheckSuite) TestHealthChange() {
    for i := 0; i < 10; i++ {
        time.Sleep(20 * time.Millisecond)

        if *state.Get().Healthy {
            break
        }

@@ -164,6 +166,7 @@ func (suite *CheckSuite) TestCheckAbort() {
        if state.Get().Healthy != nil {
            break
        }

        time.Sleep(50 * time.Millisecond)
    }

@@ -46,6 +46,7 @@ func (state *State) Update(healthy bool, message string) {
        state.status.Healthy = &healthy
        state.status.LastChange = time.Now()
    }

    state.status.LastMessage = message

    newStatus := state.status

@@ -61,8 +62,7 @@ func (state *State) Update(healthy bool, message string) {
    for _, ch := range subscribers {
        select {
        case ch <- StateChange{oldStatus, newStatus}:
-       default:
-           // drop messages to clients which don't consume them
+       default: // drop messages to clients which don't consume them
        }
    }
}
@@ -33,6 +33,7 @@ func (m *MockService) ID(config.Configurator) string {
    if m.name != "" {
        return m.name
    }

    return "MockRunner"
}

@@ -57,7 +57,9 @@ func NewRunner(debug bool, args *runner.Args, setters ...runner.Option) runner.R
func (c *containerdRunner) Open(ctx context.Context) error {
    // Create the containerd client.
    var err error

    c.ctx = namespaces.WithNamespace(context.Background(), c.opts.Namespace)

    c.client, err = containerd.New(c.opts.ContainerdAddress)
    if err != nil {
        return err

@@ -70,6 +72,7 @@ func (c *containerdRunner) Open(ctx context.Context) error {
    // See if there's previous container/snapshot to clean up
    var oldcontainer containerd.Container

    if oldcontainer, err = c.client.LoadContainer(c.ctx, c.args.ID); err == nil {
        if err = oldcontainer.Delete(c.ctx, containerd.WithSnapshotCleanup); err != nil {
            return errors.Wrap(err, "error deleting old container instance")

@@ -79,6 +82,7 @@ func (c *containerdRunner) Open(ctx context.Context) error {
    // Create the container.
    specOpts := c.newOCISpecOpts(image)
    containerOpts := c.newContainerOpts(image, specOpts)

    c.container, err = c.client.NewContainer(
        c.ctx,
        c.args.ID,

@@ -118,6 +122,7 @@ func (c *containerdRunner) Run(eventSink events.Recorder) error {
    if err != nil {
        return errors.Wrapf(err, "failed to create task: %q", c.args.ID)
    }

    defer task.Delete(c.ctx) // nolint: errcheck

    if err = task.Start(c.ctx); err != nil {

@@ -137,6 +142,7 @@ func (c *containerdRunner) Run(eventSink events.Recorder) error {
        if code != 0 {
            return errors.Errorf("task %q failed: exit code %d", c.args.ID, code)
        }

        return nil
    case <-c.stop:
        // graceful stop the task
@@ -84,6 +84,7 @@ func (suite *ContainerdSuite) SetupSuite() {
    )
    suite.Require().NoError(suite.containerdRunner.Open(context.Background()))
    suite.containerdWg.Add(1)

    go func() {
        defer suite.containerdWg.Done()
        defer func() { suite.Require().NoError(suite.containerdRunner.Close()) }()

@@ -135,6 +136,7 @@ func (suite *ContainerdSuite) TestRunSuccess() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -156,6 +158,7 @@ func (suite *ContainerdSuite) TestRunTwice() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    // running same container twice should be fine

@@ -168,6 +171,7 @@ func (suite *ContainerdSuite) TestRunTwice() {
    // TODO: workaround containerd (?) bug: https://github.com/docker/for-linux/issues/643
    for i := 0; i < 100; i++ {
        time.Sleep(100 * time.Millisecond)

        if _, err := os.Stat("/containerd-shim/" + suite.containerdNamespace + "/" + ID + "/shim.sock"); err != nil && os.IsNotExist(err) {
            break
        }

@@ -203,6 +207,7 @@ func (suite *ContainerdSuite) TestContainerCleanup() {
        runner.WithContainerdAddress(suite.containerdAddress),
    )
    suite.Require().NoError(r2.Open(context.Background()))

    defer func() { suite.Assert().NoError(r2.Close()) }()

    suite.Assert().NoError(r2.Run(MockEventSink))

@@ -222,6 +227,7 @@ func (suite *ContainerdSuite) TestRunLogs() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -265,6 +271,7 @@ func (suite *ContainerdSuite) TestStopFailingAndRestarting() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    done := make(chan error, 1)

@@ -275,6 +282,7 @@ func (suite *ContainerdSuite) TestStopFailingAndRestarting() {
    for i := 0; i < 10; i++ {
        time.Sleep(500 * time.Millisecond)

        if bytes.Contains(suite.getLogContents("endless.log"), []byte("fail\n")) {
            break
        }

@@ -293,6 +301,7 @@ func (suite *ContainerdSuite) TestStopFailingAndRestarting() {
    for i := 0; i < 10; i++ {
        time.Sleep(500 * time.Millisecond)

        if bytes.Contains(suite.getLogContents("endless.log"), []byte("ok\n")) {
            break
        }

@@ -327,6 +336,7 @@ func (suite *ContainerdSuite) TestStopSigKill() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    done := make(chan error, 1)

@@ -367,6 +377,7 @@ func (suite *ContainerdSuite) TestImportSuccess() {
        suite.containerdNamespace, containerdrunner.WithContainerdAddress(suite.containerdAddress)).Import(reqs...))

    ctx := namespaces.WithNamespace(context.Background(), suite.containerdNamespace)

    for _, imageName := range []string{"testtalos/osd", "testtalos/proxyd"} {
        image, err := suite.client.ImageService().Get(ctx, imageName)
        suite.Require().NoError(err)

@@ -397,6 +408,7 @@ func TestContainerdSuite(t *testing.T) {
    if os.Getuid() != 0 {
        t.Skip("can't run the test as non-root")
    }

    _, err := os.Stat("/bin/containerd")
    if err != nil {
        t.Skip("containerd binary is not available, skipping the test")
@@ -68,6 +68,7 @@ func (i *Importer) Import(reqs ...*ImportRequest) error {
    }

    ctx := namespaces.WithNamespace(context.Background(), i.namespace)

    client, err := containerd.New(i.options.containerdAddress)
    if err != nil {
        return err

@@ -76,6 +77,7 @@ func (i *Importer) Import(reqs ...*ImportRequest) error {
    defer client.Close()

    errCh := make(chan error)

    var result *multierror.Error

    for _, req := range reqs {

@@ -78,6 +78,7 @@ func (suite *CRISuite) SetupSuite() {
    )
    suite.Require().NoError(suite.containerdRunner.Open(context.Background()))
    suite.containerdWg.Add(1)

    go func() {
        defer suite.containerdWg.Done()
        defer func() { suite.Require().NoError(suite.containerdRunner.Close()) }()

@@ -129,6 +130,7 @@ func (suite *CRISuite) TestRunSuccess() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -147,6 +149,7 @@ func (suite *CRISuite) TestRunTwice() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    // running same container twice should be fine

@@ -187,6 +190,7 @@ func (suite *CRISuite) TestPodCleanup() {
        runner.WithContainerdAddress(suite.containerdAddress),
    )
    suite.Require().NoError(r2.Open(context.Background()))

    defer func() { suite.Assert().NoError(r2.Close()) }()

    suite.Assert().NoError(r2.Run(MockEventSink))

@@ -205,6 +209,7 @@ func (suite *CRISuite) TestRunLogs() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -303,6 +308,7 @@ func (suite *CRISuite) TestStopSigKill() {
    )

    suite.Require().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    done := make(chan error, 1)

@@ -328,6 +334,7 @@ func TestCRISuite(t *testing.T) {
    if os.Getuid() != 0 {
        t.Skip("can't run the test as non-root")
    }

    _, err := os.Stat("/bin/containerd")
    if err != nil {
        t.Skip("containerd binary is not available, skipping the test")
@@ -134,6 +134,7 @@ func (c *criRunner) Open(upstreamCtx context.Context) error {
    // Create the CRI client.
    var err error

    c.client, err = cri.NewClient("unix:"+c.opts.ContainerdAddress, 10*time.Second)
    if err != nil {
        return err

@@ -262,6 +263,7 @@ WAIT:
    }

    eventSink(events.StateStopping, "Stopping container %q in sandbox %q", containerID, c.podSandboxID)

    err = c.client.StopContainer(ctx, containerID, int64(c.opts.GracefulShutdownTimeout/time.Second))
    if err != nil {
        return err

@@ -82,6 +82,7 @@ func (r *goroutineRunner) wrappedMain() (err error) {
    }()

    var w *log.Log

    w, err = log.New(r.id, r.opts.LogPath)
    if err != nil {
        err = errors.Wrap(err, "service log handler")

@@ -53,6 +53,7 @@ func (suite *GoroutineSuite) TestRunSuccess() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -67,6 +68,7 @@ func (suite *GoroutineSuite) TestRunFail() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().EqualError(r.Run(MockEventSink), "service failed")

@@ -81,6 +83,7 @@ func (suite *GoroutineSuite) TestRunPanic() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    err := r.Run(MockEventSink)

@@ -99,9 +102,11 @@ func (suite *GoroutineSuite) TestStop() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    errCh := make(chan error)

    go func() {
        errCh <- r.Run(MockEventSink)
    }()

@@ -127,6 +132,7 @@ func (suite *GoroutineSuite) TestRunLogs() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))
@@ -97,6 +97,7 @@ func (p *processRunner) build() (cmd *exec.Cmd, err error) {
    } else {
        writer = w
    }

    cmd.Stdout = writer
    cmd.Stderr = writer

@@ -110,6 +111,7 @@ func (p *processRunner) run(eventSink events.Recorder) error {
    }

    notifyCh := make(chan reaper.ProcessInfo, 8)

    usingReaper := reaper.Notify(notifyCh)
    if usingReaper {
        defer reaper.Stop(notifyCh)

@@ -153,6 +155,7 @@ func (p *processRunner) run(eventSink events.Recorder) error {
    // wait for process to terminate
    <-waitCh

    return nil
}

@@ -61,6 +61,7 @@ func (suite *ProcessSuite) TestRunSuccess() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -75,6 +76,7 @@ func (suite *ProcessSuite) TestRunLogs() {
    }, runner.WithLogPath(suite.tmpDir))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    suite.Assert().NoError(r.Run(MockEventSink))

@@ -102,11 +104,13 @@ func (suite *ProcessSuite) TestRunRestartFailed() {
    }, runner.WithLogPath(suite.tmpDir)), restart.WithType(restart.UntilSuccess), restart.WithRestartInterval(time.Millisecond))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    var wg sync.WaitGroup

    wg.Add(1)

    go func() {
        defer wg.Done()
        suite.Assert().NoError(r.Run(MockEventSink))

@@ -115,16 +119,19 @@ func (suite *ProcessSuite) TestRunRestartFailed() {
    fetchLog := func() []byte {
        logFile, err := os.Open(filepath.Join(suite.tmpDir, "restarter.log"))
        suite.Assert().NoError(err)

        // nolint: errcheck
        defer logFile.Close()

        logContents, err := ioutil.ReadAll(logFile)
        suite.Assert().NoError(err)

        return logContents
    }

    for i := 0; i < 20; i++ {
        time.Sleep(100 * time.Millisecond)

        if len(fetchLog()) > 20 {
            break
        }

@@ -150,6 +157,7 @@ func (suite *ProcessSuite) TestStopFailingAndRestarting() {
    }, runner.WithLogPath(suite.tmpDir)), restart.WithType(restart.Forever), restart.WithRestartInterval(5*time.Millisecond))

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    done := make(chan error, 1)

@@ -194,6 +202,7 @@ func (suite *ProcessSuite) TestStopSigKill() {
    )

    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    done := make(chan error, 1)
@@ -135,6 +135,7 @@ func (r *restarter) Run(eventSink events.Recorder) error {
        if err == nil {
            return nil
        }

        eventSink(events.StateWaiting, "Error running %s, going to restart until it succeeds: %s", r.wrappedRunner, err)
    case Forever:
        if err == nil {

@@ -32,6 +32,7 @@ type MockRunner struct {
func (m *MockRunner) Open(ctx context.Context) error {
    m.stop = make(chan struct{})
    m.stopped = make(chan struct{})

    return nil
}

@@ -82,6 +83,7 @@ func (suite *RestartSuite) TestRunOnce() {
    r := restart.New(&mock, restart.WithType(restart.Once))
    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    failed := errors.New("failed")

@@ -101,6 +103,7 @@ func (suite *RestartSuite) TestRunOnceStop() {
    r := restart.New(&mock, restart.WithType(restart.Once))
    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    errCh := make(chan error)

@@ -120,6 +123,7 @@ func (suite *RestartSuite) TestRunUntilSuccess() {
    r := restart.New(&mock, restart.WithType(restart.UntilSuccess), restart.WithRestartInterval(time.Millisecond))
    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    failed := errors.New("failed")

@@ -146,6 +150,7 @@ func (suite *RestartSuite) TestRunForever() {
    r := restart.New(&mock, restart.WithType(restart.Forever), restart.WithRestartInterval(time.Millisecond))
    suite.Assert().NoError(r.Open(context.Background()))

    defer func() { suite.Assert().NoError(r.Close()) }()

    failed := errors.New("failed")
@@ -37,6 +37,7 @@ func (sc *serviceCondition) Wait(ctx context.Context) error {
    }

    notifyCh := make(chan struct{}, 1)

    svcrunner.Subscribe(sc.event, notifyCh)
    defer svcrunner.Unsubscribe(sc.event, notifyCh)

@@ -84,6 +84,7 @@ func (svcrunner *ServiceRunner) UpdateState(newstate events.ServiceState, messag
    if isUp {
        svcrunner.notifyEvent(StateEventUp)
    }

    if isDown {
        svcrunner.notifyEvent(StateEventDown)
    }

@@ -135,6 +136,7 @@ func (svcrunner *ServiceRunner) waitFor(ctx context.Context, condition condition
    svcrunner.UpdateState(events.StateWaiting, "Waiting for %s", description)

    errCh := make(chan error)

    go func() {
        errCh <- condition.Wait(ctx)
    }()

@@ -174,13 +176,16 @@ func (svcrunner *ServiceRunner) Start() {
    svcrunner.ctxMu.Unlock()

    condition := svcrunner.service.Condition(svcrunner.config)

    dependencies := svcrunner.service.DependsOn(svcrunner.config)
    if len(dependencies) > 0 {
        serviceConditions := make([]conditions.Condition, len(dependencies))
        for i := range dependencies {
            serviceConditions[i] = WaitForService(StateEventUp, dependencies[i])
        }

        serviceDependencies := conditions.WaitForAll(serviceConditions...)

        if condition != nil {
            condition = conditions.WaitForAll(serviceDependencies, condition)
        } else {

@@ -196,12 +201,14 @@ func (svcrunner *ServiceRunner) Start() {
    }

    svcrunner.UpdateState(events.StatePreparing, "Running pre state")

    if err := svcrunner.service.PreFunc(ctx, svcrunner.config); err != nil {
        svcrunner.UpdateState(events.StateFailed, "Failed to run pre stage: %v", err)
        return
    }

    svcrunner.UpdateState(events.StatePreparing, "Creating service runner")

    runnr, err := svcrunner.service.Runner(svcrunner.config)
    if err != nil {
        svcrunner.UpdateState(events.StateFailed, "Failed to create runner: %v", err)

@@ -250,6 +257,7 @@ func (svcrunner *ServiceRunner) run(ctx context.Context, runnr runner.Runner) er
    defer healthWg.Wait()

    healthWg.Add(1)

    go func() {
        defer healthWg.Done()

@@ -258,10 +266,12 @@ func (svcrunner *ServiceRunner) run(ctx context.Context, runnr runner.Runner) er
    }()

    notifyCh := make(chan health.StateChange, 2)

    svcrunner.healthState.Subscribe(notifyCh)
    defer svcrunner.healthState.Unsubscribe(notifyCh)

    healthWg.Add(1)

    go func() {
        defer healthWg.Done()

@@ -283,7 +293,9 @@ func (svcrunner *ServiceRunner) run(ctx context.Context, runnr runner.Runner) er
    select {
    case <-ctx.Done():
        err := runnr.Stop()

        <-errCh

        if err != nil {
            return errors.Wrap(err, "error stopping service")
        }

@@ -36,6 +36,7 @@ func (suite *ServiceRunnerSuite) TestFullFlow() {
    }, nil)

    finished := make(chan struct{})

    go func() {
        defer close(finished)
        sr.Start()

@@ -72,6 +73,7 @@ func (suite *ServiceRunnerSuite) TestFullFlowHealthy() {
    sr := system.NewServiceRunner(&MockHealthcheckedService{}, nil)

    finished := make(chan struct{})

    go func() {
        defer close(finished)
        sr.Start()

@@ -107,6 +109,7 @@ func (suite *ServiceRunnerSuite) TestFullFlowHealthChanges() {
    sr := system.NewServiceRunner(&m, nil)

    finished := make(chan struct{})

    go func() {
        defer close(finished)
        sr.Start()

@@ -141,6 +144,7 @@ func (suite *ServiceRunnerSuite) TestFullFlowHealthChanges() {
func (suite *ServiceRunnerSuite) TestWaitingDescriptionChange() {
    oldWaitConditionCheckInterval := system.WaitConditionCheckInterval
|
||||
system.WaitConditionCheckInterval = 10 * time.Millisecond
|
||||
|
||||
defer func() {
|
||||
system.WaitConditionCheckInterval = oldWaitConditionCheckInterval
|
||||
}()
|
||||
@@ -152,6 +156,7 @@ func (suite *ServiceRunnerSuite) TestWaitingDescriptionChange() {
|
||||
}, nil)
|
||||
|
||||
finished := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(finished)
|
||||
sr.Start()
|
||||
@@ -327,6 +332,7 @@ func (suite *ServiceRunnerSuite) TestFullFlowRestart() {
|
||||
}, nil)
|
||||
|
||||
finished := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(finished)
|
||||
sr.Start()
|
||||
|
||||
@@ -72,10 +72,12 @@ func generateAssets(config config.Configurator) (err error) {
if err != nil {
return err
}

block, _ := pem.Decode(peerCrt)
if block == nil {
return errors.New("failed to decode peer certificate")
}

peer, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return errors.Wrap(err, "failed to parse client certificate")
@@ -85,10 +87,12 @@ func generateAssets(config config.Configurator) (err error) {
if err != nil {
return err
}

block, _ = pem.Decode(caCrt)
if block == nil {
return errors.New("failed to decode CA certificate")
}

ca, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return errors.Wrap(err, "failed to parse etcd CA certificate")
@@ -98,10 +102,12 @@ func generateAssets(config config.Configurator) (err error) {
if err != nil {
return err
}

block, _ = pem.Decode(peerKey)
if block == nil {
return errors.New("failed to decode peer key")
}

key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return errors.Wrap(err, "failed to parse client key")
@@ -113,23 +119,29 @@ func generateAssets(config config.Configurator) (err error) {
}

apiServers := []*url.URL{}

for _, ip := range config.Cluster().IPs() {
var u *url.URL

if u, err = url.Parse("https://" + ip + ":6443"); err != nil {
return err
}

apiServers = append(apiServers, u)
}

u, err := url.Parse("https://127.0.0.1:6443")
if err != nil {
return err
}

apiServers = append(apiServers, u)

_, podCIDR, err := net.ParseCIDR(config.Cluster().Network().PodCIDR())
if err != nil {
return err
}

_, serviceCIDR, err := net.ParseCIDR(config.Cluster().Network().ServiceCIDR())
if err != nil {
return err
@@ -141,6 +153,7 @@ func generateAssets(config config.Configurator) (err error) {
if block == nil {
return errors.New("failed to decode Kubernetes CA certificate")
}

k8sCA, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return errors.Wrap(err, "failed to parse Kubernetes CA certificate")
@@ -150,6 +163,7 @@ func generateAssets(config config.Configurator) (err error) {
if block == nil {
return errors.New("failed to decode Kubernetes CA key")
}

k8sKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return errors.Wrap(err, "failed to parse Kubernetes key")
@@ -205,11 +219,13 @@ func generateAssets(config config.Configurator) (err error) {

func altNamesFromURLs(urls []*url.URL) *tlsutil.AltNames {
var an tlsutil.AltNames

for _, u := range urls {
host, _, err := net.SplitHostPort(u.Host)
if err != nil {
host = u.Host
}

ip := net.ParseIP(host)
if ip == nil {
an.DNSNames = append(an.DNSNames, host)
@@ -217,5 +233,6 @@ func altNamesFromURLs(urls []*url.URL) *tlsutil.AltNames {
an.IPs = append(an.IPs, ip)
}
}

return &an
}
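
As an editorial aside: altNamesFromURLs splits a list of URLs into DNS names and IP addresses for certificate SANs. A minimal, self-contained sketch of the same split, with the tlsutil.AltNames shape assumed from its use here and the hosts invented:

package main

import (
	"fmt"
	"net"
	"net/url"
)

// altNames mirrors the assumed shape of tlsutil.AltNames.
type altNames struct {
	DNSNames []string
	IPs      []net.IP
}

func main() {
	// Hypothetical inputs: one literal address, one hostname.
	urls := []*url.URL{
		{Host: "10.0.0.5:6443"},
		{Host: "kubernetes.default.svc:443"},
	}

	var an altNames

	for _, u := range urls {
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			host = u.Host // no port present
		}

		// Literal IPs become IP SANs; everything else is a DNS name.
		if ip := net.ParseIP(host); ip != nil {
			an.IPs = append(an.IPs, ip)
		} else {
			an.DNSNames = append(an.DNSNames, host)
		}
	}

	fmt.Printf("DNS: %v, IPs: %v\n", an.DNSNames, an.IPs) // DNS: [kubernetes.default.svc], IPs: [10.0.0.5]
}
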
@@ -106,8 +106,10 @@ func (e *Etcd) Runner(config config.Configurator) (runner.Runner, error) {

initialClusterState := "new"
initialCluster := hostname + "=https://" + ips[0].String() + ":2380"

if config.Machine().Type() == machine.ControlPlane {
initialClusterState = "existing"

initialCluster, err = buildInitialCluster(config, hostname, ips[0].String())
if err != nil {
return nil, err
@@ -181,6 +183,7 @@ func generatePKI(config config.Configurator) (err error) {
if err != nil {
return errors.Wrap(err, "failed to discover IP addresses")
}

ips = append(ips, stdlibnet.ParseIP("127.0.0.1"))

hostname, err := os.Hostname()
@@ -295,6 +298,7 @@ func addMember(endpoints, addrs []string) (*clientv3.MemberAddResponse, error) {

func buildInitialCluster(config config.Configurator, name, ip string) (initial string, err error) {
endpoint := stdlibnet.ParseIP(config.Cluster().IPs()[0])

h, err := kubernetes.NewTemporaryClientFromPKI(config.Cluster().CA().Crt, config.Cluster().CA().Key, endpoint.String(), "6443")
if err != nil {
return "", err
@@ -305,6 +309,7 @@ func buildInitialCluster(config config.Configurator, name, ip string) (initial s
if err != nil {
log.Printf("failed to get client endpoints: %+v\n", err)
time.Sleep(3 * time.Second)

continue
}

@@ -319,17 +324,20 @@ func buildInitialCluster(config config.Configurator, name, ip string) (initial s
if err != nil {
log.Printf("failed to add etcd member: %+v\n", err)
time.Sleep(3 * time.Second)

continue
}

newID := resp.Member.ID
conf := []string{}

for _, memb := range resp.Members {
for _, u := range memb.PeerURLs {
n := memb.Name
if memb.ID == newID {
n = name
}

conf = append(conf, fmt.Sprintf("%s=%s", n, u))
}
}
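
The conf slice assembled above is what etcd expects for its --initial-cluster flag: comma-joined name=peerURL pairs. A small, hypothetical illustration (member names and addresses made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical members: the node being added gets its own name,
	// existing peers keep the names etcd already knows them by.
	conf := []string{
		"master-1=https://10.0.0.2:2380",
		"master-2=https://10.0.0.3:2380",
		"master-3=https://10.0.0.4:2380", // the new member
	}

	fmt.Println("--initial-cluster=" + strings.Join(conf, ","))
}
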
@@ -76,7 +76,9 @@ func (k *Kubelet) PreFunc(ctx context.Context, config config.Configurator) error
}

templ := template.Must(template.New("tmpl").Parse(string(kubeletKubeConfigTemplate)))

var buf bytes.Buffer

if err := templ.Execute(&buf, cfg); err != nil {
return err
}
@@ -102,6 +104,7 @@ func (k *Kubelet) PreFunc(ctx context.Context, config config.Configurator) error

// Pull the image and unpack it.
containerdctx := namespaces.WithNamespace(ctx, "k8s.io")

image := fmt.Sprintf("%s:v%s", constants.KubernetesImage, config.Cluster().Version())
if _, err = client.Pull(containerdctx, image, containerdapi.WithPullUnpack); err != nil {
return fmt.Errorf("failed to pull image %q: %v", image, err)
@@ -181,6 +184,7 @@ func (k *Kubelet) Runner(config config.Configurator) (runner.Runner, error) {
if err != nil {
return nil, err
}

mounts = append(mounts, cniMounts...)

// Add extra mounts.

@@ -35,6 +35,7 @@ func (n *Networkd) ID(config config.Configurator) string {
// PreFunc implements the Service interface.
func (n *Networkd) PreFunc(ctx context.Context, config config.Configurator) error {
importer := containerd.NewImporter(constants.SystemContainerdNamespace, containerd.WithContainerdAddress(constants.SystemContainerdAddress))

return importer.Import(&containerd.ImportRequest{
Path: "/usr/images/networkd.tar",
Options: []containerdapi.ImportOpt{

@@ -35,6 +35,7 @@ func (n *NTPd) ID(config config.Configurator) string {
// PreFunc implements the Service interface.
func (n *NTPd) PreFunc(ctx context.Context, config config.Configurator) error {
importer := containerd.NewImporter(constants.SystemContainerdNamespace, containerd.WithContainerdAddress(constants.SystemContainerdAddress))

return importer.Import(&containerd.ImportRequest{
Path: "/usr/images/ntpd.tar",
Options: []containerdapi.ImportOpt{

@@ -39,6 +39,7 @@ func (o *OSD) ID(config config.Configurator) string {
// PreFunc implements the Service interface.
func (o *OSD) PreFunc(ctx context.Context, config config.Configurator) error {
importer := containerd.NewImporter(constants.SystemContainerdNamespace, containerd.WithContainerdAddress(constants.SystemContainerdAddress))

return importer.Import(&containerd.ImportRequest{
Path: "/usr/images/osd.tar",
Options: []containerdapi.ImportOpt{
@@ -70,6 +71,7 @@ func (o *OSD) Runner(config config.Configurator) (runner.Runner, error) {
image := "talos/osd"

endpoints := config.Cluster().IPs()

if config.Machine().Type() == machine.Worker {
h, err := kubernetes.NewHelper()
if err != nil {

@@ -35,6 +35,7 @@ func (p *Proxyd) ID(config config.Configurator) string {
// PreFunc implements the Service interface.
func (p *Proxyd) PreFunc(ctx context.Context, config config.Configurator) error {
importer := containerd.NewImporter(constants.SystemContainerdNamespace, containerd.WithContainerdAddress(constants.SystemContainerdAddress))

return importer.Import(&containerd.ImportRequest{
Path: "/usr/images/proxyd.tar",
Options: []containerdapi.ImportOpt{

@@ -35,6 +35,7 @@ func (t *Trustd) ID(config config.Configurator) string {
// PreFunc implements the Service interface.
func (t *Trustd) PreFunc(ctx context.Context, config config.Configurator) error {
importer := containerd.NewImporter(constants.SystemContainerdNamespace, containerd.WithContainerdAddress(constants.SystemContainerdAddress))

return importer.Import(&containerd.ImportRequest{
Path: "/usr/images/trustd.tar",
Options: []containerdapi.ImportOpt{

@@ -49,6 +49,7 @@ func Services(config config.Configurator) *singleton {
running: make(map[string]struct{}),
}
})

return instance
}

@@ -58,6 +59,7 @@ func Services(config config.Configurator) *singleton {
func (s *singleton) Load(services ...Service) []string {
s.mu.Lock()
defer s.mu.Unlock()

if s.terminating {
return nil
}
@@ -86,6 +88,7 @@ func (s *singleton) Load(services ...Service) []string {
func (s *singleton) Start(serviceIDs ...string) error {
s.mu.Lock()
defer s.mu.Unlock()

if s.terminating {
return nil
}
@@ -99,10 +102,12 @@ func (s *singleton) Start(serviceIDs ...string) error {
}

s.runningMu.Lock()

_, running := s.running[id]
if !running {
s.running[id] = struct{}{}
}

s.runningMu.Unlock()

if running {
@@ -111,6 +116,7 @@ func (s *singleton) Start(serviceIDs ...string) error {
}

s.wg.Add(1)

go func(id string, svcrunner *ServiceRunner) {
defer func() {
s.runningMu.Lock()
@@ -130,9 +136,11 @@ func (s *singleton) Start(serviceIDs ...string) error {
func (s *singleton) StartAll() {
s.mu.Lock()
serviceIDs := make([]string, 0, len(s.state))

for id := range s.state {
serviceIDs = append(serviceIDs, id)
}

s.mu.Unlock()

// nolint: errcheck
@@ -155,8 +163,10 @@ func (s *singleton) Shutdown() {
s.mu.Unlock()
return
}

stateCopy := make(map[string]*ServiceRunner)
s.terminating = true

for name, svcrunner := range s.state {
stateCopy[name] = svcrunner
}
@@ -180,6 +190,7 @@ func (s *singleton) Shutdown() {

for name, svcrunner := range stateCopy {
shutdownWg.Add(1)

go func(svcrunner *ServiceRunner, reverseDeps []string) {
defer shutdownWg.Done()
conds := make([]conditions.Condition, len(reverseDeps))
@@ -193,6 +204,7 @@ func (s *singleton) Shutdown() {
svcrunner.Shutdown()
}(svcrunner, reverseDependencies[name])
}

shutdownWg.Wait()

s.wg.Wait()
@@ -229,12 +241,15 @@ func (s *singleton) Stop(ctx context.Context, serviceIDs ...string) (err error)

// Copy current service state
stateCopy := make(map[string]*ServiceRunner)

for _, id := range serviceIDs {
if _, ok := s.state[id]; !ok {
return fmt.Errorf("service not found: %s", id)
}

stateCopy[id] = s.state[id]
}

s.mu.Unlock()

conds := make([]conditions.Condition, 0, len(stateCopy))
@@ -242,6 +257,7 @@ func (s *singleton) Stop(ctx context.Context, serviceIDs ...string) (err error)
// Initiate a shutdown on the specific service
for id, svcrunner := range stateCopy {
svcrunner.Shutdown()

conds = append(conds, WaitForService(StateEventDown, id))
}

@@ -323,6 +339,7 @@ func (s *singleton) APIRestart(ctx context.Context, id string) error {
if err := s.Stop(ctx, id); err != nil {
return err
}

return s.Start(id)
}

@@ -37,7 +37,9 @@ func (suite *SystemServicesSuite) TestStartStop() {
system.Services(nil).LoadAndStart(
&MockService{name: "yolo"},
)

time.Sleep(10 * time.Millisecond)

err := system.Services(nil).Stop(
context.TODO(), "yolo",
)

@@ -20,7 +20,9 @@ var configPath *string

func init() {
log.SetFlags(log.Lshortfile | log.Ldate | log.Lmicroseconds | log.Ltime)

configPath = flag.String("config", "", "the path to the config")

flag.Parse()
}

@@ -32,7 +34,9 @@ func main() {

// Convert links to nic
log.Println("discovering local network interfaces")

var netconf networkd.NetConf

if netconf, err = nwd.Discover(); err != nil {
log.Fatal(err)
}
@@ -41,21 +45,26 @@ func main() {
if err != nil {
log.Fatalf("open config: %v", err)
}

config, err := config.New(content)
if err != nil {
log.Fatalf("open config: %v", err)
}

log.Println("overlaying config network configuration")

if err = netconf.BuildOptions(config); err != nil {
log.Fatal(err)
}

// Configure specified interface
netIfaces := make([]*nic.NetworkInterface, 0, len(netconf))

for link, opts := range netconf {
var iface *nic.NetworkInterface

log.Printf("creating interface %s", link.Name)

iface, err = nic.Create(link, opts...)
if err != nil {
log.Fatal(err)
@@ -71,6 +80,7 @@ func main() {
// kick off the addressing mechanism
// Add any necessary routes
log.Println("configuring interface addressing")

if err = nwd.Configure(netIfaces...); err != nil {
log.Fatal(err)
}

@@ -41,6 +41,7 @@ func (d *DHCP) Discover(ctx context.Context) error {
// TODO do something with context
ack, err := d.discover()
d.Ack = ack

return err
}

@@ -64,6 +65,7 @@ func (d *DHCP) MTU() uint32 {
if err != nil {
return uint32(d.NetIf.MTU)
}

return uint32(mtu)
}

@@ -72,6 +74,7 @@ func (d *DHCP) TTL() time.Duration {
if d.Ack == nil {
return 0
}

return d.Ack.IPAddressLeaseTime(time.Minute * 30)
}

@@ -80,6 +83,7 @@ func (d *DHCP) Family() int {
if d.Ack.YourIPAddr.To4() != nil {
return unix.AF_INET
}

return unix.AF_INET6
}


@@ -36,6 +36,7 @@ func (s *Static) Address() *net.IPNet {
// nolint: errcheck
ip, ipn, _ := net.ParseCIDR(s.Device.CIDR)
ipn.IP = ip

return ipn
}

@@ -52,6 +53,7 @@ func (s *Static) MTU() uint32 {
if mtu == 0 {
mtu = uint32(s.NetIf.MTU)
}

return mtu
}

@@ -66,6 +68,7 @@ func (s *Static) Family() int {
if s.Address().IP.To4() != nil {
return unix.AF_INET
}

return unix.AF_INET6
}

@@ -80,8 +83,10 @@ func (s *Static) Routes() (routes []*Route) {
for _, route := range s.Device.Routes {
// nolint: errcheck
_, ipnet, _ := net.ParseCIDR(route.Network)

routes = append(routes, &Route{Dest: ipnet, Router: net.ParseIP(route.Gateway)})
}

return routes
}


@@ -66,13 +66,17 @@ func writeResolvConf(resolvers []net.IP) error {
resolvers = []net.IP{net.ParseIP(DefaultPrimaryResolver), net.ParseIP(DefaultSecondaryResolver)}
}

var (
resolvconf strings.Builder
err error
)

for idx, resolver := range resolvers {
// Only allow the first 3 nameservers since that is all that will be used
if idx >= 3 {
break
}

if _, err = resolvconf.WriteString(fmt.Sprintf("nameserver %s\n", resolver)); err != nil {
log.Println("failed to add some resolver to resolvconf")
return err
@@ -80,5 +84,6 @@ func writeResolvConf(resolvers []net.IP) error {
}

log.Println("writing resolvconf")

return ioutil.WriteFile("/etc/resolv.conf", []byte(resolvconf.String()), 0644)
}
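
The cap of three resolvers above matches glibc, whose stub resolver reads at most MAXNS (3) nameserver lines from /etc/resolv.conf. A minimal sketch of the same truncation, with made-up resolver addresses:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// Hypothetical inputs: four resolvers, one more than glibc's MAXNS (3).
	resolvers := []net.IP{
		net.ParseIP("1.1.1.1"), net.ParseIP("8.8.8.8"),
		net.ParseIP("9.9.9.9"), net.ParseIP("8.8.4.4"),
	}

	var b strings.Builder

	for idx, r := range resolvers {
		if idx >= 3 { // mirror the cap in writeResolvConf
			break
		}

		fmt.Fprintf(&b, "nameserver %s\n", r)
	}

	fmt.Print(b.String()) // the fourth resolver is silently dropped
}
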
@@ -153,11 +153,13 @@ func (n *Networkd) Renew(ifaces ...*nic.NetworkInterface) {
// halflife.
func (n *Networkd) renew(method address.Addressing) {
renewDuration := method.TTL() / 2

for {
<-time.After(renewDuration)

if err := n.configureInterface(method); err != nil {
log.Printf("failed to renew interface address for %s: %v\n", method.Link().Name, err)

renewDuration = (renewDuration / 2)
} else {
renewDuration = method.TTL() / 2
@@ -191,6 +193,7 @@ func (n *Networkd) configureInterface(method address.Addressing) error {
}

addressExists := false

for _, addr := range addrs {
if method.Address().String() == addr.String() {
addressExists = true
@@ -239,6 +242,7 @@ func (n *Networkd) Hostname(ifaces ...*nic.NetworkInterface) string {
if !method.Valid() {
continue
}

if method.Hostname() != "" {
return method.Hostname()
}
@@ -256,6 +260,7 @@ func (n *Networkd) PrintState() {
log.Println(err)
return
}

for _, r := range rl {
log.Printf("%+v", r)
}
@@ -265,14 +270,18 @@ func (n *Networkd) PrintState() {
log.Println(err)
return
}

for _, link := range links {
log.Printf("%+v", link)

for _, fam := range []int{unix.AF_INET, unix.AF_INET6} {
var addrs []*net.IPNet

addrs, err = n.Conn.Addrs(link, fam)
if err != nil {
log.Println(err)
}

for _, addr := range addrs {
log.Printf("%+v", addr)
}
@@ -280,10 +289,12 @@ func (n *Networkd) PrintState() {
}

var b []byte

b, err = ioutil.ReadFile("/etc/resolv.conf")
if err != nil {
log.Println(err)
return
}

log.Printf("resolv.conf: %s", string(b))
}

@@ -43,6 +43,7 @@ func (n *NetworkInterface) IsIgnored() bool {
if n.Ignore || kernel.ProcCmdline().Get(constants.KernelParamNetworkInterfaceIgnore).Contains(n.Name) {
return true
}

return false
}


@@ -52,6 +52,7 @@ func (r *Registrator) Routes(ctx context.Context, in *empty.Empty) (reply *netwo
// TODO: Remove once we get this sorted on why there's a
// failure here
log.Printf("%+v", rMesg)

continue
}

@@ -121,12 +122,15 @@ func (r *Registrator) Interfaces(ctx context.Context, in *empty.Empty) (reply *n

func toCIDR(family uint8, prefix net.IP, prefixLen int) string {
netLen := 32

if family == unix.AF_INET6 {
netLen = 128
}

ipNet := &net.IPNet{
IP: prefix,
Mask: net.CIDRMask(prefixLen, netLen),
}

return ipNet.String()
}
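
A quick illustration of toCIDR's output format; this sketch swaps the unix.AF_INET6 family constant for a plain bool so it runs on any platform, but the mask logic is the same:

package main

import (
	"fmt"
	"net"
)

// toCIDR renders a prefix and length as CIDR notation, mirroring the
// function above (family flag simplified to a bool for portability).
func toCIDR(isV6 bool, prefix net.IP, prefixLen int) string {
	netLen := 32

	if isV6 {
		netLen = 128
	}

	ipNet := &net.IPNet{
		IP:   prefix,
		Mask: net.CIDRMask(prefixLen, netLen),
	}

	return ipNet.String()
}

func main() {
	fmt.Println(toCIDR(false, net.ParseIP("10.96.0.0"), 12)) // 10.96.0.0/12
	fmt.Println(toCIDR(true, net.ParseIP("fd00::"), 8))      // fd00::/8
}
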
@@ -46,8 +46,8 @@ func (suite *NetworkdSuite) TestRoutes() {

conn, err := grpc.Dial(fmt.Sprintf("%s://%s", "unix", listener.Addr().String()), grpc.WithInsecure())
suite.Assert().NoError(err)

nClient := networkapi.NewNetworkClient(conn)
resp, err := nClient.Routes(context.Background(), &empty.Empty{})
suite.Assert().NoError(err)
suite.Assert().Greater(len(resp.Routes), 0)
@@ -67,8 +67,8 @@ func (suite *NetworkdSuite) TestInterfaces() {

conn, err := grpc.Dial(fmt.Sprintf("%s://%s", "unix", listener.Addr().String()), grpc.WithInsecure())
suite.Assert().NoError(err)

nClient := networkapi.NewNetworkClient(conn)
resp, err := nClient.Interfaces(context.Background(), &empty.Empty{})
suite.Assert().NoError(err)
suite.Assert().Greater(len(resp.Interfaces), 0)

@@ -29,7 +29,9 @@ var configPath *string

func init() {
log.SetFlags(log.Lshortfile | log.Ldate | log.Lmicroseconds | log.Ltime)

configPath = flag.String("config", "", "the path to the config")

flag.Parse()
}

@@ -46,6 +48,7 @@ func main() {
if err != nil {
log.Fatalf("open config: %v", err)
}

config, err := config.New(content)
if err != nil {
log.Fatalf("open config: %v", err)
@@ -64,7 +67,9 @@ func main() {
}

log.Println("Starting ntpd")

errch := make(chan error)

go func() {
errch <- n.Daemon()
}()

@@ -42,6 +42,7 @@ func NewNTPClient(opts ...Option) (*NTP, error) {
func (n *NTP) Daemon() (err error) {
// Do an initial hard set of time to ensure clock skew isn't too far off
var resp *ntp.Response

if resp, err = n.Query(); err != nil {
log.Printf("error querying %s for time, %s", n.Server, err)
return err
@@ -52,6 +53,7 @@ func (n *NTP) Daemon() (err error) {
}

var randSleep time.Duration

for {
// Set some variance with how frequently we poll ntp servers.
// This is based on rand(MaxPoll) + MinPoll so we wait at least
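
The comment cut off by the hunk boundary describes jittered polling: sleep a random duration in [MinPoll, MinPoll+MaxPoll) so that many nodes do not query the same NTP server in lockstep. A minimal sketch of that calculation; the MinPoll and MaxPoll values here are invented, not Talos's:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	minPoll = 64 * time.Second   // hypothetical lower bound
	maxPoll = 1024 * time.Second // hypothetical jitter range
)

func main() {
	// rand(MaxPoll) + MinPoll: at least minPoll, strictly under minPoll+maxPoll.
	randSleep := time.Duration(rand.Int63n(int64(maxPoll))) + minPoll
	fmt.Printf("next NTP query in %s\n", randSleep)
}
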
@@ -103,6 +105,7 @@ func setTime(adjustedTime time.Time) error {
log.Printf("setting time to %s", adjustedTime)

timeval := syscall.NsecToTimeval(adjustedTime.UnixNano())

return syscall.Settimeofday(&timeval)
}


@@ -37,6 +37,7 @@ func (r *Registrator) Register(s *grpc.Server) {
// Time issues a query to the configured ntp server and displays the results
func (r *Registrator) Time(ctx context.Context, in *empty.Empty) (reply *timeapi.TimeReply, err error) {
reply = &timeapi.TimeReply{}

rt, err := r.Ntpd.Query()
if err != nil {
return reply, err
@@ -48,6 +49,7 @@ func (r *Registrator) Time(ctx context.Context, in *empty.Empty) (reply *timeapi
// TimeCheck issues a query to the specified ntp server and displays the results
func (r *Registrator) TimeCheck(ctx context.Context, in *timeapi.TimeRequest) (reply *timeapi.TimeReply, err error) {
reply = &timeapi.TimeReply{}

tc, err := ntp.NewNTPClient(ntp.WithServer(in.Server))
if err != nil {
return reply, err

@@ -53,8 +53,8 @@ func (suite *NtpdSuite) TestTime() {

conn, err := grpc.Dial(fmt.Sprintf("%s://%s", "unix", listener.Addr().String()), grpc.WithInsecure())
suite.Assert().NoError(err)

nClient := timeapi.NewTimeClient(conn)
resp, err := nClient.Time(context.Background(), &empty.Empty{})
suite.Assert().NoError(err)
suite.Assert().Equal(resp.Server, testServer)
@@ -84,8 +84,8 @@ func (suite *NtpdSuite) TestTimeCheck() {

conn, err := grpc.Dial(fmt.Sprintf("%s://%s", "unix", listener.Addr().String()), grpc.WithInsecure())
suite.Assert().NoError(err)

nClient := timeapi.NewTimeClient(conn)
resp, err := nClient.TimeCheck(context.Background(), &timeapi.TimeRequest{Server: testServer})
suite.Assert().NoError(err)
suite.Assert().Equal(resp.Server, testServer)

@@ -92,9 +92,11 @@ func copyClientServer(msg interface{}, client grpc.ClientStream, srv grpc.Server
if err == io.EOF {
break
}

if err != nil {
return err
}

err = srv.SendMsg(msg)
if err != nil {
return err
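
For context, copyClientServer is the standard gRPC stream pump: receive until io.EOF, forward each message. A minimal, hypothetical rendering of the full loop (only the stream interfaces are assumed):

package proxy

import (
	"io"

	"google.golang.org/grpc"
)

// pump forwards every message from src to dst until the stream ends.
// RecvMsg returns io.EOF on clean shutdown, the loop's only successful exit.
func pump(msg interface{}, src grpc.ClientStream, dst grpc.ServerStream) error {
	for {
		err := src.RecvMsg(msg)
		if err == io.EOF {
			return nil // sender finished
		}

		if err != nil {
			return err
		}

		if err := dst.SendMsg(msg); err != nil {
			return err
		}
	}
}
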
@@ -57,6 +57,7 @@ func (r *Registrator) Kubeconfig(ctx context.Context, in *empty.Empty) (data *os
if err != nil {
return
}

data = &osapi.Data{
Bytes: fileBytes,
}
@@ -187,6 +188,7 @@ func (r *Registrator) Dmesg(ctx context.Context, in *empty.Empty) (data *osapi.D
}
// Read all messages from the log (non-destructively)
buf := make([]byte, size)

n, err := unix.Klogctl(constants.SYSLOG_ACTION_READ_ALL, buf)
if err != nil {
return
@@ -206,7 +208,9 @@ func (r *Registrator) Logs(req *osapi.LogsRequest, l osapi.OS_LogsServer) (err e
switch {
case req.Namespace == "system" || req.Id == "kubelet" || req.Id == "kubeadm":
filename := filepath.Join(constants.DefaultLogPath, filepath.Base(req.Id)+".log")

var file *os.File

file, err = os.OpenFile(filename, os.O_RDONLY, 0)
if err != nil {
return
@@ -217,6 +221,7 @@ func (r *Registrator) Logs(req *osapi.LogsRequest, l osapi.OS_LogsServer) (err e
chunk = filechunker.NewChunker(file)
default:
var file io.Closer

if chunk, file, err = k8slogs(l.Context(), req); err != nil {
return err
}
@@ -259,14 +264,17 @@ func (r *Registrator) Processes(ctx context.Context, in *empty.Empty) (reply *os
if err != nil {
return nil, err
}

executable, err = proc.Executable()
if err != nil {
return nil, err
}

args, err = proc.CmdLine()
if err != nil {
return nil, err
}

stats, err = proc.Stat()
if err != nil {
return nil, err
@@ -299,12 +307,14 @@ func getContainerInspector(ctx context.Context, namespace string, driver osapi.C
if namespace != criconstants.K8sContainerdNamespace {
return nil, errors.New("CRI inspector is supported only for K8s namespace")
}

return cri.NewInspector(ctx)
case osapi.ContainerDriver_CONTAINERD:
addr := constants.ContainerdAddress
if namespace == constants.SystemContainerdNamespace {
addr = constants.SystemContainerdAddress
}

return containerd.NewInspector(ctx, namespace, containerd.WithContainerdAddress(addr))
default:
return nil, errors.Errorf("unsupported driver %q", driver)
@@ -323,6 +333,7 @@ func k8slogs(ctx context.Context, req *osapi.LogsRequest) (chunker.Chunker, io.C
if err != nil {
return nil, nil, err
}

if container == nil {
return nil, nil, fmt.Errorf("container %q not found", req.Id)
}

@@ -30,8 +30,10 @@ var (

func init() {
log.SetFlags(log.Lshortfile | log.Ldate | log.Lmicroseconds | log.Ltime)

configPath = flag.String("config", "", "the path to the config")
endpoints = flag.String("endpoints", "", "the IPs of the control plane nodes")

flag.Parse()
}

@@ -45,6 +47,7 @@ func main() {
if err != nil {
log.Fatalf("open config: %v", err)
}

config, err := config.New(content)
if err != nil {
log.Fatalf("open config: %v", err)
@@ -67,6 +70,7 @@ func main() {
}

var provider tls.CertificateProvider

provider, err = tls.NewRemoteRenewingFileCertificateProvider(config.Machine().Security().Token(), strings.Split(*endpoints, ","), constants.TrustdPort, hostname, ips)
if err != nil {
log.Fatalf("failed to create remote certificate provider: %+v", err)

@@ -56,6 +56,7 @@ func (r *ReverseProxy) Listen(address string) (err error) {
if err != nil {
return err
}

log.Printf("listening on %v", l.Addr())

for {
@@ -97,10 +98,13 @@ func (r *ReverseProxy) AddBackend(uid, addr string) (added bool) {
func (r *ReverseProxy) DeleteBackend(uid string) (deleted bool) {
r.mux.Lock()
defer r.mux.Unlock()

if _, ok := r.backends[uid]; ok {
delete(r.backends, uid)

deleted = true
}

r.setCurrent()

return deleted
@@ -115,9 +119,11 @@ func (r *ReverseProxy) GetBackend() (backend *backend.Backend) {
func (r *ReverseProxy) IncrementBackend(uid string) {
r.mux.Lock()
defer r.mux.Unlock()

if _, ok := r.backends[uid]; !ok {
return
}

r.backends[uid].Connections++
r.setCurrent()
}
@@ -126,9 +132,11 @@ func (r *ReverseProxy) IncrementBackend(uid string) {
func (r *ReverseProxy) DecrementBackend(uid string) {
r.mux.Lock()
defer r.mux.Unlock()

if _, ok := r.backends[uid]; !ok {
return
}

// Avoid setting the connections to the max uint32 value.
if r.backends[uid].Connections == 0 {
return
@@ -239,6 +247,7 @@ func (r *ReverseProxy) DeleteFunc() func(obj interface{}) {

func (r *ReverseProxy) setCurrent() {
least := uint32(math.MaxUint32)

for _, b := range r.backends {
switch {
case b.Connections == 0:
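
setCurrent is a least-connections pass over the backend map. A compact, hypothetical version of the same scan (field names assumed from the surrounding diff, tie-breaking unspecified):

package main

import (
	"fmt"
	"math"
)

type backend struct {
	Addr        string
	Connections uint32
}

// pickLeastLoaded returns the backend with the fewest active connections.
func pickLeastLoaded(backends map[string]*backend) *backend {
	least := uint32(math.MaxUint32)

	var current *backend

	for _, b := range backends {
		if b.Connections < least {
			least = b.Connections
			current = b
		}
	}

	return current
}

func main() {
	backends := map[string]*backend{
		"a": {Addr: "10.0.0.2:6443", Connections: 3},
		"b": {Addr: "10.0.0.3:6443", Connections: 1},
	}

	fmt.Println(pickLeastLoaded(backends).Addr) // 10.0.0.3:6443
}
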
@@ -259,6 +268,7 @@ func (r *ReverseProxy) proxyConnection(c1 net.Conn) {
log.Printf("no available backend, closing remote connection: %s", c1.RemoteAddr().String())
// nolint: errcheck
c1.Close()

return
}

@@ -267,6 +277,7 @@ func (r *ReverseProxy) proxyConnection(c1 net.Conn) {
log.Printf("dial %v failed, deleting backend: %v", backend.Addr, err)
r.DeleteBackend(backend.UID)
r.proxyConnection(c1)

return
}

@@ -287,6 +298,7 @@ func (r *ReverseProxy) joinConnections(uid string, c1 net.Conn, c2 net.Conn) {
if !ok {
return
}

tcp2, ok := c2.(*net.TCPConn)
if !ok {
return
@@ -295,11 +307,13 @@ func (r *ReverseProxy) joinConnections(uid string, c1 net.Conn, c2 net.Conn) {
log.Printf("%s -> %s", c1.RemoteAddr(), c2.RemoteAddr())

var wg sync.WaitGroup

join := func(dst *net.TCPConn, src *net.TCPConn) {
// Close after the copy to avoid a deadlock.
// nolint: errcheck
defer dst.CloseRead()
defer wg.Done()

_, err := io.Copy(dst, src)
if err != nil {
log.Printf("%v", err)
@@ -307,8 +321,11 @@ func (r *ReverseProxy) joinConnections(uid string, c1 net.Conn, c2 net.Conn) {
}

wg.Add(2)

go join(tcp1, tcp2)

go join(tcp2, tcp1)

wg.Wait()
}
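
The join helper is the usual TCP splice: one goroutine per direction, each half-closing its side when its copy finishes so the opposite direction can still drain, which is what the deadlock comment refers to. A stripped-down sketch of the idiom; note it uses CloseWrite on the destination, the more common variant, whereas the code above calls CloseRead:

package proxy

import (
	"io"
	"log"
	"net"
	"sync"
)

// splice joins two TCP connections bidirectionally and returns once
// both directions have finished copying.
func splice(a, b *net.TCPConn) {
	var wg sync.WaitGroup

	copyHalf := func(dst, src *net.TCPConn) {
		defer wg.Done()

		if _, err := io.Copy(dst, src); err != nil {
			log.Printf("copy: %v", err)
		}

		// Half-close: signal EOF to dst's peer without tearing down
		// the opposite direction.
		// nolint: errcheck
		dst.CloseWrite()
	}

	wg.Add(2)

	go copyHalf(a, b)
	go copyHalf(b, a)

	wg.Wait()
}
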
@@ -370,10 +387,13 @@ func (r *ReverseProxy) Bootstrap(ctx context.Context) {
func (r *ReverseProxy) Backends() map[string]*backend.Backend {
r.mux.Lock()
defer r.mux.Unlock()

backends := make(map[string]*backend.Backend)

for uid, addr := range r.backends {
backends[uid] = addr
}

return backends
}


@@ -38,6 +38,7 @@ func (suite *ProxydSuite) TestWatch() {
_, cancel := context.WithCancel(context.Background())
r, err := NewReverseProxy([]string{"127.0.0.1"}, cancel)
suite.Assert().NoError(err)

defer r.Shutdown()

// Generate a simple pod
@@ -49,6 +50,7 @@ func (suite *ProxydSuite) TestWatch() {
go r.Watch(client)

output := make(chan string)

go func() {
var be map[string]*backend.Backend
for {
@@ -76,6 +78,7 @@ func (suite *ProxydSuite) TestAddFunc() {
_, cancel := context.WithCancel(context.Background())
r, err := NewReverseProxy([]string{"127.0.0.1"}, cancel)
suite.Assert().NoError(err)

defer r.Shutdown()

for i := 0; i < 5; i++ {
@@ -96,16 +99,20 @@ func (suite *ProxydSuite) TestDeleteFunc() {
_, cancel := context.WithCancel(context.Background())
r, err := NewReverseProxy([]string{"127.0.0.1"}, cancel)
suite.Assert().NoError(err)

defer r.Shutdown()

// Add some sample backends
pods := make([]*v1.Pod, 5)

for i := 0; i < 5; i++ {
pods[i] = genPod()
}

for _, pod := range pods {
r.AddFunc()(pod)
}

// Delete all sample backends
for _, pod := range pods {
r.DeleteFunc()(pod)
@@ -119,6 +126,7 @@ func (suite *ProxydSuite) TestUpdateFunc() {
_, cancel := context.WithCancel(context.Background())
r, err := NewReverseProxy([]string{"127.0.0.1"}, cancel)
suite.Assert().NoError(err)

defer r.Shutdown()

// Add some sample backend

@@ -35,6 +35,7 @@ func (r *Registrator) Register(s *grpc.Server) {
// Backends exposes the internal state of backends in proxyd
func (r *Registrator) Backends(ctx context.Context, in *empty.Empty) (reply *proto.BackendsReply, err error) {
reply = &proto.BackendsReply{}

for _, be := range r.Proxyd.Backends() {
protobe := &proto.Backend{
Id: be.UID,

@@ -57,8 +57,8 @@ func (suite *ProxydSuite) TestBackends() {

conn, err := grpc.Dial(fmt.Sprintf("%s://%s", "unix", listener.Addr().String()), grpc.WithInsecure())
suite.Assert().NoError(err)

pClient := proto.NewProxydClient(conn)
resp, err := pClient.Backends(context.Background(), &empty.Empty{})
suite.Assert().NoError(err)
suite.Assert().Equal(resp.Backends[0].Addr, testBackend)

@@ -27,7 +27,9 @@ var configPath *string

func init() {
log.SetFlags(log.Lshortfile | log.Ldate | log.Lmicroseconds | log.Ltime)

configPath = flag.String("config", "", "the path to the config")

flag.Parse()
}

@@ -40,12 +42,14 @@ func main() {
if err != nil {
log.Fatalf("open config: %v", err)
}

config, err := config.New(content)
if err != nil {
log.Fatalf("open config: %v", err)
}

bootstrapCtx, bootstrapCancel := context.WithCancel(context.Background())

r, err := frontend.NewReverseProxy(config.Cluster().IPs(), bootstrapCancel)
if err != nil {
log.Fatalf("failed to initialize the reverse proxy: %v", err)
@@ -91,11 +95,12 @@ func waitForKube(r *frontend.ReverseProxy) {
if err != nil {
log.Fatalf("failed to get local address: %v", err)
}

if len(ips) == 0 {
log.Fatalf("no IP address found for local api server")
}

ip := ips[0]
config.Host = ip.String() + ":6443"

clientset, err := kubernetes.NewForConfig(config)

@@ -49,11 +49,13 @@ func (r *Registrator) Certificate(ctx context.Context, in *securityapi.Certifica
// ReadFile implements the securityapi.SecurityServer interface.
func (r *Registrator) ReadFile(ctx context.Context, in *securityapi.ReadFileRequest) (resp *securityapi.ReadFileResponse, err error) {
var b []byte

if b, err = ioutil.ReadFile(in.Path); err != nil {
return nil, err
}

log.Printf("read file on disk: %s", in.Path)

resp = &securityapi.ReadFileResponse{Data: b}

return resp, nil
@@ -64,11 +66,13 @@ func (r *Registrator) WriteFile(ctx context.Context, in *securityapi.WriteFileRe
if err = os.MkdirAll(path.Dir(in.Path), os.ModeDir); err != nil {
return
}

if err = ioutil.WriteFile(in.Path, in.Data, os.FileMode(in.Perm)); err != nil {
return
}

log.Printf("wrote file to disk: %s", in.Path)

resp = &securityapi.WriteFileResponse{}

return resp, nil
Some files were not shown because too many files have changed in this diff