major update

Signed-off-by: Wataru Ishida <wataru.ishid@gmail.com>
This commit is contained in:
Wataru Ishida
2022-06-07 01:32:10 +00:00
parent 5f1281fd88
commit 02419c522b
456 changed files with 34810 additions and 654 deletions

5
.gitmodules vendored
View File

@@ -1,6 +1,9 @@
[submodule "sm/ONL"]
path = sm/ONL
url = https://github.com/microsonic/OpenNetworkLinux.git
url = https://github.com/oopt-goldstone/OpenNetworkLinux.git
[submodule "sm/sonic-buildimage"]
path = sm/sonic-buildimage
url = https://github.com/microsonic/sonic-buildimage.git
[submodule "sm/linux-gft"]
path = sm/linux-gft
url = https://github.com/oopt-goldstone/linux-gft.git

View File

@@ -1,5 +1,3 @@
all: autobuild
ifndef GOLDSTONE_BUILDER_IMAGE
GOLDSTONE_BUILDER_IMAGE = gs-builder
endif
@@ -51,12 +49,19 @@ BUILDER_OPTS = \
--isolate \
# THIS LINE INTENTIONALLY LEFT BLANK
ARCH = amd64
ALL_ARCHES = arm64 amd64
# Build rule for each architecture.
define build_arch_template
$(1) :
$(MAKE) -C builds/$(1)
endef
$(foreach a,$(ALL_ARCHES),$(eval $(call build_arch_template,$(a))))
all: builder docker
autobuild:
$(MAKE) -C builds/$(ARCH)
autobuild: $(ALL_ARCHES)
docker-check:
@which docker > /dev/null || (echo "*** Docker appears to be missing. Please install docker in order to build Goldstone." && exit 1)
@@ -67,6 +72,8 @@ docker-debug: docker-check
$(ONL)/docker/tools/onlbuilder $(BUILDER_OPTS) $(VOLUMES_OPTS) -c tools/debug.sh
builder:
docker pull --platform=linux/amd64 python:3-buster
docker tag python:3-buster python:3-buster-amd64
cd docker/images/builder && docker build -t $(GOLDSTONE_BUILDER_IMAGE) .
docker: docker-check

View File

@@ -1,15 +1,46 @@
# Goldstone
Goldstone NOS
---
Goldstone is an open source network OS for TIP OOPT networking hardware
Goldstone NOS is an open source network OS for [TIP OOPT](https://telecominfraproject.com/oopt/) networking hardware
### Supported Hardware
- Wistron WTP-01-02-00 (Galileo 1)
- Wistron WTP-01-C1-00 (Galileo FlexT)
- Edgecore AS7716-24SC/XC (Cassini)
### How to build
- prerequisite: Docker ( version >= 18.09 )
#### Prerequisite
- Git
- Docker ( version >= 18.09, enable [buildkit](https://docs.docker.com/develop/develop-images/build_enhancements/) )
- Python2
- make
```
$ git clone https://github.com/Telecominfraproject/goldstone.git
$ cd goldstone
$ docker run --rm --privileged multiarch/qemu-user-static --reset -p yes # https://github.com/multiarch/qemu-user-static
$ git clone https://github.com/oopt-goldstone/goldstone-buildimage.git
$ cd goldstone-buildimage
$ git submodule update --init
$ make builder
$ make docker
```
This will build [ONIE](https://opencomputeproject.github.io/onie/) installers that can be installed on the supported hardware under `RELEASE` directory.
```
$ find RELEASE
RELEASE
RELEASE/buster
RELEASE/buster/arm64
RELEASE/buster/arm64/goldstone-ea520b9_ONL-OS10_2022-06-08.2311-ea520b9_ARM64.swi.md5sum
RELEASE/buster/arm64/goldstone-ea520b9_ONL-OS_2022-06-08.2311-ea520b9_ARM64_INSTALLER
RELEASE/buster/arm64/goldstone-ea520b9_ONL-OS_2022-06-08.2311-ea520b9_ARM64_INSTALLER.md5sum
RELEASE/buster/arm64/goldstone-ea520b9_ONL-OS10_2022-06-08.2311-ea520b9_ARM64.swi
RELEASE/buster/amd64
RELEASE/buster/amd64/goldstone-ea520b9_ONL-OS10_2022-06-08.2311-ea520b9_AMD64.swi.md5sum
RELEASE/buster/amd64/goldstone-ea520b9_ONL-OS_2022-06-08.2311-ea520b9_AMD64_INSTALLER
RELEASE/buster/amd64/goldstone-ea520b9_ONL-OS_2022-06-08.2311-ea520b9_AMD64_INSTALLER.md5sum
RELEASE/buster/amd64/goldstone-ea520b9_ONL-OS10_2022-06-08.2311-ea520b9_AMD64.swi
```

View File

@@ -1 +0,0 @@
!include $X1/builds/any/installer/APKG.yml ARCH=amd64 BOOTMODE=swi

View File

@@ -1,3 +0,0 @@
BOOTMODE=SWI
include $(X1)/make/config.amd64.mk
include $(X1)/builds/any/installer/builds/Makefile

View File

@@ -0,0 +1,32 @@
- apt-transport-https
- ca-certificates
- dmidecode
- parted
- grub2
- efibootmgr
- gdisk
- ipmitool
- iptables
- x1-upgrade
- onl-kernel-5.4-lts-x86-64-all-modules
- x1-saibcm-kernel-5.4-lts-x86-64-all:amd64
- gs-usonic:amd64
- gs-tai:amd64
- k3s:amd64
- stern:amd64
- gs-mgmt:amd64
- cfp2piu-kernel-5.4-lts-x86-64-all:amd64
- libyang1:amd64
- sysrepo:amd64
- gscli:amd64
- onlp-py3:amd64
- gssystem:amd64
- libtac2:amd64
- libtac2-bin:amd64
- libpam-tacplus:amd64
- libnss-tacplus:amd64
- gsyang:all
- netopeer2:amd64
- gs-snmp:amd64
- gs-xlate-oc:amd64
- python3.10:amd64

View File

@@ -22,10 +22,6 @@ ifndef RFS_DIR
RFS_DIR := $(RFS_WORKDIR)/rootfs-$(ARCH).d
endif
ifndef RFS_CPIO
RFS_CPIO := $(RFS_WORKDIR)/rootfs-$(ARCH).cpio.gz
endif
ifndef RFS_SQUASH
RFS_SQUASH := $(RFS_WORKDIR)/rootfs-$(ARCH).sqsh
endif

View File

@@ -0,0 +1,4 @@
source-directory /etc/network/interfaces.d
auto eth0
iface eth0 inet dhcp

View File

@@ -1,3 +1,3 @@
ROOTFS_PACKAGE := x1-rootfs
ROOTFS_PACKAGE := goldstone-rootfs
include $(X1)/make/config.amd64.mk
include $(ONL)/make/swi.mk

View File

@@ -3,17 +3,14 @@ variables:
prerequisites:
broken: true
packages: [ "x1-swi:$ARCH" ]
packages: [ "goldstone-swi:$ARCH" ]
packages:
- name: x1-installer-$BOOTMODE
- name: goldstone-installer-$BOOTMODE
summary: Goldstone Network OS $ARCH Installer
arch: $ARCH
version: 0.$FNAME_RELEASE_ID
copyright: Copyright 2018 Big Switch Networks
maintainer: support@bigswitch.com
changelog: Initial
support: support@bigswitch.com
maintainer: goldstone-nos@googlegroups.com
files:
builds/*INSTALLER : $$PKG_INSTALL/

View File

@@ -2,18 +2,18 @@ ifndef ARCH
$(error $$ARCH not set)
endif
ifndef BOOTMODE
$(error $$BOOTMODE not set)
ifneq ($(BOOTMODE), INSTALLED)
$(error $$BOOTMODE is not INSTALLED but $BOOTMOT)
endif
# Hardcoded to match ONL File naming conventions.
include $(X1)/make/versions/version-goldstone.mk
INSTALLER_NAME=$(FNAME_PRODUCT_VERSION)_ONL-OS_$(FNAME_BUILD_ID)_$(UARCH)_$(BOOTMODE)_INSTALLER
INSTALLER_NAME=$(FNAME_PRODUCT_VERSION)_$(FNAME_BUILD_ID)_$(UARCH)_INSTALLER
MKINSTALLER_OPTS := \
--arch $(ARCH) \
--boot-config boot-config \
--swi x1-swi:$(ARCH) \
--swi goldstone-swi:$(ARCH) \
--add-file zerotouch.json \
--onl-version "$(VERSION_STRING)" \
# THIS LINE INTENTIONALLY LEFT BLANK

View File

@@ -2,16 +2,12 @@ variables:
!include $X1/make/versions/version-goldstone.yml
packages:
- name: x1-rootfs
- name: goldstone-rootfs
summary: Goldstone Network OS Root Filesystem
arch: $ARCH
version: 0.$FNAME_RELEASE_ID
copyright: Copyright 2018 Big Switch Networks
maintainer: support@bigswitch.com
changelog: Initial
support: support@bigswitch.com
maintainer: goldstone-nos@googlegroups.com
files:
builds/$ONL_DEBIAN_SUITE/rootfs-$ARCH.cpio.gz : $$PKG_INSTALL/
builds/$ONL_DEBIAN_SUITE/rootfs-$ARCH.sqsh : $$PKG_INSTALL/
builds/$ONL_DEBIAN_SUITE/manifest.json : $$PKG_INSTALL/

View File

@@ -43,3 +43,4 @@
- watchdog
- gs-config
- python-parted
- jq

View File

@@ -6,7 +6,7 @@ Packages: &Packages
- !script $ONL/tools/onl-platform-pkgs.py ${PLATFORM_LIST}
- !script $X1/tools/goldstone-platform-pkgs.py ${PLATFORM_LIST}
- !include $X1/builds/any/rootfs/all-packages.yml
- !include $X1/builds/any/rootfs/${ARCH}-packages.yml
- !include $X1/builds/$ARCH/rootfs/arch-packages.yml
Multistrap:
General:
@@ -45,6 +45,7 @@ Configure:
- $ONL/builds/any/rootfs/${ONL_DEBIAN_SUITE}/common/overlay
- $ONL/builds/any/rootfs/${ONL_DEBIAN_SUITE}/${INIT}/overlay
- $X1/builds/any/rootfs/overlay
- $X1/builds/${ARCH}/rootfs/overlay
update-rc.d:
- 'onlpd defaults'
@@ -69,12 +70,18 @@ Configure:
console: True
PermitRootLogin: 'yes'
groups:
gsmgmt:
force: true
users:
root:
password: x1
admin:
password: admin
shell: /usr/bin/gscli
shell: /usr/local/bin/gscli
groups:
- gsmgmt
manifests:
'/etc/onl/rootfs/manifest.json' :
@@ -103,3 +110,5 @@ Configure:
- 'sudo mkdir -p __rfs__/var/tmp'
- 'sudo mkdir -p __rfs__/host/warmboot'
- 'sudo mkdir -p __rfs__/var/log/swss'
- 'sudo chroot __rfs__ update-alternatives --set iptables /usr/sbin/iptables-legacy'
- 'if [ ${ARCH} = amd64 ]; then sudo chroot __rfs__ systemctl enable ipmievd.service; fi'

View File

@@ -3,17 +3,14 @@ variables:
prerequisites:
broken: true
packages: [ "x1-rootfs:$ARCH" ]
packages: [ "goldstone-rootfs:$ARCH" ]
packages:
- name: x1-swi
- name: goldstone-swi
summary: Goldstone Network OS Switch Image (All $ARCH) Platforms)
arch: $ARCH
version: 0.$FNAME_RELEASE_ID
copyright: Copyright 2018 Big Switch Networks
maintainer: support@bigswitch.com
changelog: Initial
support: support@bigswitch.com
maintainer: goldstone-nos@googlegroups.com
files:
builds/$ONL_DEBIAN_SUITE/*.swi : $$PKG_INSTALL/

1
builds/arm64/Makefile Normal file
View File

@@ -0,0 +1 @@
include $(ONL)/make/arch-build.mk

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

View File

@@ -0,0 +1 @@
!include $X1/builds/any/installer/APKG.yml ARCH=arm64 BOOTMODE=installed

View File

@@ -0,0 +1,3 @@
BOOTMODE=INSTALLED
include $(X1)/make/config.arm64.mk
include $(X1)/builds/any/installer/builds/Makefile

View File

@@ -1,3 +1,3 @@
NETDEV=eth0
BOOTMODE=SWI
BOOTMODE=INSTALLED
SWI=images::latest

1
builds/arm64/rootfs/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.d/

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

View File

@@ -0,0 +1 @@
!include $X1/builds/any/rootfs/APKG.yml ARCH=arm64

View File

@@ -0,0 +1,29 @@
- apt-transport-https
- ca-certificates
- dmidecode
- parted
- gdisk
- iptables
- u-boot-tools
- libatomic1
- goldstone-kernel-5.4-lts-arm64-all-modules
- k3s:arm64
- stern:arm64
- gs-tai:arm64
- gs-tai-gearbox:arm64
- gs-tai-dpll:arm64
- gs-mgmt:arm64
- libyang1:arm64
- sysrepo:arm64
- gscli:arm64
- onlp-py3:arm64
- gssystem:arm64
- libtac2:arm64
- libtac2-bin:arm64
- libpam-tacplus:arm64
- libnss-tacplus:arm64
- gsyang:all
- netopeer2:arm64
- gs-snmp:arm64
- gs-xlate-oc:arm64
- python3.10:arm64

1
builds/arm64/rootfs/builds/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
manifest.json

View File

@@ -0,0 +1,53 @@
include $(X1)/make/config.arm64.mk
ifndef PLATFORM_LIST
export PLATFORM_LIST=$(shell onlpm --list-platforms --arch arm64 --csv )
endif
RFS_CONFIG := $(X1)/builds/any/rootfs/rfs.yml
ifndef ARCH
$(error $$ARCH must be specified)
endif
ifndef RFS_CONFIG
$(error $$RFS_CONFIG must be set to the RFS yaml configuration file)
endif
ifndef RFS_WORKDIR
RFS_WORKDIR := $(ONL_DEBIAN_SUITE)
endif
ifndef RFS_DIR
RFS_DIR := $(RFS_WORKDIR)/rootfs-$(ARCH).d
endif
ifndef RFS_SQUASH
RFS_SQUASH := $(RFS_WORKDIR)/rootfs-$(ARCH).sqsh
endif
RFS_COMMAND := $(ONL)/tools/onlrfs.py --config $(RFS_CONFIG) --arch $(ARCH) --dir $(RFS_DIR)
ifdef RFS_CPIO
RFS_COMMAND += --cpio $(RFS_CPIO)
endif
ifdef RFS_SQUASH
RFS_COMMAND += --squash $(RFS_SQUASH)
endif
ifndef RFS_MANIFEST
RFS_MANIFEST := etc/goldstone/rootfs/manifest.json
endif
LOCAL_MANIFEST := $(RFS_WORKDIR)/manifest.json
RFS: clean
$(ONL_V_at) $(RFS_COMMAND)
$(ONL_V_at) [ -f $(RFS_DIR)/$(RFS_MANIFEST) ] && sudo cp $(RFS_DIR)/$(RFS_MANIFEST) $(LOCAL_MANIFEST)
clean:
$(ONL_V_at) sudo rm -rf $(RFS_WORKDIR)
show-packages:
$(ONL_V_at) $(RFS_COMMAND) --show-packages

View File

@@ -0,0 +1,7 @@
source-directory /etc/network/interfaces.d
auto eth0
iface eth0 inet manual
auto swp2
iface swp2 inet dhcp

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

1
builds/arm64/swi/PKG.yml Normal file
View File

@@ -0,0 +1 @@
!include $X1/builds/any/swi/APKG.yml ARCH=arm64

2
builds/arm64/swi/builds/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
manifest.json
zerotouch.json

View File

@@ -0,0 +1,3 @@
ROOTFS_PACKAGE := goldstone-rootfs
include $(X1)/make/config.arm64.mk
include $(ONL)/make/swi.mk

View File

@@ -1,7 +1,26 @@
FROM alpine:latest AS buildx
RUN apk add --no-cache curl jq
RUN curl -sS https://api.github.com/repos/docker/buildx/releases/latest | \
jq -r '.assets[].browser_download_url' | grep linux-amd64 | xargs curl -sSL > /docker-buildx
RUN chmod a+x /docker-buildx
FROM opennetworklinux/builder10:1.2
RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -qy apt-transport-https ed libelf-dev python-pip ca-certificates apt-cacher-ng
RUN apt update --allow-releaseinfo-change && DEBIAN_FRONTEND=noninteractive apt install -qy apt-transport-https ed libelf-dev python-pip ca-certificates apt-cacher-ng curl
# Install Docker cli
RUN wget -O /tmp/docker.tgz https://download.docker.com/linux/static/stable/x86_64/docker-19.03.9.tgz && tar xf /tmp/docker.tgz -C /tmp && mv /tmp/docker/docker /usr/bin/ && rm -rf /tmp/*
COPY --from=docker:20.10 /usr/local/bin/docker /usr/local/bin/
COPY --from=buildx /docker-buildx /usr/local/lib/docker/cli-plugins/
COPY --from=python:3-buster-amd64 /usr/local/bin/python3 /usr/local/bin/python3
COPY --from=python:3-buster-amd64 /usr/local/lib/python3.10 /usr/local/lib/python3.10
COPY --from=python:3-buster-amd64 /usr/local/lib/libpython3.10.so.1.0 /usr/local/lib/libpython3.10.so.1.0
COPY --from=python:3-buster-amd64 /usr/local/include/python3.10 /usr/local/include/python3.10
RUN ldconfig
RUN git clone https://github.com/AgentD/squashfs-tools-ng.git && cd squashfs-tools-ng && ./autogen.sh && ./configure && make && make install && ldconfig
RUN cd /usr/bin/ && curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
RUN curl -LO https://github.com/oras-project/oras/releases/download/v0.12.0/oras_0.12.0_linux_amd64.tar.gz && mkdir -p oras-install/ && tar -zxf oras_0.12.0_*.tar.gz -C oras-install/ && mv oras-install/oras /usr/local/bin/ && rm -rf oras_0.12.0_*.tar.gz oras-install/

2
make/config.arm64.mk Normal file
View File

@@ -0,0 +1,2 @@
include $(ONL)/make/config.arm64.mk
include $(X1)/make/config.mk

11
make/mgmt.mk Normal file
View File

@@ -0,0 +1,11 @@
GS_MGMT_IMAGE_REPO ?= ghcr.io/oopt-goldstone/mgmt
GS_MGMT_VERSION ?= v0.5.0
.PHONY: manifests
manifests:
rm -rf manifests && mkdir manifests
for file in $(wildcard $(TEMPLATE_DIR)/*.yaml);\
do\
sed -e s!IMAGE_TAG!$(IMAGE_TAG)!g -e s!MGMT_IMAGE_REPO!$(GS_MGMT_IMAGE_REPO)!g $$file > manifests/$$(basename $$file);\
done

View File

@@ -2,13 +2,7 @@ packages:
- name: gs-config
arch: all
version: 1.0.0
copyright: Copyright 2020 Wataru Ishida
maintainer: wataru.ishid@gmail.com
changelog: Initial
support: wataru.ishid@gmail.com
maintainer: goldstone-nos@googlegroups.com
summary: Goldstone config file
after-install: $__DIR__/after-install.sh
before-remove: $__DIR__/before-remove.sh
files:
builds/gs-config.service: /etc/systemd/system/
builds/gs_config.sh: /usr/bin/
builds/80.gs-platform-baseconf: /etc/boot.d/

View File

@@ -0,0 +1,35 @@
#!/bin/bash
set -eux
source /etc/profile.d/onl-platform-current.sh # modify $PATH to include onlpdump
if [ -e /etc/goldstone/platform ]; then
PLATFORM=`cat /etc/goldstone/platform`
elif [ -e /etc/onl/platform ]; then
PLATFORM=`cat /etc/onl/platform`
mkdir -p /etc/goldstone
echo $PLATFORM > /etc/goldstone/platform
else
PLATFORM=`onlpdump -o | awk '/Platform Name:/{ print $3 }'`
if [ -z "$PLATFORM" ]; then
echo "no platform detected by onlpdump command"
exit 1
fi
PLATFORM=${PLATFORM//_/-}
mkdir -p /etc/goldstone
echo $PLATFORM > /etc/goldstone/platform
fi
rm -rf /var/lib/goldstone/device/current
if [ ! -d /var/lib/goldstone/device/$PLATFORM ]; then
echo "/var/lib/goldstone/device/$PLATFORM not found"
exit 1
fi
cd /var/lib/goldstone/device && ln -sf $PLATFORM current
if [ -e /var/lib/goldstone/device/current/init.sh ]; then
/var/lib/goldstone/device/current/init.sh
fi

View File

@@ -1,3 +1,4 @@
K3S_DIGEST := 04443fbf9e01ff9c3ad5b1dec8cac0df5885e5aac7e3b79196e35a9023e92e25
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/k3s/builds/Makefile

View File

@@ -1,15 +1 @@
packages:
- name: gs-mgmt
arch: amd64
version: 1.0.0
copyright: Copyright 2020 Wataru Ishida
maintainer: wataru.ishid@gmail.com
changelog: Initial
support: wataru.ishid@gmail.com
summary: Goldstone Management Framework
before-remove: $__DIR__/before-remove.sh
depends:
- k3s
files:
builds/mgmt-amd64.tar: /var/lib/rancher/k3s/agent/images/
builds/manifests: /var/lib/rancher/k3s/server/manifests/mgmt/
!include $X1/packages/base/any/mgmt/APKG.yml ARCH=amd64

View File

@@ -1 +1,7 @@
*.tar
*.deb
*.whl
*.yang
manifests
yang
gs-yang.py

View File

@@ -1,9 +1,2 @@
IMAGE_REPO=docker.io/microsonic
IMAGES=gs-mgmt-debug
TAG=latest
mgmt-amd64.tar: FORCE
DOCKER_BUILDKIT=1 docker build --build-arg BASE=$(IMAGE_REPO)/$(IMAGES):$(TAG) -t $(IMAGES):$(TAG) .
docker save $(foreach image,$(IMAGES), $(image):$(TAG)) > $@
FORCE:
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/mgmt/builds/Makefile

View File

@@ -1,270 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: netopeer2
spec:
selector:
app: gs-mgmt
ports:
- protocol: TCP
port: 830
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: gs-mgmt
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gs-mgmt
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
name: gs-mgmt
namespace: default
---
apiVersion: batch/v1
kind: Job
metadata:
name: prep-gs-mgmt
spec:
template:
spec:
hostPID: true
restartPolicy: Never
containers:
- name: prep-sysrepo
image: docker.io/microsonic/gs-mgmt-debug:latest
imagePullPolicy: IfNotPresent
command: ['sysrepoctl', '-s', '/var/lib/goldstone/yang/gs', '--install', '/var/lib/goldstone/yang/gs/goldstone-tai.yang,/var/lib/goldstone/yang/gs/goldstone-onlp.yang']
volumeMounts:
- name: shm
mountPath: /dev/shm
- name: sysrepo
mountPath: /var/lib/sysrepo
- name: prep-np
image: docker.io/microsonic/gs-mgmt-netopeer2:latest
imagePullPolicy: IfNotPresent
command: ['sh', '-c', '$NP2/setup.sh && $NP2/merge_hostkey.sh && $NP2/merge_config.sh']
env:
- name: NP2
value: /usr/local/share/netopeer2/scripts
- name: NP2_MODULE_OWNER
value: root
- name: NP2_MODULE_GROUP
value: root
- name: NP2_MODULE_PERMS
value: "600"
- name: NP2_MODULE_DIR
value: /usr/local/share/yang/modules/netopeer2
volumeMounts:
- mountPath: /dev/shm
name: shm
- mountPath: /var/lib/sysrepo
name: sysrepo
volumes:
- name: shm
hostPath:
path: /dev/shm
- name: sysrepo
hostPath:
path: /var/lib/sysrepo
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: gs-mgmt-cli
labels:
app: gs-mgmt
spec:
selector:
matchLabels:
app: gs-mgmt-cli
template:
metadata:
labels:
app: gs-mgmt-cli
spec:
hostPID: true
serviceAccountName: gs-mgmt
initContainers:
- name: wait-prep
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=complete', 'job/prep-gs-mgmt']
containers:
- name: cli
image: docker.io/microsonic/gs-mgmt-debug:latest
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'while true; do gscli -c "transponder; show"; sleep 2; done']
volumeMounts:
- name: shm
mountPath: /dev/shm
- name: sysrepo
mountPath: /var/lib/sysrepo
volumes:
- name: shm
hostPath:
path: /dev/shm
- name: sysrepo
hostPath:
path: /var/lib/sysrepo
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: gs-mgmt-np2
labels:
app: gs-mgmt
spec:
selector:
matchLabels:
app: gs-mgmt-np2
template:
metadata:
labels:
app: gs-mgmt-np2
spec:
hostPID: true
serviceAccountName: gs-mgmt
initContainers:
- name: wait-prep
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=complete', 'job/prep-gs-mgmt']
containers:
- name: netopeer2
image: docker.io/microsonic/gs-mgmt-netopeer2:latest
imagePullPolicy: IfNotPresent
command: ['netopeer2-server', '-d', '-v', '2']
volumeMounts:
- name: shm
mountPath: /dev/shm
- name: sysrepo
mountPath: /var/lib/sysrepo
volumes:
- name: shm
hostPath:
path: /dev/shm
- name: sysrepo
hostPath:
path: /var/lib/sysrepo
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: gs-mgmt-tai
labels:
app: gs-mgmt
spec:
selector:
matchLabels:
app: gs-mgmt-tai
template:
metadata:
labels:
app: gs-mgmt-tai
spec:
hostPID: true
serviceAccountName: gs-mgmt
initContainers:
- name: wait-prep
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=complete', 'job/prep-gs-mgmt']
containers:
- name: tai
image: docker.io/microsonic/gs-mgmt-debug:latest
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'gssouthd-tai --taish-server $TAISH_SERVER_SERVICE_HOST:$TAISH_SERVER_SERVICE_PORT --verbose']
volumeMounts:
- name: shm
mountPath: /dev/shm
- name: sysrepo
mountPath: /var/lib/sysrepo
volumes:
- name: shm
hostPath:
path: /dev/shm
- name: sysrepo
hostPath:
path: /var/lib/sysrepo
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: gs-mgmt-onlp
labels:
app: gs-mgmt
spec:
selector:
matchLabels:
app: gs-mgmt-onlp
template:
metadata:
labels:
app: gs-mgmt-onlp
spec:
hostPID: true
serviceAccountName: gs-mgmt
initContainers:
- name: wait-prep
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=complete', 'job/prep-gs-mgmt']
containers:
- name: onlp
image: docker.io/microsonic/gs-mgmt-debug:latest
imagePullPolicy: IfNotPresent
command: ['gssouthd-onlp']
volumeMounts:
- name: shm
mountPath: /dev/shm
- name: sysrepo
mountPath: /var/lib/sysrepo
- name: onl
mountPath: /etc/onl/
- name: host-lib
mountPath: /lib/x86_64-linux-gnu/libonlp-platform.so
- name: host-i2c-lib
mountPath: /lib/x86_64-linux-gnu/libi2c.so.0
securityContext:
capabilities:
add:
- IPC_OWNER
- IPC_LOCK
volumes:
- name: shm
hostPath:
path: /dev/shm
- name: sysrepo
hostPath:
path: /var/lib/sysrepo
- name: onl
hostPath:
path: /etc/onl
- name: host-lib
hostPath:
path: /lib/x86_64-linux-gnu/libonlp-platform.so
- name: host-i2c-lib
hostPath:
path: /usr/lib/x86_64-linux-gnu/libi2c.so.0.1.1

View File

@@ -0,0 +1,8 @@
bases:
- base
patches:
- north-notif.yaml
- south-onlp.yaml
- south-sonic.yaml
- south-tai.yaml

View File

@@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: north-notif
spec:
template:
spec:
containers:
- name: notif
image: MGMT_IMAGE_REPO/north-notif:IMAGE_TAG

View File

@@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: south-onlp
spec:
template:
spec:
containers:
- name: onlp
image: MGMT_IMAGE_REPO/south-onlp:IMAGE_TAG
volumeMounts:
- name: host-lib
mountPath: /lib/x86_64-linux-gnu/libonlp-platform.so
- name: host-i2c-lib
mountPath: /lib/x86_64-linux-gnu/libi2c.so.0
volumes:
- name: host-lib
hostPath:
path: /lib/x86_64-linux-gnu/libonlp-platform.so
- name: host-i2c-lib
hostPath:
path: /usr/lib/x86_64-linux-gnu/libi2c.so.0

View File

@@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: south-sonic
spec:
template:
spec:
containers:
- name: sonic
image: MGMT_IMAGE_REPO/south-sonic:IMAGE_TAG

View File

@@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: south-tai
spec:
template:
spec:
containers:
- name: tai
image: MGMT_IMAGE_REPO/south-tai:IMAGE_TAG

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

View File

@@ -0,0 +1 @@
!include $X1/packages/base/any/netopeer2/APKG.yml ARCH=amd64

View File

@@ -0,0 +1,4 @@
*.tar
scripts
yang
manifests

View File

@@ -0,0 +1,2 @@
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/netopeer2/builds/Makefile

View File

@@ -0,0 +1 @@
lib

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

View File

@@ -0,0 +1 @@
!include $X1/packages/base/any/python3.10/APKG.yml ARCH=amd64

View File

@@ -0,0 +1,4 @@
libpython3.10.so.1.0
pip
python
python3.10

View File

@@ -0,0 +1,2 @@
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/python3.10/builds/Makefile

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

View File

@@ -0,0 +1 @@
!include $X1/packages/base/any/snmp/APKG.yml ARCH=amd64

View File

@@ -0,0 +1,2 @@
*.tar
manifests

View File

@@ -0,0 +1,2 @@
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/snmp/builds/Makefile

View File

@@ -1,13 +1 @@
packages:
- name: stern
arch: amd64
version: 1.0.0
copyright: Copyright 2020 Wataru Ishida
maintainer: wataru.ishid@gmail.com
changelog: Initial
support: wataru.ishid@gmail.com
summary: stern
depends:
- k3s
files:
builds/stern: /usr/local/bin/stern
!include $X1/packages/base/any/stern/APKG.yml ARCH=amd64

View File

@@ -1 +1,3 @@
stern
LICENSE
*.tar.gz

View File

@@ -1,5 +1,2 @@
STERN_VERSION = 1.11.0
stern:
wget -O $@ https://github.com/wercker/stern/releases/download/$(STERN_VERSION)/stern_linux_amd64
chmod +x $@
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/stern/builds/Makefile

View File

@@ -1,15 +1 @@
packages:
- name: x1-tai
arch: amd64
version: 1.0.0
copyright: Copyright 2020 Wataru Ishida
maintainer: wataru.ishid@gmail.com
changelog: Initial
support: wataru.ishid@gmail.com
summary: TAI shell server
before-remove: $__DIR__/before-remove.sh
depends:
- k3s
files:
builds/tai-amd64.tar: /var/lib/rancher/k3s/agent/images/
builds/manifests: /var/lib/rancher/k3s/server/manifests/tai/
!include $X1/packages/base/any/tai/APKG.yml ARCH=amd64 DEFAULT_LIBTAI=libtai-mux.so.0.5.0

View File

@@ -1,9 +1,19 @@
IMAGE_REPO=docker.io/microsonic
IMAGES=tai
TAG=latest
TAI_LIBS := libtai-ldc.so.0.5.0 libtai-menara.so.0.5.0 libtai-lumentum.so.0.5.0 libtai-mux.so.0.5.0 libtai-aco.so.0.1.0
tai-amd64.tar: FORCE
DOCKER_BUILDKIT=1 docker build --build-arg BASE=$(IMAGE_REPO)/$(IMAGES):$(TAG) -t $(IMAGES):$(TAG) .
docker save $(foreach image,$(IMAGES), $(image):$(TAG)) > $@
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/tai/builds/Makefile
FORCE:
$(LIB_DIR)/libtai-ldc.so.0.5.0:
oras pull -o $(LIB_DIR) ghcr.io/oopt-goldstone/libtai-ldc:0.5.0@sha256:34b7953204e2c92988bd20c4f4c40db5a8ebe628b2820b5d28ac71f1672f6c1b
$(LIB_DIR)/libtai-menara.so.0.5.0:
oras pull -o $(LIB_DIR) ghcr.io/oopt-goldstone/libtai-menara:0.5.0@sha256:d18a51440b9b98a8a029e7bfced83c68d363bfeca4c2a52ec53a98ff22695420
$(LIB_DIR)/libtai-lumentum.so.0.5.0:
oras pull -o $(LIB_DIR) ghcr.io/oopt-goldstone/libtai-lumentum:0.5.0@sha256:4273a2ed6ae1b80b568c20487faea391c07e12ff849a882ecd8d11c3fcff7b67
$(LIB_DIR)/libtai-mux.so.0.5.0:
oras pull -o $(LIB_DIR) ghcr.io/oopt-goldstone/libtai-mux:0.5.0@sha256:c72f6d40d57f14823471b2a472ff2216525d7d8b347b33625a01911d8cd26028
$(LIB_DIR)/libtai-aco.so.0.1.0:
oras pull -o $(LIB_DIR) ghcr.io/oopt-goldstone/libtai-aco:0.1.0@sha256:e27e154de9adcd8be8e80bf453d4bbac63179a7bddc7a1b05451c446963419c3

View File

@@ -0,0 +1,5 @@
bases:
- base
patches:
- tai.yaml

View File

@@ -1,38 +1,15 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: tai
name: tai
spec:
replicas: 1
selector:
matchLabels:
app: tai
strategy:
type: Recreate
template:
metadata:
labels:
app: tai
spec:
containers:
- name: taish-server
image: tai:latest
imagePullPolicy: IfNotPresent
command: ['taish_server']
env:
- name: LD_LIBRARY_PATH
value: "/var/lib/tai"
volumeMounts:
- name: host-libtai
mountPath: /var/lib/tai
- name: tai-config
mountPath: /etc/tai
securityContext:
privileged: true
volumes:
- name: host-libtai
hostPath:
path: /var/lib/tai/
- name: tai-config
hostPath:
path: /var/lib/goldstone/device/current/tai
template:
spec:
containers:
- name: taish-server
image: tai-amd64:latest
env:
- name: TAI_MUX_PLATFORM_ADAPTER
value: "exec"
- name: TAI_MUX_EXEC_SCRIPT
value: "/etc/tai/mux/exec.py"

View File

@@ -0,0 +1,13 @@
[Unit]
Description=TAI shell service
[Service]
Type=oneshot
ExecStartPre=/bin/sh -c 'while [ true ]; do ( kubectl get nodes | grep " Ready" ) && exit 0; sleep 1; done'
ExecStart=/usr/bin/tai.sh start
ExecStop=-/usr/bin/tai.sh stop
RemainAfterExit=true
StandardOutput=journal
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,25 @@
#!/bin/bash
set -eux
start() {
kubectl apply -f /var/lib/tai/k8s
kubectl wait --for=condition=ready pod/$(kubectl get pod -l app=tai -o jsonpath='{.items[0].metadata.name}')
}
stop() {
pod=$(kubectl get pod -l app=tai -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo 'dummy' )
kubectl delete --ignore-not-found -f /var/lib/tai/k8s
kubectl delete --ignore-not-found pods/$pod
}
case "$1" in
start|stop)
$1
;;
*)
echo "Usage: $0 {start|stop}"
exit 1
;;
esac

View File

@@ -1,19 +1,18 @@
packages:
- name: x1-usonic
- name: gs-usonic
arch: amd64
version: 1.0.0
copyright: Copyright 2020 Wataru Ishida
maintainer: wataru.ishid@gmail.com
changelog: Initial
support: wataru.ishid@gmail.com
maintainer: goldstone-nos@googlegroups.com
summary: usonic
after-install: $__DIR__/after-install.sh
after-remove: $__DIR__/after-remove.sh
before-remove: $__DIR__/before-remove.sh
depends:
- k3s
- jq
files:
builds/usonic-amd64.tar: /var/lib/rancher/k3s/agent/images/
builds/manifests: /var/lib/rancher/k3s/server/manifests/usonic
builds/load-usonic-config.service: /etc/systemd/system/
builds/load_usonic_config.sh: /usr/bin/
builds/manifests: /var/lib/usonic/k8s
builds/usonic.service: /etc/systemd/system/
builds/usonic.sh: /usr/bin/
builds/bcm.sh: /etc/profile.d/
builds/libsai.so: /var/lib/usonic/lib/

View File

@@ -2,6 +2,4 @@
set -eux
systemctl enable load-usonic-config.service
mkdir -p /var/lib/usonic/redis
systemctl enable usonic.service

View File

@@ -2,5 +2,5 @@
set -eux
systemctl disable load-usonic-config.service
kubectl delete -f /var/lib/rancher/k3s/server/manifests/usonic/ || true
systemctl disable usonic.service
usonic.sh stop || true

View File

@@ -1,6 +1,21 @@
USONIC_IMAGE_REPO=docker.io/microsonic
USONIC_IMAGES=usonic-debug:201904 usonic-cli:latest
USONIC_IMAGES ?= docker.io/nlpldev/usonic-cli:latest ghcr.io/microsonic/usonic-debug:201904 docker.io/bitnami/kubectl:latest
BCMD_IMAGE ?= bcmd:latest
usonic-amd64.tar: Makefile
$(foreach image,$(USONIC_IMAGES), docker pull $(USONIC_IMAGE_REPO)/$(image);)
docker save $(foreach image,$(USONIC_IMAGES), $(USONIC_IMAGE_REPO)/$(image)) > $@
SAI_VERSION := 20220524
.PHONY: bcmd
all: usonic-amd64.tar libsai.so
usonic-amd64.tar: Makefile bcmd
for image in $(USONIC_IMAGES);\
do \
docker pull $$image; \
done
docker save $(USONIC_IMAGES) $(BCMD_IMAGE) > $@
libsai.so:
oras pull ghcr.io/oopt-goldstone/libsai:$(SAI_VERSION)
bcmd:
cd bcmd && DOCKER_BUILDKIT=1 docker build --progress plain -t $(BCMD_IMAGE) .

View File

@@ -0,0 +1 @@
alias bcmsh='k exec -it deploy/usonic-core -c syncd -- socat unix-connect:/run/sswsyncd/sswsyncd.socket -'

View File

@@ -0,0 +1,13 @@
# syntax=docker/dockerfile:1.3
FROM python:3-slim AS builder
RUN pip install --upgrade pip
RUN pip install wheel grpcio-tools grpclib
RUN --mount=type=bind,target=/src,rw cd /src && python -m grpc_tools.protoc -Iproto --python_out=. --python_grpc_out=. ./proto/bcmd/bcmd.proto && mkdir /dist && pip wheel . -w /dist && find /dist
FROM python:3-slim
ADD https://sonicstorage.blob.core.windows.net/packages/20190307/bcmcmd?sv=2015-04-05&sr=b&sig=sUdbU7oVbh5exbXXHVL5TDFBTWDDBASHeJ8Cp0B0TIc%3D&se=2038-05-06T22%3A34%3A19Z&sp=r /usr/bin/bcmcmd
RUN chmod +x /usr/bin/bcmcmd
RUN --mount=type=bind,from=builder,source=/dist,target=/dist pip install /dist/*.whl

View File

@@ -0,0 +1,3 @@
bcmd_grpc.py
bcmd_pb2.py
__pycache__

View File

@@ -0,0 +1,25 @@
import asyncio
import argparse
from grpclib.client import Channel
from bcmd import bcmd_pb2
from bcmd import bcmd_grpc
def main():
parser = argparse.ArgumentParser()
parser.add_argument("command", nargs="*")
args = parser.parse_args()
async def _main():
async with Channel('127.0.0.1', 50051) as ch:
bcm = bcmd_grpc.BCMDStub(ch)
reply = await bcm.Exec(bcmd_pb2.ExecRequest(command=" ".join(args.command)))
print(reply.response)
asyncio.run(_main())
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,107 @@
import logging
import subprocess
import argparse
from parse import *
from bcmd import bcmd_pb2
from bcmd import bcmd_grpc
import asyncio
from grpclib.utils import graceful_exit
from grpclib.server import Server
logger = logging.getLogger(__name__)
def bcmcmd(cmds):
logger.debug(f"cmds: {cmds}")
output = subprocess.run(["bcmcmd", cmds], capture_output=True)
output = "\n".join(line for line in output.stdout.decode().split("\r\n"))
logger.debug(f"output: {output}")
return output
class BCMd(bcmd_grpc.BCMDBase):
async def Exec(self, stream):
request = await stream.recv_message()
response = bcmcmd(request.command)
await stream.send_message(bcmd_pb2.ExecResponse(response=response))
def get_temp():
output = bcmcmd("show temp")
data = []
for line in output.split("\n"):
result = parse("temperature monitor {:d}: current= {:f}, peak= {:f}", line)
if result:
data.append((result[1], result[2]))
return data
async def temp_loop(output_dir, interval):
    """Periodically sample switch temperatures and publish them as files.

    Every `interval` seconds, writes per-sensor `temp{N}_current` and
    `temp{N}_peak` files plus aggregate `temp_avg` and `temp_max_peak`
    under `output_dir`. Values are written in millidegrees (value * 1000),
    matching the Linux hwmon convention.

    Args:
        output_dir: directory to write the temperature files into (must exist).
        interval: sampling period in seconds.
    """
    while True:
        readings = get_temp()
        # Guard against unparsable / empty bcmcmd output: the original code
        # raised ZeroDivisionError (avg) or ValueError (max of empty sequence)
        # here, killing the loop. Skip the cycle instead.
        if readings:
            for i, (cur, peak) in enumerate(readings):
                with open(f"{output_dir}/temp{i+1}_current", "w") as f:
                    f.write(f"{int(cur * 1000)}\n")
                with open(f"{output_dir}/temp{i+1}_peak", "w") as f:
                    f.write(f"{int(peak * 1000)}\n")
            with open(f"{output_dir}/temp_avg", "w") as f:
                avg = sum(cur * 1000 for cur, _ in readings) // len(readings)
                f.write(f"{int(avg)}\n")
            with open(f"{output_dir}/temp_max_peak", "w") as f:
                peak_max = max(peak * 1000 for _, peak in readings)
                f.write(f"{int(peak_max)}\n")
        await asyncio.sleep(interval)
def main():
    """Entry point for the bcmd server.

    Optionally runs an init SOC script through bcmcmd, then serves the BCMD
    gRPC API on 0.0.0.0:50051 while sampling temperatures in the background.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--init-soc")
    # type=float is required: without it a value supplied on the command line
    # arrives as a string and asyncio.sleep() in temp_loop raises TypeError.
    # The default stays 10 (seconds), so default behavior is unchanged.
    parser.add_argument("--temp-capture-interval", type=float, default=10)
    parser.add_argument("--temp-output-dir", default="/run/bcm")
    parser.add_argument("-v", "--verbose", action="store_true")
    args = parser.parse_args()
    output_dir = args.temp_output_dir
    interval = args.temp_capture_interval
    fmt = "%(levelname)s %(module)s %(funcName)s l.%(lineno)d | %(message)s"
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG, format=fmt)
        # Keep chatty third-party loggers quiet even in verbose mode.
        for noisy in ["parse", "hpack"]:
            logging.getLogger(noisy).setLevel(logging.INFO)
    else:
        logging.basicConfig(level=logging.INFO, format=fmt)
    if args.init_soc:
        print(bcmcmd(args.init_soc))

    async def _main():
        server = Server([BCMd()])
        with graceful_exit([server]):
            await server.start("0.0.0.0", "50051")
            tasks = [
                asyncio.create_task(server.wait_closed()),
                asyncio.create_task(temp_loop(output_dir, interval)),
            ]
            # Stop as soon as either the server shuts down or temp_loop dies.
            done, pending = await asyncio.wait(
                tasks, return_when=asyncio.FIRST_COMPLETED
            )
            logger.debug(f"done: {done}, pending: {pending}")
            # Surface any background failure instead of exiting silently.
            for task in done:
                e = task.exception()
                if e:
                    raise e

    asyncio.run(_main())


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,15 @@
// bcmd: minimal RPC wrapper around the Broadcom `bcmcmd` diagnostic shell.
syntax = "proto3";

package bcmd;

// BCMD executes bcmcmd command lines on the switch and returns their output.
service BCMD {
    // Exec runs a single command string and returns its textual output.
    rpc Exec(ExecRequest) returns (ExecResponse);
}

// ExecRequest carries one complete bcmcmd command line.
message ExecRequest {
    string command = 1;
}

// ExecResponse carries the command's stdout, CRLF-normalized by the server.
message ExecResponse {
    string response = 1;
}

View File

@@ -0,0 +1,3 @@
parse
grpclib
protobuf

View File

@@ -0,0 +1,18 @@
"""Packaging definition for the bcmd server/client tools."""
import setuptools

# Mirror the runtime dependencies declared in requirements.txt so that a
# plain `pip install` of this package pulls them in as well.
with open("requirements.txt", "r") as f:
    install_requires = f.read().split()

setuptools.setup(
    name="bcmd",
    version="0.1.0",
    packages=["bcmd"],
    install_requires=install_requires,
    zip_safe=False,
    # `bcmd` launches the gRPC server; `bcmsh` is the interactive client.
    entry_points={
        "console_scripts": [
            "bcmd = bcmd.server:main",
            "bcmsh = bcmd.client:main",
        ],
    },
)

View File

@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: cli
image: docker.io/microsonic/usonic-cli:latest
image: docker.io/nlpldev/usonic-cli:latest
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'sleep 100000']
volumeMounts:
@@ -21,7 +21,11 @@ spec:
name: sonic-db-config
- name: sonic-version
configMap:
name: sonic-version
name: usonic-config
items:
- key: sonic_version.yml
path: sonic_version.yml
- name: usonic-config
configMap:
name: usonic-config
terminationGracePeriodSeconds: 0

View File

@@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: redis
image: docker.io/microsonic/usonic-debug:201904
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ["redis-server", "/etc/redis/redis.conf"]
volumeMounts:

View File

@@ -6,7 +6,7 @@ data:
commit_id: '443ccce'
build_date: Fri Sep 27 23:52:52 UTC 2019
build_number: 0
built_by: wataru@x1builder
built_by: goldstone-nos@googlegroups.com
debian_version: 8.11
kernel_version: 4.9.0-9-2-amd64
kind: ConfigMap

View File

@@ -3,19 +3,12 @@ kind: ConfigMap
metadata:
name: start-script
data:
prepvs.sh: |
#!/bin/sh
for index in $(seq 2);
do
ip link add vEthernet$index type veth peer name veth$index
ip link set up dev vEthernet$index
ip link set up dev veth$index
done
start.sh: |
#!/bin/sh
set -eux
REDIS_HOST='-h redis.default.svc.cluster.local'
# REDIS_HOST='-h 127.0.0.1'
redis-cli ${REDIS_HOST} flushall
@@ -23,5 +16,3 @@ data:
do
redis-cli -n 3 ${REDIS_HOST} hset $daemon LOGOUTPUT STDERR;
done
$(dirname $0)/prepvs.sh

View File

@@ -1,22 +1,52 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: usonic
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: usonic
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
name: usonic
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic
name: usonic-core
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: core
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: core
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: init-loglevel
image: docker.io/microsonic/usonic-debug:201904
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['/var/run/start/start.sh']
volumeMounts:
@@ -26,7 +56,7 @@ spec:
capabilities:
add: ["NET_ADMIN"]
- name: init-configdb
image: docker.io/microsonic/usonic-cli:latest
image: docker.io/nlpldev/usonic-cli:latest
imagePullPolicy: IfNotPresent
command: ['sonic-cfggen', '-k', 'dummy', '-p', '/etc/usonic/port_config.ini', '-j', '/etc/sonic/config_db.json', '--write-to-db']
volumeMounts:
@@ -37,17 +67,17 @@ spec:
- name: usonic-config
mountPath: /etc/usonic/
- name: complete-init
image: docker.io/microsonic/usonic-debug:201904
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['redis-cli', '-h', 'redis.default.svc.cluster.local', '-n', '4', 'SET', 'CONFIG_DB_INITIALIZED', '1']
containers:
- name: syncd
image: docker.io/microsonic/usonic-debug:201904
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'service syslog-ng start && dsserve /usr/bin/syncd -p /etc/sonic/sai.profile -d']
env:
- name: LD_LIBRARY_PATH
value: "/var/lib/current:/var/lib/usonic"
value: "/var/lib/current:/var/lib/usonic/lib"
volumeMounts:
- name: usonic-config
mountPath: /etc/sonic/
@@ -62,7 +92,7 @@ spec:
- name: bcmsh-sock
mountPath: /var/run/sswsyncd
- name: host-libsai
mountPath: /var/lib/usonic
mountPath: /var/lib/usonic/lib
- name: current
mountPath: /var/lib/current
securityContext:
@@ -70,58 +100,29 @@ spec:
capabilities:
add: ["NET_ADMIN"]
- name: orchagent
image: docker.io/microsonic/usonic-debug:201904
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'sleep 5 && gdb -ex "run" -ex "bt" --args orchagent -m $GLOBAL_MAC_ADDRESS']
command: ['orchagent']
args: ['-m', '$(GLOBAL_MAC_ADDRESS)']
env:
- name: GLOBAL_MAC_ADDRESS
value: "72:16:EA:18:79:F9"
valueFrom:
configMapKeyRef:
name: usonic-config
key: macaddress
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
- name: portsyncd
image: docker.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'sleep 8 && gdb -ex "run" -ex "bt" portsyncd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
- name: neighsyncd
image: docker.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'sleep 8 && gdb -ex "run" -ex "bt" neighsyncd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
- name: vlanmgrd
image: docker.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'mount -o remount,rw /sys && sleep 8 && gdb -ex "run" -ex "bt" vlanmgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
privileged: true
- name: intfmgrd
image: docker.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'sleep 8 && gdb -ex "run" -ex "bt" intfmgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
capabilities:
add: ["NET_ADMIN"]
startupProbe:
exec:
command:
- grep
- SAI_SWITCH_ATTR_NUMBER_OF_ACTIVE_PORTS
- sairedis.rec
failureThreshold: 60 # 60 * 2 = 120sec for SAI initialization
periodSeconds: 2
volumes:
- name: redis-sock
hostPath:
@@ -141,19 +142,559 @@ spec:
- name: usonic-config
configMap:
name: usonic-config
- name: config-db-json
configMap:
name: usonic-config
items:
- key: config_db.json
path: config_db.json
- name: sonic-db-config
configMap:
name: sonic-db-config
- name: config-db-json
configMap:
name: config-db-json
- name: warm-boot
emptyDir: {}
- name: bcmsh-sock
emptyDir: {}
hostPath:
path: /var/run/sswsyncd/
- name: host-libsai
hostPath:
path: /var/lib/usonic/
path: /var/lib/usonic/lib
- name: current
hostPath:
path: /var/lib/goldstone/device/current/usonic
---
apiVersion: v1
kind: Service
metadata:
name: bcmd
spec:
selector:
app: usonic
usonic: bcm
ports:
- protocol: TCP
port: 50051
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-bcm
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: bcm
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: bcm
spec:
serviceAccountName: usonic
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-core', '--timeout=5m']
containers:
- name: bcmd
image: bcmd:latest
imagePullPolicy: IfNotPresent
env:
- name: LED_PROC_INIT_SOC
value: "/var/lib/current/led_proc_init.soc"
command: ['bcmd']
args: ['--init-soc', '$(LED_PROC_INIT_SOC)']
volumeMounts:
- name: bcmsh-sock
mountPath: /var/run/sswsyncd
- name: current
mountPath: /var/lib/current
- name: bcmrun
mountPath: /run/bcm
volumes:
- name: bcmsh-sock
hostPath:
path: /var/run/sswsyncd/
- name: current
hostPath:
path: /var/lib/goldstone/device/current/usonic
- name: bcmrun
hostPath:
path: /run/bcm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-port
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: port
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: port
spec:
serviceAccountName: usonic
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-bcm', '--timeout=5m']
containers:
- name: portsyncd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['portsyncd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
capabilities:
add: ["NET_ADMIN"]
# TODO: need more research to use this. enabling startupProbe makes
# state change to 'ready' really slow.
#
# startupProbe:
# exec:
# command:
# - bash
# - -c
# - 'redis-cli exists PORT_TABLE:PortInitDone | grep 1'
# failureThreshold: 5 # 5sec for Port initialization
# periodSeconds: 1
livenessProbe:
exec:
command:
- bash
- -c
- 'redis-cli -h redis.default.svc.cluster.local exists PORT_TABLE:PortInitDone | grep 1'
failureThreshold: 1
periodSeconds: 10
# using this instead of startupProbe for now
initialDelaySeconds: 5
volumes:
- name: redis-sock
hostPath:
path: /var/lib/usonic/redis/redis.sock
type: Socket
- name: sonic-db-config
configMap:
name: sonic-db-config
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-neighbor
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: neighbor
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: neighbor
spec:
serviceAccountName: usonic
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-port', '--timeout=5m']
containers:
- name: neighsyncd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['neighsyncd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
capabilities:
add: ["NET_ADMIN"]
volumes:
- name: redis-sock
hostPath:
path: /var/lib/usonic/redis/redis.sock
type: Socket
- name: sonic-db-config
configMap:
name: sonic-db-config
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-frr
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: frr
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: frr
spec:
hostNetwork: true
serviceAccountName: usonic
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-core', '--timeout=5m']
containers:
- name: fpmsyncd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['fpmsyncd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: frr
mountPath: /var/run/frr/
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
- name: zebra
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['/usr/lib/frr/zebra', '-A', '127.0.0.1', '-s', '90000000', '-M', 'fpm']
volumeMounts:
- name: frr
mountPath: /var/run/frr/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
- name: staticd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['/usr/lib/frr/staticd', '-A', '127.0.0.1']
volumeMounts:
- name: frr
mountPath: /var/run/frr/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
- name: bgpd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['/usr/lib/frr/bgpd', '-A', '127.0.0.1']
volumeMounts:
- name: frr
mountPath: /var/run/frr/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
volumes:
- name: redis-sock
hostPath:
path: /var/lib/usonic/redis/redis.sock
type: Socket
- name: frr
emptyDir: {}
- name: sonic-db-config
configMap:
name: sonic-db-config
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-teamd
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: teamd
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: teamd
spec:
hostNetwork: true
serviceAccountName: usonic
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-port', '--timeout=5m']
containers:
- name: teamsyncd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['teamsyncd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: teamd
mountPath: /var/run/teamd/
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
- name: teammgrd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['teammgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: teamd
mountPath: /var/run/teamd/
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
volumes:
- name: redis-sock
hostPath:
path: /var/lib/usonic/redis/redis.sock
type: Socket
- name: teamd
emptyDir: {}
- name: sonic-db-config
configMap:
name: sonic-db-config
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-lldpd
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: lldpd
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: lldpd
spec:
hostNetwork: true
serviceAccountName: usonic
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-core', '--timeout=5m']
containers:
- name: lldpd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['/usr/sbin/lldpd', '-d', '-I', 'Ethernet*,eth0', '-C', 'eth0']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: lldpd
mountPath: /var/run/
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
# - name: lldpsyncd
# image: ghcr.io/microsonic/usonic-debug:201904
# imagePullPolicy: IfNotPresent
# command: ['/usr/bin/env', 'python2', '-m', 'lldp_syncd']
# volumeMounts:
# - name: redis-sock
# mountPath: /var/run/redis/redis.sock
# - name: lldpd
# mountPath: /var/run/
# - name: sonic-db-config
# mountPath: /var/run/redis/sonic-db/
# securityContext:
# privileged: true
# capabilities:
# add: ["NET_ADMIN"]
# - name: lldpmgrd
# image: ghcr.io/microsonic/usonic-debug:201904
# imagePullPolicy: IfNotPresent
# command: ['/usr/bin/lldpmgrd']
# volumeMounts:
# - name: redis-sock
# mountPath: /var/run/redis/redis.sock
# - name: lldpd
# mountPath: /var/run/
# - name: sonic-db-config
# mountPath: /var/run/redis/sonic-db/
# securityContext:
# privileged: true
# capabilities:
# add: ["NET_ADMIN"]
volumes:
- name: redis-sock
hostPath:
path: /var/lib/usonic/redis/redis.sock
type: Socket
- name: lldpd
emptyDir: {}
- name: sonic-db-config
configMap:
name: sonic-db-config
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: usonic-mgrd
labels:
app: usonic
spec:
replicas: 1
selector:
matchLabels:
app: usonic
usonic: mgrd
strategy:
type: Recreate
template:
metadata:
labels:
app: usonic
usonic: mgrd
spec:
serviceAccountName: usonic
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- name: wait-core
image: docker.io/bitnami/kubectl:latest
imagePullPolicy: IfNotPresent
command: ['kubectl', 'wait', '--for=condition=available', 'deploy/usonic-port', '--timeout=5m']
- name: swssconfig
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['swssconfig']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
containers:
- name: vlanmgrd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'mount -o remount,rw /sys && vlanmgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
privileged: true
- name: intfmgrd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['intfmgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
capabilities:
add: ["NET_ADMIN"]
- name: portmgrd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['portmgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
capabilities:
add: ["NET_ADMIN"]
- name: nbrmgrd
image: ghcr.io/microsonic/usonic-debug:201904
imagePullPolicy: IfNotPresent
command: ['nbrmgrd']
volumeMounts:
- name: redis-sock
mountPath: /var/run/redis/redis.sock
- name: sonic-db-config
mountPath: /var/run/redis/sonic-db/
securityContext:
capabilities:
add: ["NET_ADMIN"]
volumes:
- name: redis-sock
hostPath:
path: /var/lib/usonic/redis/redis.sock
type: Socket
- name: sonic-db-config
configMap:
name: sonic-db-config

View File

@@ -0,0 +1,13 @@
# uSONiC systemd unit: deploys the uSONiC Kubernetes workloads once the
# local k3s node is Ready, and tears them down on stop.
[Unit]
Description=uSONiC

[Service]
Type=oneshot
# Block until the node reports Ready; usonic.sh needs a working API server.
ExecStartPre=/bin/sh -c 'while [ true ]; do ( kubectl get nodes | grep " Ready" ) && exit 0; sleep 1; done'
ExecStart=/usr/bin/usonic.sh start
# Leading "-": ignore failures on stop so shutdown is never blocked.
ExecStop=-/usr/bin/usonic.sh stop
# oneshot + RemainAfterExit keeps the unit "active" after start completes.
RemainAfterExit=true
StandardOutput=journal

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,62 @@
#!/bin/bash
set -eux

# For SONiC hostname should be "localhost", so its hardcoded below.
# Seems some places of SONiC has been hardcoded with hostname as "localhost",
# so vlanmgrd container fails to start when hostname is different.
#
# Even on official SONiC builds, system hostname and hostname in Redis are
# different

# Build the CONFIG_DB seed (DEVICE_METADATA for "localhost") as JSON and
# expose it on file descriptor 3, so start() can feed it to kubectl via
# /dev/fd/3 without creating a temporary file.
create_config_db() {
    exec 3<<< $(jq -n "{DEVICE_METADATA: {localhost: {hwsku: \"$(cat /etc/goldstone/platform)\",\
mac: \"$(cat /sys/class/net/eth0/address)\",\
platform: \"$(cat /etc/goldstone/platform)\"}}}")
}
# Render sonic_version.yml from the Goldstone rootfs manifest and expose it
# on file descriptor 4 for start() to consume via /dev/fd/4.
create_sonic_version() {
    exec 4<<< "build_version: $(jq .version.PRODUCT_ID_VERSION /etc/goldstone/rootfs/manifest.json)
asic_type: broadcom
commit_id: $(jq .version.BUILD_SHORT_SHA1 /etc/goldstone/rootfs/manifest.json)
build_date: $(jq .version.BUILD_TIMESTAMP /etc/goldstone/rootfs/manifest.json)
build_number: 0
built_by: '-'
debian_version: $(jq '."os-release".VERSION' /etc/goldstone/rootfs/manifest.json)
kernel_version: $(uname -r)
"
}
# Publish the generated configuration as the usonic-config ConfigMap, apply
# all uSONiC manifests, and block until every uSONiC pod is Ready.
start() {
    mkdir -p /var/lib/usonic/redis
    create_config_db
    create_sonic_version
    # fd 3 = config_db.json (create_config_db), fd 4 = sonic_version.yml
    # (create_sonic_version). --dry-run + apply makes this idempotent.
    kubectl create configmap usonic-config \
        --from-file=config_db.json=/dev/fd/3 \
        --from-file=sonic_version.yml=/dev/fd/4 \
        --from-file="/var/lib/goldstone/device/current/usonic" \
        --from-literal=macaddress="$(cat /sys/class/net/eth0/address)" \
        --dry-run=client -o yaml | kubectl apply -f -
    kubectl apply -f /var/lib/usonic/k8s
    # Wait for the core deployment first, then for every uSONiC pod.
    kubectl wait --for=condition=available deploy/usonic-core --timeout=5m
    for pod in $(kubectl get pod -l app=usonic -o jsonpath='{.items[*].metadata.name}'); do
        kubectl wait --for=condition=ready pod/$pod --timeout 5m
    done
}
# Tear down the uSONiC workloads and remove generated runtime state.
stop() {
    kubectl delete -f /var/lib/usonic/k8s
    kubectl delete configmap usonic-config
    rm -rf /var/lib/usonic/redis
}
# Dispatch on the first argument. ${1:-} (instead of bare "$1") is required
# because the script runs under `set -u`: with no arguments, "$1" would abort
# with "unbound variable" before the usage message could be printed.
case "${1:-}" in
    start|stop)
        $1
        ;;
    *)
        echo "Usage: $0 {start|stop}"
        exit 1
        ;;
esac

View File

@@ -0,0 +1 @@
include $(ONL)/make/pkg.mk

View File

@@ -0,0 +1 @@
!include $X1/packages/base/any/xlate-oc/APKG.yml ARCH=amd64

View File

@@ -0,0 +1,2 @@
*.tar
manifests

View File

@@ -0,0 +1,2 @@
include $(X1)/make/config.amd64.mk
include $(X1)/packages/base/any/xlate-oc/builds/Makefile

View File

@@ -0,0 +1,22 @@
prerequisites:
packages:
- x1-loader-initrd:$ARCH
stage: 3
common:
arch: $ARCH
version: 1.0.0
copyright: Copyright 2013, 2014, 2015 Big Switch Networks
maintainer: support@bigswitch.com
support: opennetworklinux@googlegroups.com
packages:
- name: x1-loader-fit
version: 1.0.0
summary: X1 FIT Loader Image for $ARCH
files:
builds/x1-loader-fit.itb : $$PKG_INSTALL/
builds/manifest.json : $$PKG_INSTALL/
changelog: Change changes changes.,

View File

@@ -0,0 +1,14 @@
# Build the flat-image-tree (FIT) loader image for the selected architecture.
ifndef ARCH
$(error $$ARCH must be set)
endif

# Both targets are declared phony so they are always regenerated: the initrd
# package they are built from can change without these file names changing.
.PHONY: x1-loader-fit.itb x1-loader-fit.its

# Assemble the binary FIT image and copy out the initrd's manifest.json.
x1-loader-fit.itb: its
	$(ONL)/tools/flat-image-tree.py --initrd x1-loader-initrd:$(ARCH),x1-loader-initrd-$(ARCH).cpio.gz --arch $(ARCH) --add-platform initrd --itb $@
	$(ONLPM) --copy-file x1-loader-initrd:$(ARCH) manifest.json .

# Generate the FIT source description (.its) only.
x1-loader-fit.its:
	$(ONL)/tools/flat-image-tree.py --initrd x1-loader-initrd:$(ARCH),x1-loader-initrd-$(ARCH).cpio.gz --arch $(ARCH) --add-platform initrd --its $@

# Convenience alias used as a prerequisite of the .itb rule above.
its: x1-loader-fit.its

View File

@@ -2,16 +2,14 @@ packages:
- name: k3s
arch: $ARCH
version: 1.0.0
copyright: Copyright 2020 Wataru Ishida
maintainer: wataru.ishid@gmail.com
changelog: Initial
support: wataru.ishid@gmail.com
maintainer: goldstone-nos@googlegroups.com
summary: k3s
after-install: $X1/packages/base/any/k3s/after-install.sh
build-depends:
- apt-transport-https
- ca-certificates
files:
builds/k3s-airgap-images-$ARCH.tar: /var/lib/rancher/k3s/agent/images/
builds/k3s-airgap-images-$ARCH.tar.gz: /var/lib/rancher/k3s/agent/images/
builds/k3s: /usr/local/bin/
builds/install.sh: /usr/share/k3s/
$X1/packages/base/any/k3s/builds/k3s-gs-killall.sh: /usr/local/bin/
$X1/packages/base/any/k3s/builds/prep-k3s.service: /etc/systemd/system/

View File

@@ -661,6 +661,7 @@ ExecStartPre=-/sbin/modprobe overlay
ExecStartPost=/bin/sh -c 'while [ true ]; do ( kubectl get nodes | grep " Ready" ) && exit 0; sleep 1; done'
ExecStart=${BIN_DIR}/k3s \\
${CMD_K3S_EXEC}
--bind-address 127.0.0.1 --advertise-address 127.0.0.1 --node-ip 127.0.0.1 --flannel-iface lo --disable local-storage --disable metrics-server --disable traefik --disable-cloud-controller --kubelet-arg 'address=127.0.0.1'
EOF
}
@@ -725,6 +726,7 @@ get_installed_hashes() {
# --- enable and start systemd service ---
systemd_enable() {
info "systemd: Enabling ${SYSTEM_NAME} unit"
$SUDO systemctl enable prep-k3s.service >/dev/null
$SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null
# $SUDO systemctl daemon-reload >/dev/null
}

View File

@@ -1,14 +1,7 @@
K3S_VERSION = v1.18.3
all: k3s-airgap-images-$(ARCH).tar k3s install.sh
k3s-airgap-images-$(ARCH).tar:
wget -O $@ https://github.com/rancher/k3s/releases/download/$(K3S_VERSION)%2Bk3s1/k3s-airgap-images-$(ARCH).tar
all: k3s k3s-airgap-images-$(ARCH).tar.gz
k3s:
wget -O $@ https://github.com/rancher/k3s/releases/download/$(K3S_VERSION)%2Bk3s1/k3s
chmod +x $@
oras pull ghcr.io/oopt-goldstone/k3s:1.22.2@sha256:$(K3S_DIGEST)
tar xvf k3s.tar
install.sh:
wget -O $@ https://raw.githubusercontent.com/rancher/k3s/master/install.sh
chmod +x $@
k3s-airgap-images-$(ARCH).tar.gz: k3s

View File

@@ -0,0 +1,68 @@
#!/bin/sh
# k3s-gs-killall: kill every k3s-managed container process, then clean up
# mounts and network state left behind by k3s (second half of this script).

# Re-exec as root if not already running as root.
[ $(id -u) -eq 0 ] || exec sudo $0 $@

# Put k3s' bundled userland (containerd, crictl, ...) on PATH.
for bin in /var/lib/rancher/k3s/data/**/bin/; do
    [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
done

set -x

# Print the direct child PIDs of process $1.
pschildren() {
    ps -e -o ppid= -o pid= | \
    sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
    grep -w "^$1" | \
    cut -f2
}

# Recursively print each given PID followed by all of its descendants.
pstree() {
    for pid in $@; do
        echo $pid
        for child in $(pschildren $pid); do
            pstree $child
        done
    done
}

# SIGKILL every process tree rooted at the given PIDs. Tracing is toggled
# off while the PID list is computed to keep the -x output readable.
killtree() {
    kill -9 $(
        { set +x; } 2>/dev/null;
        pstree $@;
        set -x;
    ) 2>/dev/null
}

# List the PIDs of all running containerd-shim processes (one per container).
getshims() {
    ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
}

# Kill every container's process tree.
killtree $({ set +x; } 2>/dev/null; getshims; set -x)
# Unmount every mount point whose path starts with the given prefix.
# Mounts are reverse-sorted so nested mounts are unmounted before parents.
do_unmount() {
    { set +x; } 2>/dev/null
    MOUNTS=
    # Collect the mount-point column (2nd field) of every current mount.
    while read ignore mount ignore; do
        MOUNTS="$mount\n$MOUNTS"
    done </proc/self/mounts
    # printf expands the embedded \n sequences into real newlines here.
    MOUNTS=$(printf $MOUNTS | grep "^$1" | sort -r)
    if [ -n "${MOUNTS}" ]; then
        set -x
        umount ${MOUNTS}
    else
        set -x
    fi
}

# Clean up every mount namespace k3s and its containers may have created.
do_unmount '/run/k3s'
do_unmount '/var/lib/rancher/k3s'
do_unmount '/var/lib/kubelet/pods'
do_unmount '/run/netns/cni-'
# Delete network interface(s) that match 'master cni0'
ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
    # Strip the "@ifN" suffix ip prints for veth pairs.
    iface=${iface%%@*}
    [ -z "$iface" ] || ip link delete $iface
done

# Tear down the CNI bridge and the flannel VXLAN device, then drop CNI state.
ip link delete cni0
ip link delete flannel.1
rm -rf /var/lib/cni/

# Remove KUBE-*/CNI-* chains from iptables while preserving all other rules.
iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore

Some files were not shown because too many files have changed in this diff Show More