mirror of
https://github.com/Telecominfraproject/OpenNetworkLinux.git
Merge branch 'master' of github.com:opencomputeproject/OpenNetworkLinux
5
.gitignore
vendored
@@ -7,6 +7,7 @@ dependmodules.x
|
||||
*.cpio.gz
|
||||
*.sqsh
|
||||
*.pyc
|
||||
*.pyo
|
||||
|
||||
# Package cache and lock files
|
||||
.lock
|
||||
@@ -18,3 +19,7 @@ RELEASE/
|
||||
|
||||
.bash_history
|
||||
.buildroot-ccache
|
||||
|
||||
# temporary files
|
||||
*~
|
||||
.#*
|
||||
|
||||
@@ -275,6 +275,15 @@ partition_gpt()
|
||||
installer_standard_gpt_install()
|
||||
{
|
||||
DEV=$1; shift
|
||||
|
||||
if [ -z $DEV ]; then
|
||||
# Install NOS to the same block device as ONIE image
|
||||
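# blkid lists block devices with their labels; grep picks out the ONIE-BOOT
# partition, and the sed expressions drop everything from the partition
# number onward (plus the "p" separator used by mmcblk-style names), leaving
# the whole-disk device in DEV rather than the partition.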
DEV=$(blkid | grep ONIE-BOOT | awk '{print $1}' | sed -e 's/[1-9][0-9]*:.*$//' | sed -e 's/\([0-9]\)\(p\)/\1/' | head -n 1)
|
||||
[ -b "$DEV" ] || {
|
||||
echo "Error: Unable to determine the block device to install NOS"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
visit_parted $DEV do_handle_disk do_handle_partitions || return 1
|
||||
partition_gpt $(get_free_space) || return 1
|
||||
|
||||
1
builds/amd64/installer/new-hotness/Makefile
Normal file
@@ -0,0 +1 @@
|
||||
include $(ONL)/make/pkg.mk
|
||||
2
builds/amd64/installer/new-hotness/PKG.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
!include $ONL/builds/any/installer/new-hotness/APKG.yml ARCH=amd64
|
||||
|
||||
1
builds/amd64/installer/new-hotness/builds/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*INSTALLER
|
||||
2
builds/amd64/installer/new-hotness/builds/Makefile
Normal file
@@ -0,0 +1,2 @@
|
||||
include $(ONL)/make/config.amd64.mk
|
||||
include $(ONL)/builds/any/installer/new-hotness/grub/builds/Makefile
|
||||
4
builds/amd64/installer/new-hotness/builds/boot-config
Normal file
@@ -0,0 +1,4 @@
|
||||
NETDEV=ma1
|
||||
NETAUTO=dhcp
|
||||
BOOTMODE=SWI
|
||||
SWI=images::latest
|
||||
35
builds/any/installer/new-hotness/APKG.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
|
||||
prerequisites:
|
||||
broken: true
|
||||
packages: [ "onl-swi:$ARCH" ]
|
||||
|
||||
common:
|
||||
arch: $ARCH
|
||||
version: $FNAME_RELEASE_ID
|
||||
copyright: Copyright 2016 Big Switch Networks
|
||||
maintainer: support@bigswitch.com
|
||||
|
||||
packages:
|
||||
- name: onl-installer
|
||||
summary: Open Network Linux $ARCH Installer
|
||||
|
||||
files:
|
||||
builds/*INSTALLER : $$PKG_INSTALL/
|
||||
builds/*.md5sum : $$PKG_INSTALL/
|
||||
|
||||
changelog: Changes.
|
||||
|
||||
|
||||
release:
|
||||
- builds/*INSTALLER : $ARCH/
|
||||
- builds/*.md5sum : $ARCH/
94
builds/any/installer/new-hotness/grub/builds/Makefile
Normal file
@@ -0,0 +1,94 @@
|
||||
ifndef ARCH
|
||||
$(error $$ARCH not set)
|
||||
endif
|
||||
|
||||
ONLPLATFORM = python $(ONL)/tools/onlplatform.py
|
||||
PLATFORMS := $(shell $(ONLPM) --platform-manifest onl-loader-initrd:$(ARCH))
|
||||
|
||||
MKSHAR = $(ONL)/tools/mkshar
|
||||
MKSHAR_OPTS = --lazy --unzip-pad --fixup-perms autoperms.sh
|
||||
MKSHAR_PERMS = autoperms.sh
|
||||
|
||||
# Hardcoded to match ONL File naming conventions.
|
||||
include $(ONL)/make/version-onl.mk
|
||||
INSTALLER_NAME=$(FNAME_PRODUCT_VERSION)_ONL-OS_$(FNAME_BUILD_ID)_$(UARCH)_INSTALLER
|
||||
|
||||
ifeq ($(ARCH), amd64)
|
||||
INSTALLER_ARCH = x86_64
|
||||
else
|
||||
INSTALLER_ARCH = $(ARCH)
|
||||
endif
|
||||
|
||||
__installer: __installer_platform_files __installer_swi_files
|
||||
$(ONL_V_at)rm -rf *INSTALLER* *.md5sum
|
||||
$(ONL_V_at)cp /dev/null installer.sh
|
||||
$(ONL_V_at): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
set dummy *.cpio.gz; initrd="$$2" ;\
|
||||
sed \
|
||||
-e 's^@ONLVERSION@^$(VERSION_STRING)^g' \
|
||||
-e "s^@INITRD_ARCHIVE@^$$initrd^g" \
|
||||
-e 's^@INITRD_OFFSET@^^g' \
|
||||
-e 's^@INITRD_SIZE@^^g' \
|
||||
-e 's^@ARCH@^$(INSTALLER_ARCH)^g' \
|
||||
$(ONL)/builds/any/installer/new-hotness/installer.sh.in \
|
||||
>> installer.sh
|
||||
$(ONL_V_at)echo "PAYLOAD_FOLLOWS" >> installer.sh
|
||||
$(ONL_V_at)cp /dev/null $(MKSHAR_PERMS)
|
||||
$(ONL_V_at)cp $(ONL)/make/version-onl.sh .
|
||||
$(ONL_V_at)echo "#!/bin/sh" >> $(MKSHAR_PERMS)
|
||||
$(ONL_V_at)echo "set -e" >> $(MKSHAR_PERMS)
|
||||
$(ONL_V_at)echo "set -x" >> $(MKSHAR_PERMS)
|
||||
$(MKSHAR) $(MKSHAR_OPTS) "$(INSTALLER_NAME)" $(ONL)/tools/scripts/sfx.sh.in installer.sh kernel-* onl-loader-initrd-* *.swi version-onl.sh boot-config
|
||||
$(ONL_V_at)rm -rf installer.sh kernel-* onl-loader-initrd-* $(ZTN_MANIFEST) *.swi version-onl.sh autoperms.sh
|
||||
md5sum "$(INSTALLER_NAME)" | awk '{ print $$1 }' > "$(INSTALLER_NAME).md5sum"
|
||||
|
||||
__installer_platform_files:
|
||||
$(ONL_V_GEN): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
l="$(PLATFORMS)"; for p in $$l; do \
|
||||
src=$$($(ONLPLATFORM) $$p $(ARCH) kernel 2>/dev/null) || : ;\
|
||||
if test "$$src"; then \
|
||||
dst=$${src##*/} ;\
|
||||
if test "$dst" -ot Makefile; then \
|
||||
: ;\
|
||||
else \
|
||||
echo "Staging $$dst for $$p" ;\
|
||||
cp "$$src" "$$dst" ;\
|
||||
fi ;\
|
||||
fi ;\
|
||||
src=$$($(ONLPLATFORM) $$p $(ARCH) initrd 2>/dev/null) || : ;\
|
||||
if test "$$src"; then \
|
||||
dst=$${src##*/} ;\
|
||||
if test "$dst" -ot Makefile; then \
|
||||
: ;\
|
||||
else \
|
||||
echo "Staging $$dst for $$p" ;\
|
||||
cp "$$src" "$$dst" ;\
|
||||
fi ;\
|
||||
fi ;\
|
||||
done ;\
|
||||
:
|
||||
|
||||
ifndef NO_SWI
|
||||
__installer_swi_files:
|
||||
$(ONL_V_GEN): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
swidir=$$(mktemp -d $(PWD)/swi-d-XXXXXX) ;\
|
||||
$(ONLPM) --extract-dir onl-swi:$(ARCH) $$swidir ;\
|
||||
mv $$swidir/usr/share/onl/packages/$(ARCH)/onl-swi/*.swi . ;\
|
||||
rm -fr $$swidir ;\
|
||||
:
|
||||
else
|
||||
__installer_swi_files:
|
||||
$(ONL_V_GEN):
|
||||
endif
|
||||
|
||||
shar installer: installer
|
||||
|
||||
clean:
|
||||
rm -f *.swi *.installer $(notdir $(KERNELS)) *initrd*.cpio.gz
|
||||
|
||||
513
builds/any/installer/new-hotness/installer.sh.in
Normal file
@@ -0,0 +1,513 @@
|
||||
#!/bin/sh
|
||||
############################################################
|
||||
# <bsn.cl fy=2013 v=none>
|
||||
#
|
||||
# Copyright 2013, 2014 BigSwitch Networks, Inc.
|
||||
#
|
||||
#
|
||||
#
|
||||
# </bsn.cl>
|
||||
############################################################
|
||||
#
|
||||
# SwitchLight/ONL Installation Script.
|
||||
#
|
||||
# The purpose of this script is to automatically install SwitchLight
|
||||
# on the target system.
|
||||
#
|
||||
# This script is ONIE-compatible.
|
||||
#
|
||||
# This script can be run under a manual boot of the SwitchLight
|
||||
# Loader as the execution environment for platforms that do not
|
||||
# support ONIE.
|
||||
#
|
||||
############################################################
|
||||
|
||||
IARCH="@ARCH@"
|
||||
ARCH=`uname -m`
|
||||
if test "$ARCH" != "$IARCH"; then
|
||||
# identify mappings between kernel arch and debian arch
|
||||
case "$IARCH:$ARCH" in
|
||||
armel:armv7l) ;;
|
||||
powerpc:ppc) ;;
|
||||
*)
|
||||
echo
|
||||
echo "------------------------------------"
|
||||
echo "Installer Architecture: $IARCH"
|
||||
echo "Target Architecture: $ARCH"
|
||||
echo
|
||||
echo "This installer cannot be used on this"
|
||||
echo "target."
|
||||
echo
|
||||
echo "------------------------------------"
|
||||
sleep 5
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
case "$ARCH" in
|
||||
ppc|powerpc)
|
||||
ARCH_PPC=$ARCH
|
||||
;;
|
||||
x86*|amd*|i?86*)
|
||||
ARCH_X86=$ARCH
|
||||
;;
|
||||
arm*)
|
||||
ARCH_ARM=$ARCH
|
||||
;;
|
||||
*)
|
||||
echo "Invalid Architecture: $ARCH"
|
||||
sleep 5
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
############################################################
|
||||
#
|
||||
# Installation Main
|
||||
#
|
||||
# Installation is performed as follows:
|
||||
#
|
||||
# 1. Detect whether we are running under ONIE or SwitchLight
|
||||
# and perform the appropriate setup.
|
||||
#
|
||||
# 2. Unpack the installer files.
|
||||
#
|
||||
# 3. Source the installer scriptlet for the current platform.
|
||||
# 4. Run the installer function from the platform scriptlet.
|
||||
#
|
||||
# The platform scriptlet determines the entire installation
|
||||
# sequence.
|
||||
#
|
||||
# Most platforms will just call the installation
|
||||
# utilities in this script with the appropriate platform settings.
|
||||
#
|
||||
############################################################
|
||||
|
||||
set -e
|
||||
|
||||
installer_script=${0##*/}
|
||||
installer_dir=${0%/*}
|
||||
installer_dir=$(cd $installer_dir && pwd)
|
||||
installer_zip=$1
|
||||
|
||||
installer_tmpfs=
|
||||
installer_tmpfs_opts=
|
||||
# installer_tmpfs=??*, installer_tmpfs_opts= --> temporary mount
|
||||
# installer_tmpfs=??*, installer_tmpfs_opts=??* --> temporary remount
|
||||
|
||||
installer_tmpfs_kmin=1048576
|
||||
# minimum tmpfs/ramfs size to run this installer
|
||||
# (conservative, could be based on actual installer size)
|
||||
|
||||
BOOTDIR=/mnt/onie-boot
|
||||
# initial boot partition (onie)
|
||||
|
||||
# Replaced during build packaging with the current version.
|
||||
onl_version="@ONLVERSION@"
|
||||
initrd_archive="@INITRD_ARCHIVE@"
|
||||
initrd_offset="@INITRD_OFFSET@"
|
||||
initrd_size="@INITRD_SIZE@"
|
||||
|
||||
CR="
|
||||
"
|
||||
|
||||
cd $installer_dir
|
||||
|
||||
has_grub_env()
|
||||
{
|
||||
local tag
|
||||
tag=$1; shift
|
||||
test -f $BOOTDIR/grub/grubenv || return 1
|
||||
case "`grub-editenv $BOOTDIR/grub/grubenv list 2>/dev/null`" in
|
||||
*${tag}*) return 0 ;;
|
||||
esac
|
||||
return 1
|
||||
}
|
||||
|
||||
has_uboot_env()
|
||||
{
|
||||
local tag
|
||||
tag=$1; shift
|
||||
test -x /usr/sbin/fw_printenv || return 1
|
||||
test -f /etc/fw_env.config || return 1
|
||||
/usr/sbin/fw_printenv $tag 1>/dev/null 2>&1 && return 0
|
||||
return 1
|
||||
}
|
||||
|
||||
has_boot_env()
|
||||
{
|
||||
local tag
|
||||
tag=$1; shift
|
||||
has_grub_env $tag && return 0
|
||||
has_uboot_env $tag && return 0
|
||||
return 1
|
||||
}
|
||||
|
||||
# Check installer debug option from the boot environment
|
||||
if has_boot_env onl_installer_debug; then installer_debug=1; fi
|
||||
|
||||
if test "$installer_debug"; then
|
||||
echo "Debug mode"
|
||||
set -x
|
||||
fi
|
||||
|
||||
# Pickup ONIE defines for this machine.
|
||||
if test -r /etc/machine.conf; then
|
||||
. /etc/machine.conf
|
||||
fi
|
||||
|
||||
visit_proc_mounts() {
|
||||
local ifs line dummy fn rest sts
|
||||
fn=$1; shift
|
||||
rest="$@"
|
||||
|
||||
ifs=$IFS; IFS=$CR
|
||||
for line in $(cat /proc/mounts); do
|
||||
IFS=$ifs
|
||||
if eval $fn $line $rest; then
|
||||
:
|
||||
else
|
||||
sts=$?
|
||||
if test $sts -eq 2; then break; fi
|
||||
return $sts
|
||||
fi
|
||||
done
|
||||
IFS=$ifs
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
#
|
||||
# Installation environment setup.
|
||||
#
|
||||
|
||||
installer_umount() {
|
||||
local cwd mpt tdir
|
||||
cwd=$PWD
|
||||
cd /
|
||||
|
||||
tdir=${TMPDIR-"/tmp"}
|
||||
for mpt in $(cat /proc/mounts | cut -d' ' -f2 | sort -r); do
|
||||
case "$mpt" in
|
||||
"$tdir"/*)
|
||||
installer_say "Unmounting $mpt"
|
||||
umount "$mpt"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# handle installer_tmpfs specially
|
||||
if test "$installer_tmpfs"; then
|
||||
if grep -q " $installer_tmpfs " /proc/mounts; then
|
||||
|
||||
if test "$installer_tmpfs_opts"; then
|
||||
|
||||
# remount if still mounted
|
||||
|
||||
case ",$installer_tmpfs_opts," in
|
||||
*,size=*,*) ;;
|
||||
*)
|
||||
# default if unspecified is 50% of physical memory
|
||||
installer_tmpfs_opts=${installer_tmpfs_opts},size=50%
|
||||
;;
|
||||
esac
|
||||
installer_say "Remounting $installer_tmpfs with options $installer_tmpfs_opts"
|
||||
mount -o remount,$installer_tmpfs_opts $installer_tmpfs
|
||||
|
||||
elif test "$installer_tmpfs" != "/tmp"; then
|
||||
|
||||
# else unmount if still mounted
|
||||
|
||||
installer_say "Unmounting $installer_tmpfs"
|
||||
umount "$installer_tmpfs"
|
||||
rmdir "$installer_tmpfs"
|
||||
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
cd $cwd || :
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
if test "${onie_platform}"; then
|
||||
# Running under ONIE, most likely in the background in installer mode.
|
||||
# Our messages have to be sent to the console directly, not to stdout.
|
||||
installer_say()
|
||||
{
|
||||
echo "$@" > /dev/console
|
||||
}
|
||||
|
||||
# Installation failure message.
|
||||
installer_cleanup()
|
||||
{
|
||||
installer_say "Install failed."
|
||||
cat /var/log/onie.log > /dev/console
|
||||
installer_say "Install failed. See log messages above for details"
|
||||
|
||||
installer_umount
|
||||
|
||||
if installer_reboot; then
|
||||
:
|
||||
else
|
||||
sync
|
||||
sleep 3
|
||||
reboot
|
||||
fi
|
||||
}
|
||||
else
|
||||
if test "$ARCH_X86"; then
|
||||
echo "Missing onie_platform (invalid /etc/machine.conf)" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
#
|
||||
# Assume we are running in an interactive environment
|
||||
#
|
||||
installer_say()
|
||||
{
|
||||
echo
|
||||
echo "* $@"
|
||||
echo
|
||||
}
|
||||
|
||||
installer_cleanup()
|
||||
{
|
||||
installer_say "Install failed."
|
||||
installer_umount
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
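# Any exit or hangup before the install completes reports a failure and
# cleans up; the trap is cleared near the end once the install has finished.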
trap "installer_cleanup" 0 1
|
||||
|
||||
# Find a suitable location for TMPDIR
|
||||
|
||||
scan_tmpfs() {
|
||||
local dev mpt fstype opts tdir
|
||||
dev=$1; shift
|
||||
mpt=$1; shift
|
||||
fstype=$1; shift
|
||||
opts=$1; shift
|
||||
shift
|
||||
shift
|
||||
tdir="$1"
|
||||
|
||||
case "$fstype" in
|
||||
ramfs|tmpfs) ;;
|
||||
*) return 0 ;;
|
||||
esac
|
||||
|
||||
case "$tdir" in
|
||||
"$mpt"|"$mpt"/*)
|
||||
d1=$(stat -c '%D' "$tdir")
|
||||
d2=$(stat -c '%D' $mpt)
|
||||
if test "$d1" = "$d2"; then
|
||||
installer_say "Found installer $fstype on $installer_dir ($mpt) using opts $opts"
|
||||
installer_tmpfs=$mpt
|
||||
installer_tmpfs_opts=${opts:-"defaults"}
|
||||
return 2
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# maybe installer script was unpacked to a tmpfs/ramfs filesystem
|
||||
if test -z "$installer_tmpfs" -a "$installer_dir"; then
|
||||
visit_proc_mounts scan_tmpfs "$installer_dir"
|
||||
if test "$installer_tmpfs"; then
|
||||
TMPDIR="$installer_dir"
|
||||
export TMPDIR
|
||||
fi
|
||||
fi
|
||||
# maybe TMPDIR is on a tmpfs/ramfs filesystem
|
||||
if test -z "$installer_tmpfs" -a "$TMPDIR"; then
|
||||
visit_proc_mounts scan_tmpfs "$TMPDIR"
|
||||
if test "$installer_tmpfs"; then
|
||||
:
|
||||
else
|
||||
installer_say "TMPDIR $TMPDIR is not actually tmpfs, ignoring"
|
||||
unset TMPDIR
|
||||
fi
|
||||
fi
|
||||
# else, hopefully /tmp is a tmpfs/ramfs
|
||||
if test -z "$installer_tmpfs"; then
|
||||
visit_proc_mounts scan_tmpfs /tmp
|
||||
if test "$installer_tmpfs"; then
|
||||
TMPDIR=/tmp
|
||||
export TMPDIR
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "$installer_tmpfs"; then
|
||||
set dummy $(df -k $installer_tmpfs | tail -1)
|
||||
if test $3 -lt $installer_tmpfs_kmin; then
|
||||
installer_say "Resizing tmpfs $installer_tmpfs to ${installer_tmpfs_kmin}k"
|
||||
mount -o remount,size=${installer_tmpfs_kmin}k $installer_tmpfs
|
||||
else
|
||||
# existing installer_tmpfs is fine,
|
||||
# no need to unmount or remount
|
||||
installer_tmpfs=
|
||||
installer_tmpfs_opts=
|
||||
fi
|
||||
else
|
||||
installer_say "Creating tmpfs for installer"
|
||||
installer_tmpfs=$(mktemp -d -t installer-tmpfs-XXXXXX)
|
||||
installer_tmpfs_opts=
|
||||
mount -t tmpfs -o size=1024m tmpfs $installer_tmpfs
|
||||
export TMPDIR=$installer_tmpfs
|
||||
fi
|
||||
|
||||
# Unpack our distribution
|
||||
if test "${installer_unpack_only}"; then
|
||||
installer_list=
|
||||
else
|
||||
installer_list=$initrd_archive
|
||||
fi
|
||||
|
||||
installer_say "Unpacking ONL installer files..."
|
||||
if test "$SFX_PAD"; then
|
||||
# ha ha, busybox cannot exclude multiple files
|
||||
unzip -o $installer_zip $installer_list -x $SFX_PAD
|
||||
elif test "$SFX_UNZIP"; then
|
||||
unzip -o $installer_zip $installer_list -x $installer_script
|
||||
else
|
||||
dd if=$installer_zip bs=$SFX_BLOCKSIZE skip=$SFX_BLOCKS \
|
||||
| unzip -o - $installer_list -x $installer_script
|
||||
fi
|
||||
|
||||
# Developer debugging
|
||||
if has_boot_env onl_installer_unpack_only; then installer_unpack_only=1; fi
|
||||
if test "${installer_unpack_only}"; then
|
||||
installer_say "Unpack only requested."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rootdir=$(mktemp -d -t "initrd-XXXXXX")
|
||||
installer_say "Extracting initrd to $rootdir"
|
||||
if test "$initrd_offset"; then
|
||||
tmprd=$(mktemp -t initrd-XXXXXX)
|
||||
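# The initrd is embedded in this archive at a known byte offset: the first
# dd uses bs=<offset> skip=1 to copy everything past that offset, and the
# second dd (reading /dev/null with seek=1) truncates the copy to exactly
# <size> bytes.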
dd if="$initrd_archive" of="$tmprd" bs="$initrd_offset" skip=1
|
||||
dd if=/dev/null of="$tmprd" bs="$initrd_size" seek=1
|
||||
initrd=$tmprd
|
||||
else
|
||||
initrd="${installer_dir}/$initrd_archive"
|
||||
fi
|
||||
gzip -dc "$initrd" | ( cd "$rootdir" && cpio -imd )
|
||||
|
||||
# get common installer functions
|
||||
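# (lib.sh provides installer_reboot and installer_mkchroot, used below)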
. "${rootdir}/lib/vendor-config/onl/install/lib.sh"
|
||||
|
||||
installer_mkchroot "${rootdir}"
|
||||
|
||||
# make the installer available to the chroot
|
||||
mkdir -p "${rootdir}/mnt/installer"
|
||||
mount -o ro,bind "${installer_dir}" "${rootdir}/mnt/installer"
|
||||
|
||||
# make the onie boot files available to the chroot
|
||||
mkdir -p "${rootdir}/mnt/onie-boot"
|
||||
if test -d "/mnt/onie-boot"; then
|
||||
mount -o ro,bind "/mnt/onie-boot" "${rootdir}/mnt/onie-boot"
|
||||
fi
|
||||
|
||||
# generate config for installer environment
|
||||
mkdir -p "${rootdir}/etc/onl"
|
||||
cp /dev/null "${rootdir}/etc/onl/installer.conf"
|
||||
echo "onl_version=\"$onl_version\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
|
||||
# Generate the MD5 signature for ourselves for future reference.
|
||||
installer_md5=$(md5sum "$0" | awk '{print $1}')
|
||||
echo "installer_md5=\"$installer_md5\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
|
||||
# expose the zip file for later expansion by the initrd
|
||||
case "$installer_zip" in
|
||||
"${installer_dir}"/*)
|
||||
echo "installer_zip=\"${installer_zip##*/}\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
;;
|
||||
*)
|
||||
zf=$(mktemp "$rootdir/mnt/installer/installer-zip-XXXXXX")
|
||||
installer_say "Exposing installer archive $installer_zip as $zf"
|
||||
mount --bind "$installer_zip" $zf
|
||||
echo "installer_zip=\"${zf##*/}\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Cache our install URL if available
|
||||
if test -f "$0.url"; then
|
||||
installer_url=$(cat "$0.url")
|
||||
echo "installer_url=\"$installer_url\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
fi
|
||||
|
||||
echo "installer_dir=/mnt/installer" >> "${rootdir}/etc/onl/installer.conf"
|
||||
|
||||
# include access details for the initrd
|
||||
if test "$initrd_offset"; then
|
||||
echo "initrd_archive=\"$initrd_archive\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
echo "initrd_offset=\"$initrd_offset\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
echo "initrd_size=\"$initrd_size\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
fi
|
||||
|
||||
postinst=$(mktemp -t postinst-XXXXXX)
|
||||
b=${postinst##*/}
|
||||
echo "installer_chroot=\"${rootdir}\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
echo "installer_postinst=\"/mnt/installer/$b\"" >> "${rootdir}/etc/onl/installer.conf"
|
||||
|
||||
# for now, skip the other dot-files in /etc/onl, we do not need them
|
||||
# to enable initial install
|
||||
|
||||
# no special handling for /tmp or /run, since this is all in /tmp
|
||||
# anyway
|
||||
|
||||
installer_say "Launching ONL installer"
|
||||
|
||||
installer_shell_dfl="/usr/bin/onl-install --force"
|
||||
installer_shell=${installer_shell-"$installer_shell_dfl"}
|
||||
# default, unmount flash filesystems and run the installer script
|
||||
|
||||
# Ugh, unmount /mnt filesystems here,
|
||||
# they are not accessible from within the chroot
|
||||
installer_force_umount() {
|
||||
local dev mpt
|
||||
dev=$1; shift
|
||||
mpt=$1; shift
|
||||
case "$mpt" in
|
||||
/mnt/*)
|
||||
installer_say "Unmounting $mpt (--force)"
|
||||
umount "$mpt"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
if test "$installer_shell" = "$installer_shell_dfl"; then
|
||||
visit_proc_mounts installer_force_umount
|
||||
else
|
||||
installer_say "*** using non-default installer command: $installer_shell"
|
||||
installer_say "*** watch out for lingering mount-points"
|
||||
fi
|
||||
|
||||
chroot "${rootdir}" $installer_shell
|
||||
|
||||
if test -f "$postinst"; then
|
||||
installer_say "Invoking post-install actions"
|
||||
set -x
|
||||
. "$postinst"
|
||||
set +x
|
||||
fi
|
||||
|
||||
trap - 0 1
|
||||
installer_umount
|
||||
|
||||
if test "${onie_platform}"; then
|
||||
installer_reboot
|
||||
fi
|
||||
|
||||
exit
|
||||
|
||||
# Local variables:
|
||||
# mode: sh
|
||||
# sh-basic-offset: 2
|
||||
# End:
|
||||
# Do not add any additional whitespace after this point.
|
||||
111
builds/any/installer/new-hotness/uboot/builds/Makefile
Normal file
@@ -0,0 +1,111 @@
|
||||
ifndef ARCH
|
||||
$(error $$ARCH not set)
|
||||
endif
|
||||
|
||||
ONLPLATFORM = python $(ONL)/tools/onlplatform.py
|
||||
PLATFORMS := $(shell $(ONLPM) --platform-manifest onl-loader-initrd:$(ARCH))
|
||||
|
||||
MKSHAR = $(ONL)/tools/mkshar
|
||||
MKSHAR_OPTS = --lazy --unzip-pad --fixup-perms autoperms.sh
|
||||
MKSHAR_PERMS = autoperms.sh
|
||||
|
||||
VONLDIR = $(ONL)/packages/base/all/vendor-config-onl
|
||||
PYFIT = $(VONLDIR)/src/bin/pyfit
|
||||
PYFIT_ENVIRONMENT = PYTHONPATH=$(VONLDIR)/src/python
|
||||
|
||||
# Hardcoded to match ONL File naming conventions.
|
||||
include $(ONL)/make/version-onl.mk
|
||||
INSTALLER_NAME=$(FNAME_PRODUCT_VERSION)_ONL-OS_$(FNAME_BUILD_ID)_$(UARCH)_INSTALLER
|
||||
|
||||
# default fit image can be used as the canonical location for the initrd
|
||||
FIT_IMAGE_ALL := $(shell $(ONLPM) --find-file onl-loader-fit:$(ARCH) onl-loader-fit.itb)
|
||||
INITRD := $(shell $(ONLPM) --find-file onl-loader-initrd:$(ARCH) onl-loader-initrd-$(ARCH).cpio.gz)
|
||||
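# pyfit reports the byte range of the initrd inside the FIT image; the
# start/end offsets are substituted into installer.sh below so the generated
# installer can carve the initrd out of the .itb with dd rather than
# shipping a separate cpio archive.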
INITRD_BOUNDS := $(shell $(PYFIT_ENVIRONMENT) $(PYFIT) -v offset $(FIT_IMAGE_ALL) --initrd)
|
||||
|
||||
__installer: installer.sh __installer_fit_files __installer_platform_files __installer_swi_files
|
||||
$(ONL_V_at)rm -rf *INSTALLER* *.md5sum
|
||||
$(ONL_V_at)cp /dev/null $(MKSHAR_PERMS)
|
||||
$(ONL_V_at)cp $(ONL)/make/version-onl.sh .
|
||||
$(ONL_V_at)echo "#!/bin/sh" >> $(MKSHAR_PERMS)
|
||||
$(ONL_V_at)echo "set -e" >> $(MKSHAR_PERMS)
|
||||
$(ONL_V_at)echo "set -x" >> $(MKSHAR_PERMS)
|
||||
$(MKSHAR) $(MKSHAR_OPTS) "$(INSTALLER_NAME)" $(ONL)/tools/scripts/sfx.sh.in installer.sh *.swi *.itb version-onl.sh boot-config
|
||||
$(ONL_V_at)rm -rf installer.sh *.itb *.swi version-onl.sh autoperms.sh
|
||||
$(ONL_V_at)md5sum "$(INSTALLER_NAME)" | awk '{ print $$1 }' > "$(INSTALLER_NAME).md5sum"
|
||||
|
||||
installer.sh: Makefile $(ONL)/builds/any/installer/new-hotness/installer.sh.in
|
||||
$(ONL_V_GEN)cp /dev/null $@
|
||||
$(ONL_V_at): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
if test "$(INITRD_BOUNDS)"; then \
|
||||
a="$(FIT_IMAGE_ALL)"; a=$${a##*/} ;\
|
||||
else \
|
||||
a="$(INITRD)"; a=$${a##*/} ;\
|
||||
fi ;\
|
||||
set dummy $(INITRD_BOUNDS); start=$$2; end=$$3; sz=$$(($$end - $$start + 1)) ;\
|
||||
sed \
|
||||
-e 's^@ONLVERSION@^$(VERSION_STRING)^g' \
|
||||
-e "s^@INITRD_ARCHIVE@^$${a}^g" \
|
||||
-e "s^@INITRD_OFFSET@^$$start^g" \
|
||||
-e "s^@INITRD_SIZE@^$$sz^g" \
|
||||
-e 's^@ARCH@^$(ARCH)^g' \
|
||||
$(ONL)/builds/any/installer/new-hotness/installer.sh.in \
|
||||
>> $@
|
||||
$(ONL_V_at)echo "PAYLOAD_FOLLOWS" >> $@
|
||||
|
||||
__installer_fit_files:
|
||||
$(ONL_V_GEN): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
src=$(FIT_IMAGE_ALL) ;\
|
||||
dst=$${src##*/} ;\
|
||||
if test "$$dst" -nt Makefile; then \
|
||||
: ;\
|
||||
else \
|
||||
echo "Staging $$dst" ;\
|
||||
cp $$src $$dst ;\
|
||||
fi ;\
|
||||
:
|
||||
|
||||
##############################
|
||||
#
|
||||
# optionally include custom itb files for each platform
|
||||
#
|
||||
##############################
|
||||
|
||||
__installer_platform_files:
|
||||
$(ONL_V_GEN): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
l="$(PLATFORMS)"; for p in $$l; do \
|
||||
echo "Looking for an ITB specific to $$p, ignore errors..." ;\
|
||||
src=$$($(ONLPLATFORM) $$p $(ARCH) itb 2>/dev/null) || : ;\
|
||||
if test "$$src"; then :; else continue; fi ;\
|
||||
dst=$${src##*/} ;\
|
||||
echo "Found $$dst" ;\
|
||||
if test "$$dst" -nt Makefile; then continue; fi ;\
|
||||
echo "Staging $$dst for $$p" ;\
|
||||
cp "$$src" "$$dst" ;\
|
||||
done ;\
|
||||
:
|
||||
|
||||
__installer_swi_files:
|
||||
ifndef NO_SWI
|
||||
$(ONL_V_GEN): ;\
|
||||
set -e ;\
|
||||
if $(ONL_V_P); then set -x; fi ;\
|
||||
swidir=$$(mktemp -d $(PWD)/swi-d-XXXXXX) ;\
|
||||
$(ONLPM) --extract-dir onl-swi:$(ARCH) $$swidir ;\
|
||||
mv $$swidir/usr/share/onl/packages/$(ARCH)/onl-swi/*.swi . ;\
|
||||
rm -fr $$swidir ;\
|
||||
:
|
||||
else
|
||||
$(ONL_V_GEN):
|
||||
endif
|
||||
|
||||
shar installer: installer
|
||||
|
||||
clean:
|
||||
rm -f *.swi *.installer *.cpio.gz
|
||||
|
||||
@@ -69,5 +69,10 @@
|
||||
- onl-loader-initscripts
|
||||
- onlp-snmpd
|
||||
- oom-shim
|
||||
- python-parted
|
||||
- python-yaml
|
||||
- bzip2
|
||||
- xz-utils
|
||||
- unzip
|
||||
- onl-mibs
|
||||
- openssl
|
||||
|
||||
@@ -68,5 +68,10 @@
|
||||
- onl-loader-initscripts
|
||||
- onlp-snmpd
|
||||
- oom-shim
|
||||
- python-parted
|
||||
- python-yaml
|
||||
- bzip2
|
||||
- xz-utils
|
||||
- unzip
|
||||
- onl-mibs
|
||||
- openssl
|
||||
|
||||
1
builds/armel/installer/new-hotness/Makefile
Normal file
@@ -0,0 +1 @@
|
||||
include $(ONL)/make/pkg.mk
|
||||
2
builds/armel/installer/new-hotness/PKG.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
!include $ONL/builds/any/installer/new-hotness/APKG.yml ARCH=armel
|
||||
|
||||
1
builds/armel/installer/new-hotness/builds/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*INSTALLER
|
||||
2
builds/armel/installer/new-hotness/builds/Makefile
Normal file
@@ -0,0 +1,2 @@
|
||||
include $(ONL)/make/config.armel.mk
|
||||
include $(ONL)/builds/any/installer/new-hotness/uboot/builds/Makefile
|
||||
4
builds/armel/installer/new-hotness/builds/boot-config
Normal file
@@ -0,0 +1,4 @@
|
||||
NETDEV=ma1
|
||||
NETAUTO=dhcp
|
||||
BOOTMODE=SWI
|
||||
SWI=images::latest
|
||||
1
builds/powerpc/installer/new-hotness/Makefile
Normal file
@@ -0,0 +1 @@
|
||||
include $(ONL)/make/pkg.mk
|
||||
2
builds/powerpc/installer/new-hotness/PKG.yml
Normal file
@@ -0,0 +1,2 @@
|
||||
!include $ONL/builds/any/installer/new-hotness/APKG.yml ARCH=powerpc
|
||||
|
||||
1
builds/powerpc/installer/new-hotness/builds/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*INSTALLER
|
||||
2
builds/powerpc/installer/new-hotness/builds/Makefile
Normal file
@@ -0,0 +1,2 @@
|
||||
include $(ONL)/make/config.powerpc.mk
|
||||
include $(ONL)/builds/any/installer/new-hotness/uboot/builds/Makefile
|
||||
4
builds/powerpc/installer/new-hotness/builds/boot-config
Normal file
@@ -0,0 +1,4 @@
|
||||
NETDEV=ma1
|
||||
NETAUTO=dhcp
|
||||
BOOTMODE=SWI
|
||||
SWI=images::latest
|
||||
@@ -14,10 +14,11 @@ packages:
|
||||
- src/etc : /etc
|
||||
- src/lib : /lib
|
||||
- src/bootmodes : /bootmodes
|
||||
- src/python: /usr/lib/python2.7/site-packages
|
||||
- $ONL/make/version-onl.sh : /etc/onl/loader/versions.sh
|
||||
- $ONL/make/version-onl.json : /etc/onl/loader/versions.json
|
||||
- $ONL/make/version-onl.mk : /etc/onl/loader/versions.mk
|
||||
|
||||
|
||||
changelog: Changes.
|
||||
|
||||
|
||||
|
||||
@@ -36,7 +36,12 @@ trap "restoreconsole; reboot -f" EXIT
|
||||
mount -t proc proc /proc
|
||||
mount -t sysfs sysfs /sys
|
||||
mount -o remount,size=1M /dev
|
||||
|
||||
case "$(stat -f -c "%T" /tmp)" in
|
||||
tmpfs|ramfs) ;;
|
||||
*)
|
||||
mount -t tmpfs tmpfs /tmp
|
||||
;;
|
||||
esac
|
||||
|
||||
# Grab cmdline settings
|
||||
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
/usr/lib/python2.7/dist-packages
|
||||
@@ -11,6 +11,7 @@ packages:
|
||||
src/python/onl : $PY_INSTALL/onl
|
||||
src/boot.d : /etc/boot.d
|
||||
src/bin : /usr/bin
|
||||
src/lib : /lib/vendor-config/onl
|
||||
|
||||
changelog: Changes
|
||||
|
||||
@@ -23,7 +24,7 @@ packages:
|
||||
summary: ONL Base Configuration Package (Loader)
|
||||
|
||||
files:
|
||||
src/python/onl : /usr/lib/python2.7/onl
|
||||
src/python/onl : /usr/lib/python2.7/dist-packages/onl
|
||||
src/bin/initmounts : /bin/initmounts
|
||||
src/bin/pki : /sbin/pki
|
||||
|
||||
|
||||
7
packages/base/all/vendor-config-onl/src/bin/loader-shell
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""Run native ONIE tools
|
||||
"""
|
||||
|
||||
import onl.install.ShellApp
|
||||
onl.install.ShellApp.Loader.main()
|
||||
7
packages/base/all/vendor-config-onl/src/bin/onie-shell
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""Run native ONIE tools
|
||||
"""
|
||||
|
||||
import onl.install.ShellApp
|
||||
onl.install.ShellApp.Onie.main()
|
||||
7
packages/base/all/vendor-config-onl/src/bin/onl-install
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""Install switch light
|
||||
"""
|
||||
|
||||
import onl.install.App
|
||||
onl.install.App.main()
|
||||
7
packages/base/all/vendor-config-onl/src/bin/onl-recover
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""Recover switch light
|
||||
"""
|
||||
|
||||
import onl.install.RecoverApp
|
||||
onl.install.RecoverApp.main()
|
||||
7
packages/base/all/vendor-config-onl/src/bin/pyfit
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
"""Swiss-army-knife FIT decoder
|
||||
"""
|
||||
|
||||
import onl.install.Fit
|
||||
onl.install.Fit.App.main()
|
||||
129
packages/base/all/vendor-config-onl/src/lib/install/lib.sh
Normal file
@@ -0,0 +1,129 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
######################################################################
|
||||
#
|
||||
# helper functions for install
|
||||
#
|
||||
######################################################################
|
||||
|
||||
installer_reboot() {
|
||||
local dummy sts timeout trapsts
|
||||
if test $# -gt 0; then
|
||||
timeout=$1; shift
|
||||
else
|
||||
timeout=3
|
||||
fi
|
||||
|
||||
installer_say "Rebooting in ${timeout}s"
|
||||
|
||||
unset dummy trapsts
|
||||
# ha ha, 'local' auto-binds the variables
|
||||
|
||||
trap "trapsts=130" 2
|
||||
if read -t $timeout -r -p "Hit CR to continue, CTRL-D or CTRL-C to stop... " dummy; then
|
||||
sts=0
|
||||
else
|
||||
sts=$?
|
||||
fi
|
||||
trap - 2
|
||||
test "$trapsts" && sts=$trapsts
|
||||
|
||||
if test ${dummy+set}; then
|
||||
if test $sts -eq 0; then
|
||||
installer_say "CR, rebooting"
|
||||
exit
|
||||
else
|
||||
installer_say "CTRL-D, stopped"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
# ha ha, busybox does not report SIGALRM
|
||||
if test "${trapsts+set}"; then
|
||||
:
|
||||
else
|
||||
installer_say "timeout, rebooting"
|
||||
reboot
|
||||
fi
|
||||
|
||||
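# a status above 128 means read was terminated by a signal; the shell
# encodes signal N as exit status 128+N, so recover the signal number here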
signo=$(( $sts - 128 ))
|
||||
if test $signo -eq 14; then
|
||||
# SIGALRM, possibly irrelevant for busybox
|
||||
installer_say "timeout, rebooting"
|
||||
reboot
|
||||
fi
|
||||
|
||||
# e.g. SIGQUIT
|
||||
installer_say "signal $signo, stopped"
|
||||
exit
|
||||
}
|
||||
|
||||
installer_mkchroot() {
|
||||
local rootdir
|
||||
rootdir=$1
|
||||
|
||||
# special handling for /dev, which usually already has nested mounts
|
||||
installer_say "Setting up /dev"
|
||||
rm -fr "${rootdir}/dev"/*
|
||||
for dev in /dev/*; do
|
||||
if test -d "$dev"; then
|
||||
mkdir "${rootdir}${dev}"
|
||||
else
|
||||
cp -a "$dev" "${rootdir}${dev}"
|
||||
fi
|
||||
done
|
||||
mkdir -p "${rootdir}/dev/pts"
|
||||
|
||||
installer_say "Setting up /run"
|
||||
rm -fr "${rootdir}/run"/*
|
||||
mkdir -p "${rootdir}/run"
|
||||
d1=$(stat -c "%D" /run)
|
||||
for rdir in /run/*; do
|
||||
if test -d "$rdir"; then
|
||||
mkdir "${rootdir}${rdir}"
|
||||
d2=$(stat -c "%D" $rdir)
|
||||
t2=$(stat -f -c "%T" $rdir)
|
||||
case "$t2" in
|
||||
tmpfs|ramfs)
|
||||
# skip tmpfs, we'll just inherit the initrd ramfs
|
||||
;;
|
||||
*)
|
||||
if test "$d1" != "$d2"; then
|
||||
mount -o bind $rdir "${rootdir}${rdir}"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
done
|
||||
|
||||
installer_say "Setting up mounts"
|
||||
mount -t proc proc "${rootdir}/proc"
|
||||
mount -t sysfs sysfs "${rootdir}/sys"
|
||||
mount -t devpts devpts "${rootdir}/dev/pts"
|
||||
|
||||
if test ${TMPDIR+set}; then
|
||||
# make the tempdir available to the chroot
|
||||
mkdir -p "${rootdir}${TMPDIR}"
|
||||
fi
|
||||
|
||||
# export ONIE defines to the installer
|
||||
if test -r /etc/machine.conf; then
|
||||
cp /etc/machine.conf "${rootdir}/etc/machine.conf"
|
||||
fi
|
||||
|
||||
# export ONL defines to the installer
|
||||
mkdir -p "${rootdir}/etc/onl"
|
||||
if test -d /etc/onl; then
|
||||
cp -a /etc/onl/. "${rootdir}/etc/onl/."
|
||||
fi
|
||||
|
||||
# export firmware config
|
||||
if test -r /etc/fw_env.config; then
|
||||
cp /etc/fw_env.config "${rootdir}/etc/fw_env.config"
|
||||
fi
|
||||
}
|
||||
|
||||
# Local variables
|
||||
# mode: sh
|
||||
# sh-basic-offset: 2
|
||||
# End:
|
||||
@@ -0,0 +1,136 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config-defaults-uboot.yml
|
||||
#
|
||||
# Configuration for u-boot systems (powerpc and arm)
|
||||
#
|
||||
######################################################################
|
||||
|
||||
default:
|
||||
|
||||
flat_image_tree:
|
||||
|
||||
##############################
|
||||
#
|
||||
# Default kernel packages provided by ONL
|
||||
#
|
||||
##############################
|
||||
|
||||
e500v-kernel-package: &e500v-kernel-package
|
||||
package: onl-kernel-3.9.6-powerpc-e500v:powerpc
|
||||
|
||||
e500v-kernel: &e500v-kernel
|
||||
=: kernel-3.9.6-powerpc-e500v.bin.gz
|
||||
<<: *e500v-kernel-package
|
||||
|
||||
e500mc-kernel-package: &e500mc-kernel-package
|
||||
package: onl-kernel-3.8.13-powerpc-e500mc:powerpc
|
||||
|
||||
e500mc-kernel: &e500mc-kernel
|
||||
=: kernel-3.8.13-powerpc-e500mc.bin.gz
|
||||
<<: *e500mc-kernel-package
|
||||
|
||||
arm-iproc-kernel-package: &arm-iproc-kernel-package
|
||||
package: onl-kernel-3.2-deb7-arm-iproc-all:armel
|
||||
|
||||
arm-iproc-kernel: &arm-iproc-kernel
|
||||
=: kernel-3.2-deb7-arm-iproc-all.bin.gz
|
||||
<<: *arm-iproc-kernel-package
|
||||
|
||||
##############################
|
||||
#
|
||||
# For your system, pick from the above list
|
||||
# to compose a 'kernel' and 'dtb' key
|
||||
#
|
||||
##############################
|
||||
|
||||
### Example, pick one kernel and one DTB
|
||||
##kernel:
|
||||
## <<: *e500v-kernel
|
||||
##dtb:
|
||||
## =: powerpc-quanta-lb9-r0.dtb
|
||||
## <<: *e500v-kernel-package
|
||||
|
||||
##############################
|
||||
#
|
||||
# pick an actual loader file,
|
||||
# usually the 'all' image
|
||||
#
|
||||
##############################
|
||||
|
||||
powerpc-itb: &powerpc-itb
|
||||
=: onl-loader-fit.itb
|
||||
package: onl-loader-fit:powerpc
|
||||
|
||||
arm-itb: &arm-itb
|
||||
=: onl-loader-fit.itb
|
||||
package: onl-loader-fit:armel
|
||||
|
||||
itb: *powerpc-itb
|
||||
|
||||
loader:
|
||||
|
||||
device: /dev/sda
|
||||
##device: /dev/mmcblk0
|
||||
|
||||
loadaddr: 0x10000000
|
||||
##loadaddr: 70000000
|
||||
|
||||
# Add your own 'setenv' clauses,
|
||||
# otherwise lean back and coast with these implicit ones
|
||||
setenv:
|
||||
##- onl_loadaddr: @loadaddr@
|
||||
### added automatically
|
||||
##- onl_platform: @platform@
|
||||
### added automatically
|
||||
##- onl_itb: @itb@
|
||||
- bootargs: >-
|
||||
console=$consoledev,$baudrate
|
||||
onl_platform=$onl_platform
|
||||
|
||||
ide_bootcmds: &ide_bootcmds
|
||||
- ext2load ide 0:1 $onl_loadaddr $onl_itb
|
||||
- "bootm $onl_loadaddr#$onl_platform"
|
||||
|
||||
usb_bootcmds: &usb_bootcmds
|
||||
- usb start
|
||||
- ext2load usb 0:1 $onl_loadaddr $onl_itb
|
||||
- "bootm $onl_loadaddr#$onl_platform"
|
||||
|
||||
# XXX roth arm example includes the 'usbiddev' magic
|
||||
usb2_bootcmds: &usb2_bootcmds
|
||||
- usb start
|
||||
- usbiddev
|
||||
- ext2load usb 0:1 $onl_loadaddr $onl_itb
|
||||
- "bootm $onl_loadaddr#$onl_platform"
|
||||
|
||||
mmc_bootcmds: &mmc_bootcmds
|
||||
- mmc part 0
|
||||
- ext2load mmc 0:1 $onl_loadaddr $onl_itb
|
||||
- "bootm $onl_loadaddr#$onl_platform"
|
||||
|
||||
nos_bootcmds: *ide_bootcmds
|
||||
|
||||
# Default partitioning scheme
|
||||
# boot, config --> 128MiB (ext2)
|
||||
# images --> 1GiB
|
||||
# data --> rest of disk
|
||||
# default format (as shown) is ext4
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 128MiB
|
||||
# NOTE that u-boot wants the boot partition ext2, not ext4
|
||||
format: ext2
|
||||
##format: raw
|
||||
|
||||
- ONL-CONFIG:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 1GiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
@@ -0,0 +1,106 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config-defaults-x86-64.yml
|
||||
#
|
||||
# Default settings for x86-64 platform-config YAML declarations
|
||||
#
|
||||
# X86 platforms assume a GPT partition table and ext4 partitions
|
||||
#
|
||||
######################################################################
|
||||
|
||||
default:
|
||||
|
||||
grub:
|
||||
|
||||
label: gpt
|
||||
# default, use a GPT (not msdos) label
|
||||
# this is mostly to *reject* invalid disk labels,
|
||||
# since we will never create our own
|
||||
|
||||
initrd-amd64: &initrd-amd64
|
||||
=: onl-loader-initrd-amd64.cpio.gz
|
||||
package: onl-loader-initrd:amd64
|
||||
|
||||
initrd:
|
||||
<<: *initrd-amd64
|
||||
|
||||
kernel-3.2: &kernel-3-2
|
||||
=: kernel-3.2-deb7-x86_64-all
|
||||
package: onl-kernel-3.2-deb7-x86-64-all:amd64
|
||||
|
||||
kernel-3.9.6: &kernel-3-9-6
|
||||
=: kernel-3.9.6-x86-64-all
|
||||
package: onl-kernel-3.9.6-x86-64-all:amd64
|
||||
|
||||
kernel-3.18: &kernel-3-18
|
||||
=: kernel-3.18-x86_64-all
|
||||
package: onl-kernel-3.18-x86-64-all:amd64
|
||||
|
||||
# pick one of the above kernels
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
# GRUB command line arguments for 'serial' declaration
|
||||
# this is equivalent to, but not in the same format as,
|
||||
# the linux 'console=' arguments below
|
||||
# Default for ttyS1
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
# supplemental kernel arguments
|
||||
# (not including kernel, initrd and ONL-specific options)
|
||||
# Default for ttyS1
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
|
||||
### Defaults for ttyS0
|
||||
##serial: >-
|
||||
## --port=0x3f8
|
||||
## --speed=115200
|
||||
## --word=8
|
||||
## --parity=no
|
||||
## --stop=1
|
||||
##args: >-
|
||||
## nopat
|
||||
## console=ttyS0,115200n8
|
||||
|
||||
##device: /dev/vda
|
||||
### install to a specific block device
|
||||
|
||||
device: ONIE-BOOT
|
||||
# install to the device that contains the ONIE-BOOT partition
|
||||
# (query using parted and/or blkid)
|
||||
|
||||
# Default partitioning scheme
|
||||
# boot, config --> 128MiB
|
||||
# images --> 1GiB
|
||||
# data --> rest of disk
|
||||
# default format (as shown) is ext4
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-CONFIG:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 1GiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
|
||||
### Sample partitioning scheme experiencing disk space pressure
|
||||
##installer:
|
||||
##- ONL-BOOT: 128MiB
|
||||
##- ONL-CONFIG: 128MiB
|
||||
##- ONL-IMAGES: 384MiB
|
||||
##- ONL-DATA: 100%
|
||||
|
||||
101
packages/base/all/vendor-config-onl/src/python/onl/YamlUtils.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""YamlUtils.py
|
||||
|
||||
"""
|
||||
|
||||
import yaml
|
||||
|
||||
def merge(p1, p2):
|
||||
"""Merge two YAML files.
|
||||
|
||||
The file at p1 is the 'default' source; leaf values from the file at p2 override it.
|
||||
Return the merged tree.
|
||||
|
||||
The YAML in p1 should be a dict with a single top-level key, 'default'.
|
||||
The YAML in p2 should be a dict with a single top-level key, not 'default'.
|
||||
|
||||
Set a leaf in the p2 YAML to nil ('~') to create a tombstone (discard any key
|
||||
from p1).
|
||||
|
||||
If a (sub-)key in p1 and p2 differs in type (dict vs. non-dict) then
|
||||
the merge will proceed with the non-dict promoted to a dict using
|
||||
the default-key schema ('='). Consumers of this function should be
|
||||
prepared to handle such keys.
|
||||
"""
|
||||
|
||||
with open(p1) as fd:
|
||||
buf1 = fd.read()
|
||||
with open(p2) as fd:
|
||||
buf2 = fd.read()
|
||||
|
||||
# read p1 as-is, make sure it looks like a 'default' YAML
|
||||
c1 = yaml.load(buf1)
|
||||
k1 = list(c1.keys())
|
||||
if k1 != ['default']:
|
||||
raise ValueError("%s: invalid top-level keys for default mapping: %s"
|
||||
% (p1, k1,))
|
||||
|
||||
# read p2 with the default YAML as a sub-key (to resolve anchors)
|
||||
lines = buf2.splitlines(False)
|
||||
lines = [x for x in lines if x != '---']
|
||||
buf3 = buf1 + "\n" + "\n".join(lines)
|
||||
c2 = yaml.load(buf3)
|
||||
c2.pop('default', None)
|
||||
|
||||
k2 = list(c2.keys())
|
||||
if len(k2) != 1:
|
||||
raise ValueError("invalid format for target mapping")
|
||||
tgtKey = k2[0]
|
||||
|
||||
merged = { tgtKey : {} }
|
||||
q = [(c1['default'], c2[tgtKey], merged[tgtKey])]
|
||||
while True:
|
||||
if not q: break
|
||||
c1, c2, c3 = q.pop(0)
|
||||
# add in non-overlapping keys
|
||||
# 'None' keys from p2 are tombstones
|
||||
s1 = set(c1.keys())
|
||||
s2 = set(c2.keys())
|
||||
|
||||
for k in s1.difference(s2):
|
||||
v = c1[k]
|
||||
if type(v) == dict:
|
||||
c3.setdefault(k, {})
|
||||
q.append((v, {}, c3[k],))
|
||||
else:
|
||||
c3.setdefault(k, v)
|
||||
|
||||
for k in s2.difference(s1):
|
||||
v = c2[k]
|
||||
if v is None: continue
|
||||
if type(v) == dict:
|
||||
c3.setdefault(k, {})
|
||||
q.append(({}, v, c3[k],))
|
||||
else:
|
||||
c3.setdefault(k, v)
|
||||
|
||||
# handle overlapping keys
|
||||
for k in s1.intersection(s2):
|
||||
v1 = c1[k]
|
||||
v2 = c2[k]
|
||||
|
||||
if v2 is None: continue
|
||||
|
||||
# two dicts, key-by-key reconciliation required
|
||||
if type(v1) == dict and type(v2) == dict:
|
||||
c3.setdefault(k, {})
|
||||
q.append((v1, v2, c3[k],))
|
||||
continue
|
||||
|
||||
# two non-dicts, p2 wins
|
||||
if type(v1) != dict and type(v2) != dict:
|
||||
c3[k] = v2
|
||||
continue
|
||||
|
||||
if type(v1) != dict:
|
||||
v1 = { '=' : v1, }
|
||||
if type(v2) != dict:
|
||||
v2 = { '=' : v2, }
|
||||
c3.setdefault(k, {})
|
||||
q.append((v1, v2, c3[k],))
|
||||
|
||||
return merged
|
||||
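To illustrate the merge semantics documented above, here is a minimal usage sketch; the file paths and the platform key are hypothetical, chosen only for illustration:

# defaults.yml has a single 'default' top-level key; the platform file has a
# single platform-named key whose leaves override the defaults.
from onl.YamlUtils import merge

merged = merge('/etc/onl/defaults.yml', '/etc/onl/x86-64-example-r0.yml')
conf = merged['x86-64-example-r0']
# Keys present only in the defaults are carried over; overlapping scalar keys
# take the platform value; a platform leaf set to '~' (None) acts as a
# tombstone and removes the corresponding default key.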
@@ -0,0 +1,364 @@
|
||||
"""App.py
|
||||
|
||||
top-level install app
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import sys, os
|
||||
import logging
|
||||
import imp
|
||||
import glob
|
||||
import argparse
|
||||
import shutil
|
||||
import urllib
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from InstallUtils import InitrdContext
|
||||
from InstallUtils import SubprocessMixin
|
||||
from InstallUtils import ProcMountsParser
|
||||
import ConfUtils, BaseInstall
|
||||
|
||||
class App(SubprocessMixin):
|
||||
|
||||
def __init__(self, url=None,
|
||||
debug=False, force=False,
|
||||
log=None):
|
||||
|
||||
if log is not None:
|
||||
self.log = log
|
||||
else:
|
||||
self.log = logging.getLogger(self.__class__.__name__)
|
||||
|
||||
self.url = url
|
||||
self.force = force
|
||||
self.debug = debug
|
||||
# remote-install mode
|
||||
|
||||
self.installer = None
|
||||
self.machineConf = None
|
||||
self.installerConf = None
|
||||
self.onlPlatform = None
|
||||
# local-install mode
|
||||
|
||||
self.nextUpdate = None
|
||||
|
||||
def run(self):
|
||||
|
||||
if self.url is not None:
|
||||
return self.runUrl()
|
||||
else:
|
||||
return self.runLocal()
|
||||
|
||||
def runUrl(self):
|
||||
pm = ProcMountsParser()
|
||||
for m in pm.mounts:
|
||||
if m.dir.startswith('/mnt/onl'):
|
||||
if not self.force:
|
||||
self.log.error("directory %s is still mounted", m.dir)
|
||||
return 1
|
||||
self.log.warn("unmounting %s (--force)", m.dir)
|
||||
self.check_call(('umount', m.dir,))
|
||||
|
||||
def reporthook(blocks, bsz, sz):
|
||||
if time.time() < self.nextUpdate: return
|
||||
self.nextUpdate = time.time() + 0.25
|
||||
if sz:
|
||||
pct = blocks * bsz * 100 / sz
|
||||
sys.stderr.write("downloaded %d%% ...\r" % pct)
|
||||
else:
|
||||
icon = "|/-\\"[blocks % 4]
|
||||
sys.stderr.write("downloading ... %s\r" % icon)
|
||||
|
||||
p = tempfile.mktemp(prefix="installer-",
|
||||
suffix=".bin")
|
||||
try:
|
||||
self.log.info("downloading installer from %s --> %s",
|
||||
self.url, p)
|
||||
self.nextUpdate = 0
|
||||
if os.isatty(sys.stdout.fileno()):
|
||||
dst, headers = urllib.urlretrieve(self.url, p, reporthook)
|
||||
else:
|
||||
dst, headers = urllib.urlretrieve(self.url, p)
|
||||
sys.stdout.write("\n")
|
||||
|
||||
self.log.debug("+ chmod +x %s", p)
|
||||
os.chmod(p, 0755)
|
||||
|
||||
env = {}
|
||||
env.update(os.environ)
|
||||
|
||||
if os.path.exists("/etc/onl/platform"):
|
||||
self.log.debug("enabling unzip features for ONL")
|
||||
env['SFX_UNZIP'] = '1'
|
||||
self.log.debug("+ export SFX_UNZIP=1")
|
||||
env['SFX_LOOP'] = '1'
|
||||
self.log.debug("+ export SFX_LOOP=1")
|
||||
env['SFX_PIPE'] = '1'
|
||||
self.log.debug("+ export SFX_PIPE=1")
|
||||
|
||||
self.log.debug("enabling in-place fixups")
|
||||
env['SFX_INPLACE'] = '1'
|
||||
self.log.debug("+ export SFX_INPLACE=1")
|
||||
|
||||
if self.debug:
|
||||
self.log.debug("enabling installer debug")
|
||||
env['installer_debug'] = 'y'
|
||||
self.log.debug("+ export installer_debug=y")
|
||||
if self.log.level < logging.INFO:
|
||||
self.log.debug("enabling installer verbose logging")
|
||||
env['installer_verbose'] = 'y'
|
||||
self.log.debug("+ export installer_verbose=y")
|
||||
|
||||
self.log.info("invoking installer...")
|
||||
try:
|
||||
self.check_call((p,), env=env)
|
||||
except subprocess.CalledProcessError as ex:
|
||||
self.log.error("installer failed")
|
||||
return ex.returncode
|
||||
finally:
|
||||
if os.path.exists(p):
|
||||
os.unlink(p)
|
||||
|
||||
self.log.info("please reboot this system now.")
|
||||
return 0
|
||||
|
||||
def runLocal(self):
|
||||
|
||||
self.log.info("getting installer configuration")
|
||||
if os.path.exists(ConfUtils.MachineConf.PATH):
|
||||
self.machineConf = ConfUtils.MachineConf()
|
||||
else:
|
||||
self.log.warn("missing /etc/machine.conf from ONIE runtime")
|
||||
self.machineConf = ConfUtils.MachineConf(path='/dev/null')
|
||||
self.installerConf = ConfUtils.InstallerConf()
|
||||
|
||||
##self.log.info("using native GRUB")
|
||||
##self.grubEnv = ConfUtils.GrubEnv(log=self.log.getChild("grub"))
|
||||
|
||||
pat = "/mnt/onie-boot/onie/initrd.img*"
|
||||
l = glob.glob(pat)
|
||||
if l:
|
||||
initrd = l[0]
|
||||
self.log.info("using native ONIE initrd+chroot GRUB (%s)", initrd)
|
||||
initrdDir = InitrdContext.mkChroot(initrd, log=self.log)
|
||||
self.grubEnv = ConfUtils.ChrootGrubEnv(initrdDir,
|
||||
bootDir="/mnt/onie-boot",
|
||||
path="/grub/grubenv",
|
||||
log=self.log.getChild("grub"))
|
||||
# direct access using ONIE initrd as a chroot
|
||||
# (will need to fix up bootDir and bootPart later)
|
||||
else:
|
||||
self.log.info("using proxy GRUB")
|
||||
self.grubEnv = ConfUtils.ProxyGrubEnv(self.installerConf,
|
||||
bootDir="/mnt/onie-boot",
|
||||
path="/grub/grubenv",
|
||||
chroot=False,
|
||||
log=self.log.getChild("grub"))
|
||||
# indirect access through chroot host
|
||||
# (will need to fix up bootDir and bootPart later)
|
||||
|
||||
if os.path.exists(ConfUtils.UbootEnv.SETENV):
|
||||
self.ubootEnv = ConfUtils.UbootEnv(log=self.log.getChild("u-boot"))
|
||||
else:
|
||||
self.ubootEnv = None
|
||||
|
||||
self.log.info("ONL Installer %s", self.installerConf.onl_version)
|
||||
|
||||
code = self.findPlatform()
|
||||
if code: return code
|
||||
|
||||
try:
|
||||
import onl.platform.current
|
||||
except:
|
||||
self.log.exception("cannot find platform config")
|
||||
code = 1
|
||||
if self.log.level < logging.INFO:
|
||||
self.post_mortem()
|
||||
if code: return code
|
||||
|
||||
self.onlPlatform = onl.platform.current.OnlPlatform()
|
||||
|
||||
if 'grub' in self.onlPlatform.platform_config:
|
||||
self.log.info("trying a GRUB based installer")
|
||||
iklass = BaseInstall.GrubInstaller
|
||||
elif 'flat_image_tree' in self.onlPlatform.platform_config:
|
||||
self.log.info("trying a U-Boot based installer")
|
||||
iklass = BaseInstall.UbootInstaller
|
||||
else:
|
||||
self.log.error("cannot detect installer type")
|
||||
return 1
|
||||
|
||||
# run the platform-specific installer
|
||||
self.installer = iklass(machineConf=self.machineConf,
|
||||
installerConf=self.installerConf,
|
||||
platformConf=self.onlPlatform.platform_config,
|
||||
grubEnv=self.grubEnv,
|
||||
ubootEnv=self.ubootEnv,
|
||||
force=self.force,
|
||||
log=self.log)
|
||||
try:
|
||||
code = self.installer.run()
|
||||
except:
|
||||
self.log.exception("installer failed")
|
||||
code = 1
|
||||
if self.log.level < logging.INFO:
|
||||
self.post_mortem()
|
||||
if code: return code
|
||||
|
||||
if getattr(self.installer, 'grub', False):
|
||||
code = self.finalizeGrub()
|
||||
if code: return code
|
||||
if getattr(self.installer, 'uboot', False):
|
||||
code = self.finalizeUboot()
|
||||
if code: return code
|
||||
|
||||
self.log.info("Install finished.")
|
||||
return 0
|
||||
|
||||
def findPlatform(self):
|
||||
|
||||
plat = arch = None
|
||||
if os.path.exists(ConfUtils.MachineConf.PATH):
|
||||
plat = getattr(self.machineConf, 'onie_platform', None)
|
||||
arch = getattr(self.machineConf, 'onie_arch', None)
|
||||
if plat and arch:
|
||||
self.log.info("ONL installer running under ONIE.")
|
||||
plat = plat.replace('_', '-')
|
||||
elif os.path.exists("/etc/onl/platform"):
|
||||
with open("/etc/onl/platform") as fd:
|
||||
plat = fd.read().strip()
|
||||
if plat.startswith('x86-64'):
|
||||
arch = 'x86_64'
|
||||
else:
|
||||
arch = plat.partition('-')[0]
|
||||
self.log.info("ONL installer running under ONL or ONL loader.")
|
||||
|
||||
if plat and arch:
|
||||
self.installerConf.installer_platform = plat
|
||||
self.installerConf.installer_arch = arch
|
||||
else:
|
||||
self.log.error("The installation platform cannot be determined.")
|
||||
self.log.error("It does not appear that we are running under ONIE or the ONL loader.")
|
||||
self.log.error("If you know what you are doing you can re-run this installer")
|
||||
self.log.error("with an explicit 'installer_platform=<platform>' setting,")
|
||||
self.log.error("though this is unlikely to be the correct procedure at this point.")
|
||||
return 1
|
||||
|
||||
self.log.info("Detected platform %s", self.installerConf.installer_platform)
|
||||
|
||||
self.installerConf.installer_platform_dir = ("/lib/platform-config/%s"
|
||||
% (self.installerConf.installer_platform,))
|
||||
if not os.path.isdir(self.installerConf.installer_platform_dir):
|
||||
self.log.error("This installer does not support the %s platform.",
|
||||
self.installerConf.installer_platform)
|
||||
self.log.error("Available platforms are:")
|
||||
for d in os.listdir("/lib/platform-config"):
|
||||
self.log.error(" %s", d)
|
||||
self.log.error("Installation cannot continue.")
|
||||
return 1
|
||||
|
||||
return 0
|
||||
|
||||
def finalizeGrub(self):
|
||||
|
||||
def _m(src, dst):
|
||||
val = getattr(self.installerConf, src, None)
|
||||
if val is not None:
|
||||
setattr(self.grubEnv, dst, val)
|
||||
else:
|
||||
delattr(self.grubEnv, dst)
|
||||
|
||||
_m('installer_md5', 'onl_installer_md5')
|
||||
_m('onl_version', 'onl_installer_version')
|
||||
_m('installer_url', 'onl_installer_url')
|
||||
|
||||
return 0
|
||||
|
||||
def finalizeUboot(self):
|
||||
|
||||
if self.installer.platform.isOnie():
|
||||
def _m(src, dst):
|
||||
val = getattr(self.installerConf, src, None)
|
||||
if val is not None:
|
||||
setattr(self.ubootEnv, dst, val)
|
||||
else:
|
||||
delattr(self.ubootEnv, dst)
|
||||
|
||||
_m('installer_md5', 'onl_installer_md5')
|
||||
_m('onl_version', 'onl_installer_version')
|
||||
_m('installer_url', 'onl_installer_url')
|
||||
else:
|
||||
self.log.info("To configure U-Boot to boot ONL automatically, reboot the switch,")
|
||||
self.log.info("enter the U-Boot shell, and run these 2 commands:")
|
||||
self.log.info("=> setenv bootcmd '%s'", self.installer.platform.str_bootcmd())
|
||||
self.log.info("saveenv")
|
||||
|
||||
return 0
|
||||
|
||||
def shutdown(self):
|
||||
|
||||
installer, self.installer = self.installer, None
|
||||
if installer is not None:
|
||||
installer.shutdown()
|
||||
|
||||
def post_mortem(self):
|
||||
self.log.info("re-attaching to tty")
|
||||
fdno = os.open("/dev/console", os.O_RDWR)
|
||||
os.dup2(fdno, sys.stdin.fileno())
|
||||
os.dup2(fdno, sys.stdout.fileno())
|
||||
os.dup2(fdno, sys.stderr.fileno())
|
||||
os.close(fdno)
|
||||
|
||||
self.log.info("entering Python debugger (installer_debug=1)")
|
||||
import pdb
|
||||
pdb.post_mortem(sys.exc_info()[2])
|
||||
|
||||
@classmethod
|
||||
def main(cls):
|
||||
|
||||
logging.basicConfig()
|
||||
logger = logging.getLogger("install")
|
||||
logger.setLevel(logging.INFO)
|
||||
|
||||
# send to ONIE log
|
||||
hnd = logging.FileHandler("/dev/console")
|
||||
logger.addHandler(hnd)
|
||||
logger.propagate = False
|
||||
|
||||
onie_verbose = 'onie_verbose' in os.environ
|
||||
installer_debug = 'installer_debug' in os.environ
|
||||
|
||||
ap = argparse.ArgumentParser()
|
||||
ap.add_argument('-v', '--verbose', action='store_true',
|
||||
default=onie_verbose,
|
||||
help="Enable verbose logging")
|
||||
ap.add_argument('-D', '--debug', action='store_true',
|
||||
default=installer_debug,
|
||||
help="Enable python debugging")
|
||||
ap.add_argument('-U', '--url', type=str,
|
||||
help="Install from a remote URL")
|
||||
ap.add_argument('-F', '--force', action='store_true',
|
||||
help="Unmount filesystems before install")
|
||||
ops = ap.parse_args()
|
||||
|
||||
if ops.verbose:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
app = cls(url=ops.url, debug=ops.debug, force=ops.force,
|
||||
log=logger)
|
||||
try:
|
||||
code = app.run()
|
||||
except:
|
||||
logger.exception("runner failed")
|
||||
code = 1
|
||||
if ops.debug:
|
||||
app.post_mortem()
|
||||
|
||||
app.shutdown()
|
||||
sys.exit(code)
|
||||
|
||||
main = App.main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,817 @@
|
||||
"""BaseInstall.py
|
||||
|
||||
Base classes for installers.
|
||||
"""
|
||||
|
||||
import os, stat
|
||||
import subprocess
|
||||
import re
|
||||
import tempfile
|
||||
import logging
|
||||
import StringIO
|
||||
import parted
|
||||
import yaml
|
||||
import zipfile
|
||||
import shutil
|
||||
|
||||
from InstallUtils import SubprocessMixin
|
||||
from InstallUtils import MountContext, BlkidParser, PartedParser
|
||||
from InstallUtils import ProcMountsParser
|
||||
|
||||
import onl.YamlUtils
|
||||
|
||||
class Base:
|
||||
|
||||
class installmeta:
|
||||
|
||||
grub = False
|
||||
uboot = False
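# Subclasses set exactly one of these flags; installBootConfig() uses them
# to decide whether boot-config is mirrored into the GRUB or the U-Boot
# environment.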
|
||||
|
||||
def __init__(self,
|
||||
installerConf=None,
|
||||
machineConf=None,
|
||||
platformConf=None,
|
||||
grubEnv=None, ubootEnv=None):
|
||||
self.installerConf = installerConf
|
||||
self.machineConf = machineConf
|
||||
self.platformConf = platformConf
|
||||
self.grubEnv = grubEnv
|
||||
self.ubootEnv = ubootEnv
|
||||
|
||||
def isOnie(self):
|
||||
if self.machineConf is None: return False
|
||||
plat = getattr(self.machineConf, 'onie_platform', None)
|
||||
return plat is not None
|
||||
|
||||
def __init__(self,
|
||||
machineConf=None, installerConf=None, platformConf=None,
|
||||
grubEnv=None, ubootEnv=None,
|
||||
force=False,
|
||||
log=None):
|
||||
self.im = self.installmeta(installerConf=installerConf,
|
||||
machineConf=machineConf,
|
||||
platformConf=platformConf,
|
||||
grubEnv=grubEnv,
|
||||
ubootEnv = ubootEnv)
|
||||
self.log = log or logging.getLogger(self.__class__.__name__)
|
||||
|
||||
self.force = force
|
||||
# unmount filesystems as needed
|
||||
|
||||
self.device = None
|
||||
# target device, initialize this later
|
||||
|
||||
self.minpart = None
|
||||
self.nextBlock = None
|
||||
# keep track of next partition/next block
|
||||
|
||||
self.blkidParts = []
|
||||
# current scan of partitions and labels
|
||||
|
||||
self.partedDevice = None
|
||||
self.partedDisk = None
|
||||
# parted state
|
||||
|
||||
self.configArchive = None
|
||||
# backup of ONL-CONFIG during re-partitioning
|
||||
|
||||
self.zf = None
|
||||
# zipfile handle to installer archive
|
||||
|
||||
def run(self):
|
||||
self.log.error("not implemented")
|
||||
return 1
|
||||
|
||||
def shutdown(self):
|
||||
zf, self.zf = self.zf, None
|
||||
if zf: zf.close()
|
||||
|
||||
def installerCopy(self, basename, dst, optional=False):
|
||||
"""Copy the file as-is, or get it from the installer zip."""
|
||||
|
||||
src = os.path.join(self.im.installerConf.installer_dir, basename)
|
||||
if os.path.exists(src):
|
||||
self.copy2(src, dst)
|
||||
return
|
||||
|
||||
if basename in self.zf.namelist():
|
||||
self.log.debug("+ unzip -p %s %s > %s",
|
||||
self.im.installerConf.installer_zip, basename, dst)
|
||||
with self.zf.open(basename, "r") as rfd:
|
||||
with open(dst, "wb") as wfd:
|
||||
shutil.copyfileobj(rfd, wfd)
|
||||
return
|
||||
|
||||
if not optional:
|
||||
raise ValueError("missing installer file %s" % basename)
|
||||
|
||||
def installerDd(self, basename, device):
|
||||
|
||||
p = os.path.join(self.im.installerConf.installer_dir, basename)
|
||||
if os.path.exists(p):
|
||||
cmd = ('dd',
|
||||
'if=' + p,
|
||||
'of=' + device,)
|
||||
self.check_call(cmd, vmode=self.V2)
|
||||
return
|
||||
|
||||
if basename in self.zf.namelist():
|
||||
self.log.debug("+ unzip -p %s %s | dd of=%s",
|
||||
self.im.installerConf.installer_zip, basename, device)
|
||||
with self.zf.open(basename, "r") as rfd:
|
||||
with open(device, "rb+") as wfd:
|
||||
shutil.copyfileobj(rfd, wfd)
|
||||
return
|
||||
|
||||
raise ValueError("cannot find file %s" % basename)
|
||||
|
||||
def installerExists(self, basename):
|
||||
if basename in os.listdir(self.im.installerConf.installer_dir): return True
|
||||
if basename in self.zf.namelist(): return True
|
||||
return False
|
||||
|
||||
def installSwi(self):
|
||||
|
||||
files = os.listdir(self.im.installerConf.installer_dir) + self.zf.namelist()
|
||||
swis = [x for x in files if x.endswith('.swi')]
|
||||
if not swis:
|
||||
self.log.info("No ONL Software Image available for installation.")
|
||||
self.log.info("Post-install ZTN installation will be required.")
|
||||
return
|
||||
if len(swis) > 1:
|
||||
self.log.warn("Multiple SWIs found in installer: %s", " ".join(swis))
|
||||
return
|
||||
|
||||
base = swis[0]
|
||||
|
||||
self.log.info("Installing ONL Software Image (%s)...", base)
|
||||
dev = self.blkidParts['ONL-IMAGES']
|
||||
with MountContext(dev.device, log=self.log) as ctx:
|
||||
dst = os.path.join(ctx.dir, base)
|
||||
self.installerCopy(base, dst)
|
||||
|
||||
return 0
|
||||
|
||||
def backupConfig(self, dev):
|
||||
"""Back up the ONL-CONFIG partition for later restore."""
|
||||
self.configArchive = tempfile.mktemp(prefix="onl-config-",
|
||||
suffix=".tar.gz")
|
||||
self.log.info("backing up ONL-CONFIG partition %s to %s",
|
||||
dev, self.configArchive)
|
||||
with MountContext(dev, log=self.log) as ctx:
|
||||
self.log.debug("+ tar -zcf %s -C %s .",
|
||||
self.configArchive, ctx.dir)
|
||||
pipe = subprocess.Popen(["tar", "-zcf", self.configArchive, ".",],
|
||||
cwd=ctx.dir)
|
||||
pipe.communicate()
|
||||
code = pipe.wait()
|
||||
if code:
|
||||
raise SystemExit("backup of ONL-CONFIG failed")
|
||||
|
||||
def restoreConfig(self, dev):
|
||||
"""Restore the saved ONL-CONFIG."""
|
||||
archive, self.configArchive = self.configArchive, None
|
||||
self.log.info("restoring ONL-CONFIG archive %s to %s",
|
||||
archive, dev)
|
||||
with MountContext(dev, log=self.log) as ctx:
|
||||
self.log.debug("+ tar -zxf %s -C %s",
|
||||
archive, ctx.dir)
|
||||
pipe = subprocess.Popen(["tar", "-zxf", archive,],
|
||||
cwd=ctx.dir)
|
||||
pipe.communicate()
|
||||
code = pipe.wait()
|
||||
if code:
|
||||
raise SystemExit("backup of ONL-CONFIG failed")
|
||||
self.unlink(archive)
|
||||
|
||||
def deletePartitions(self):
|
||||
|
||||
nextBlock = -1
|
||||
dirty = False
|
||||
for part in self.partedDisk.partitions:
|
||||
self.log.info("examining %s part %d",
|
||||
self.partedDisk.device.path, part.number)
|
||||
if part.number < self.minpart:
|
||||
self.log.info("skip this part")
|
||||
nextBlock = max(part.geometry.start+part.geometry.length,
|
||||
nextBlock)
|
||||
else:
|
||||
self.log.info("deleting this part")
|
||||
self.partedDisk.removePartition(part)
|
||||
dirty = True
|
||||
|
||||
if dirty:
|
||||
self.partedDisk.commit()
|
||||
self.check_call(('partprobe', self.device,))
|
||||
|
||||
if nextBlock > -1:
|
||||
self.nextBlock = nextBlock
|
||||
else:
|
||||
self.log.warn("no partitions, starting at block 1")
|
||||
|
||||
return 0
|
||||
|
||||
def partitionParted(self):
|
||||
"""Build partitions according to the partition spec.
|
||||
|
||||
XXX roth -- hopefully the GPT labels specified here
|
||||
work correctly (that is, are ignored) on an msdos label
|
||||
"""
|
||||
|
||||
constraint = self.partedDevice.optimalAlignedConstraint
|
||||
# default partition layout constraint
|
||||
|
||||
devices = {}
|
||||
|
||||
def _u2s(sz, u):
|
||||
bsz = sz * u
|
||||
bsz = bsz + self.partedDevice.physicalSectorSize - 1
|
||||
return bsz / self.partedDevice.physicalSectorSize
|
||||
|
||||
UNITS = {
|
||||
'GiB' : 1024 * 1024 * 1024,
|
||||
'G' : 1000 * 1000 * 1000,
|
||||
'MiB' : 1024 * 1024,
|
||||
'M' : 1000 * 1000,
|
||||
'KiB' : 1024,
|
||||
'K' : 1000,
|
||||
}
|
||||
|
||||
for part in self.im.platformConf['installer']:
|
||||
|
||||
label, partData = list(part.items())[0]
|
||||
if type(partData) == dict:
|
||||
sz, fmt = partData['='], partData.get('format', 'ext4')
|
||||
else:
|
||||
sz, fmt = partData, 'ext4'
|
||||
|
||||
cnt = None
|
||||
nextBlock = self.nextBlock or 1
|
||||
minpart = self.minpart or 1
|
||||
for ul, ub in UNITS.items():
|
||||
if sz.endswith(ul):
|
||||
cnt = _u2s(int(sz[:-len(ul)], 10), ub)
|
||||
break
|
||||
if sz == '100%':
|
||||
cnt = self.partedDevice.getLength() - nextBlock
|
||||
if cnt is None:
|
||||
self.log.error("invalid size (no units) for %s: %s",
|
||||
part, sz)
|
||||
return 1
|
||||
|
||||
start = nextBlock
|
||||
end = start + cnt - 1
|
||||
if end <= self.partedDevice.getLength():
|
||||
self.log.info("Allocating %d sectors for %s",
|
||||
cnt, label)
|
||||
else:
|
||||
self.log.warn("%s: start sector %d, end sector %d, max %d",
|
||||
label, start, end,
|
||||
self.partedDevice.getLength())
|
||||
self.log.error("invalid partition %s [%s] (too big)",
|
||||
label, sz)
|
||||
return 1
|
||||
|
||||
geom = parted.Geometry(device=self.partedDevice,
|
||||
start=start, length=end-start+1)
|
||||
fs = parted.FileSystem(type=fmt, geometry=geom)
|
||||
part = parted.Partition(disk=self.partedDisk,
|
||||
type=parted.PARTITION_NORMAL,
|
||||
fs=fs,
|
||||
geometry=geom)
|
||||
if self.partedDisk.type == 'gpt':
|
||||
part.getPedPartition().set_name(label)
|
||||
self.partedDisk.addPartition(part, constraint=constraint)
|
||||
self.partedDisk.commit()
|
||||
self.check_call(('partprobe', self.device,))
|
||||
|
||||
if fmt == 'raw':
|
||||
self.log.info("Leaving %s (%s) unformatted (raw)",
|
||||
part.path, label)
|
||||
else:
|
||||
self.log.info("Formatting %s (%s) as %s",
|
||||
part.path, label, fmt)
|
||||
if fmt == 'msdos':
|
||||
self.mkdosfs(part.path, label=label)
|
||||
elif fmt == 'ext4':
|
||||
self.mke4fs(part.path, label=label, huge_file=False)
|
||||
elif fmt == 'ext2':
|
||||
self.mke2fs(part.path, label=label)
|
||||
else:
|
||||
self.mkfs(part.path, fstype=fmt)
|
||||
|
||||
self.nextBlock, self.minpart = end+1, minpart+1
|
||||
|
||||
devices[label] = part.path
|
||||
|
||||
if label == 'ONL-CONFIG' and self.configArchive is not None:
|
||||
self.restoreConfig(part.path)
|
||||
|
||||
self.blkidParts = BlkidParser(log=self.log.getChild("blkid"))
|
||||
# re-read the partitions
|
||||
|
||||
return 0
|
||||
|
||||
def installBootConfig(self):
|
||||
|
||||
try:
|
||||
dev = self.blkidParts['ONL-BOOT']
|
||||
except IndexError as ex:
|
||||
self.log.warn("cannot find ONL-BOOT partition (maybe raw?) : %s", str(ex))
|
||||
return 1
|
||||
|
||||
self.log.info("Installing boot-config to %s", dev.device)
|
||||
|
||||
basename = 'boot-config'
|
||||
with MountContext(dev.device, log=self.log) as ctx:
|
||||
dst = os.path.join(ctx.dir, basename)
|
||||
self.installerCopy(basename, dst)
|
||||
with open(dst) as fd:
|
||||
buf = fd.read()
|
||||
|
||||
ecf = buf.encode('base64', 'strict').strip()
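# The boot-config contents are stashed base64-encoded in the loader
# environment so the recovery path (see BaseRecovery) can regenerate
# boot-config if the FLASH filesystem is ever lost.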
|
||||
if self.im.grub and self.im.grubEnv is not None:
|
||||
setattr(self.im.grubEnv, 'boot_config_default', ecf)
|
||||
if self.im.uboot and self.im.ubootEnv is not None:
|
||||
setattr(self.im.ubootEnv, 'boot-config-default', ecf)
|
||||
|
||||
return 0
|
||||
|
||||
def assertUnmounted(self):
|
||||
"""Make sure the install device does not have any active mounts."""
|
||||
pm = ProcMountsParser()
|
||||
for m in pm.mounts:
|
||||
if m.device.startswith(self.device):
|
||||
if not self.force:
|
||||
self.log.error("mount %s on %s will be erased by install",
|
||||
m.dir, m.device)
|
||||
return 1
|
||||
else:
|
||||
self.log.warn("unmounting %s from %s (--force)",
|
||||
m.dir, m.device)
|
||||
try:
|
||||
self.check_call(('umount', m.dir,))
|
||||
except subprocess.CalledProcessError:
|
||||
self.log.error("cannot unmount")
|
||||
return 1
|
||||
|
||||
return 0
|
||||
|
||||
GRUB_TPL = """\
|
||||
#serial --port=0x3f8 --speed=115200 --word=8 --parity=no --stop=1
|
||||
serial %(serial)s
|
||||
terminal_input serial
|
||||
terminal_output serial
|
||||
set timeout=5
|
||||
|
||||
menuentry OpenNetworkLinux {
|
||||
search --no-floppy --label --set=root ONL-BOOT
|
||||
echo 'Loading Open Network Linux ...'
|
||||
insmod gzio
|
||||
insmod part_msdos
|
||||
#linux /kernel-3.9.6-x86-64-all nopat console=ttyS0,115200n8 onl_platform=x86-64-kvm-x86-64-r0
|
||||
linux /%(kernel)s %(args)s onl_platform=%(platform)s
|
||||
initrd /%(initrd)s
|
||||
}
|
||||
|
||||
# Menu entry to chainload ONIE
|
||||
menuentry ONIE {
|
||||
search --no-floppy --label --set=root ONIE-BOOT
|
||||
echo 'Loading ONIE ...'
|
||||
chainloader +1
|
||||
}
|
||||
"""
|
||||
|
||||
class GrubInstaller(SubprocessMixin, Base):
|
||||
"""Installer for grub-based systems (x86)."""
|
||||
|
||||
class installmeta(Base.installmeta):
|
||||
grub = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
Base.__init__(self, *args, **kwargs)
|
||||
|
||||
def findGpt(self):
|
||||
self.blkidParts = BlkidParser(log=self.log.getChild("blkid"))
|
||||
|
||||
deviceOrLabel = self.im.platformConf['grub']['device']
|
||||
if deviceOrLabel.startswith('/dev'):
|
||||
tgtDevice, tgtLabel = deviceOrLabel, None
|
||||
else:
|
||||
tgtDevice, tgtLabel = None, deviceOrLabel
|
||||
|
||||
# enumerate labeled partitions to try to identify
|
||||
# the boot device
|
||||
for part in self.blkidParts:
|
||||
dev, partno = part.splitDev()
|
||||
if tgtLabel is not None and tgtLabel == part.label:
|
||||
if not len(partno):
|
||||
self.log.error("cannot use whole disk")
|
||||
return 1
|
||||
if self.device is None:
|
||||
self.device = dev
|
||||
else:
|
||||
self.log.error("found multiple devices: %s, %s",
|
||||
dev, self.device)
|
||||
return 1
|
||||
elif tgtDevice is not None and tgtDevice == dev:
|
||||
if not len(partno):
|
||||
self.log.error("cannot use whole disk")
|
||||
return 1
|
||||
if self.device is None:
|
||||
self.device = dev
|
||||
else:
|
||||
self.log.error("found multiple devices: %s, %s",
|
||||
dev, self.device)
|
||||
return 1
|
||||
if self.device is None:
|
||||
self.log.error("cannot find an install device")
|
||||
return 1
|
||||
|
||||
code = self.assertUnmounted()
|
||||
if code: return code
|
||||
|
||||
# optionally back up a config partition
|
||||
# if it's on the boot device
|
||||
for part in self.blkidParts:
|
||||
dev, partno = part.splitDev()
|
||||
if dev == self.device and part.label == 'ONL-CONFIG':
|
||||
self.backupConfig(part.device)
|
||||
|
||||
self.partedDevice = parted.getDevice(self.device)
|
||||
self.partedDisk = parted.newDisk(self.partedDevice)
|
||||
|
||||
# enumerate the partitions that will stay and go
|
||||
minpart = -1
|
||||
for part in self.partedDisk.partitions:
|
||||
|
||||
if part.getFlag(parted.PARTITION_HIDDEN):
|
||||
minpart = max(minpart, part.number+1)
|
||||
continue
|
||||
|
||||
# else, the partition should exist
|
||||
blkidParts = [x for x in self.blkidParts if x.device == part.path]
|
||||
if not blkidParts:
|
||||
self.log.warn("cannot identify partition %s", part)
|
||||
continue
|
||||
|
||||
blkidPart = blkidParts[0]
|
||||
if not blkidPart.isOnieReserved(): continue
|
||||
|
||||
# else, check the GPT label for reserved-ness
|
||||
if (part.name
|
||||
and ('GRUB' in part.name
|
||||
or 'ONIE-BOOT' in part.name
|
||||
or 'DIAG' in part.name)):
|
||||
minpart = max(minpart, part.number+1)
|
||||
|
||||
if minpart < 0:
|
||||
self.log.error("cannot find an install partition")
|
||||
return 1
|
||||
self.minpart = minpart
|
||||
|
||||
return 0
|
||||
|
||||
def installLoader(self):
|
||||
|
||||
ctx = {}
|
||||
|
||||
kernel = self.im.platformConf['grub']['kernel']
|
||||
ctx['kernel'] = kernel['='] if type(kernel) == dict else kernel
|
||||
|
||||
initrd = self.im.platformConf['grub']['initrd']
|
||||
ctx['initrd'] = initrd['='] if type(initrd) == dict else initrd
|
||||
|
||||
ctx['args'] = self.im.platformConf['grub']['args']
|
||||
ctx['platform'] = self.im.installerConf.installer_platform
|
||||
ctx['serial'] = self.im.platformConf['grub']['serial']
|
||||
|
||||
cf = GRUB_TPL % ctx
|
||||
|
||||
self.log.info("Installing kernel")
|
||||
dev = self.blkidParts['ONL-BOOT']
|
||||
|
||||
files = set(os.listdir(self.im.installerConf.installer_dir) + self.zf.namelist())
|
||||
files = [b for b in files if b.startswith('kernel-') or b.startswith('onl-loader-initrd-')]
|
||||
|
||||
with MountContext(dev.device, log=self.log) as ctx:
|
||||
def _cp(b):
|
||||
dst = os.path.join(ctx.dir, b)
|
||||
self.installerCopy(b, dst, optional=True)
|
||||
for e in files:
    _cp(e)
|
||||
|
||||
d = os.path.join(ctx.dir, "grub")
|
||||
self.makedirs(d)
|
||||
dst = os.path.join(ctx.dir, 'grub/grub.cfg')
|
||||
with open(dst, "w") as fd:
|
||||
fd.write(cf)
|
||||
|
||||
return 0
|
||||
|
||||
def installGrub(self):
|
||||
self.log.info("Installing GRUB to %s", self.partedDevice.path)
|
||||
self.im.grubEnv.install(self.partedDevice.path)
|
||||
return 0
|
||||
|
||||
def installGpt(self):
|
||||
|
||||
code = self.findGpt()
|
||||
if code: return code
|
||||
|
||||
self.log.info("Installing to %s starting at partition %d",
|
||||
self.device, self.minpart)
|
||||
|
||||
self.log.info("disk is %s", self.partedDevice.path)
|
||||
|
||||
if self.partedDisk.type != 'gpt':
|
||||
self.log.error("not a GPT partition table")
|
||||
return 1
|
||||
if self.partedDevice.sectorSize != 512:
|
||||
self.log.error("invalid logical block size")
|
||||
return 1
|
||||
if self.partedDevice.physicalSectorSize != 512:
|
||||
self.log.error("invalid physical block size")
|
||||
return 1
|
||||
|
||||
self.log.info("found a disk with %d blocks",
|
||||
self.partedDevice.getLength())
|
||||
|
||||
code = self.deletePartitions()
|
||||
if code: return code
|
||||
|
||||
self.log.info("next usable block is %s", self.nextBlock)
|
||||
|
||||
code = self.partitionParted()
|
||||
if code: return code
|
||||
|
||||
# once we assign the ONL-BOOT partition,
|
||||
# we can re-target the grub environment
|
||||
dev = self.blkidParts['ONL-BOOT']
|
||||
self.im.grubEnv.__dict__['bootPart'] = dev.device
|
||||
self.im.grubEnv.__dict__['bootDir'] = None
|
||||
|
||||
# get a handle to the installer zip
|
||||
p = os.path.join(self.im.installerConf.installer_dir,
|
||||
self.im.installerConf.installer_zip)
|
||||
self.zf = zipfile.ZipFile(p)
|
||||
|
||||
code = self.installSwi()
|
||||
if code: return code
|
||||
|
||||
code = self.installLoader()
|
||||
if code: return code
|
||||
|
||||
code = self.installBootConfig()
|
||||
if code: return code
|
||||
|
||||
code = self.installGrub()
|
||||
if code: return code
|
||||
|
||||
self.log.info("ONL loader install successful.")
|
||||
self.log.info("GRUB installation is required next.")
|
||||
|
||||
return 0
|
||||
|
||||
def run(self):
|
||||
if 'grub' not in self.im.platformConf:
|
||||
self.log.error("platform config is missing a GRUB section")
|
||||
return 1
|
||||
label = self.im.platformConf['grub'].get('label', None)
|
||||
if label != 'gpt':
|
||||
self.log.error("invalid GRUB label in platform config: %s", label)
|
||||
return 1
|
||||
return self.installGpt()
|
||||
|
||||
def shutdown(self):
|
||||
Base.shutdown(self)
|
||||
|
||||
class UbootInstaller(SubprocessMixin, Base):
|
||||
|
||||
class installmeta(Base.installmeta):
|
||||
|
||||
uboot = True
|
||||
|
||||
def getDevice(self):
|
||||
loader = self.platformConf.get('loader', {})
|
||||
dev = loader.get('device', None)
|
||||
return dev
|
||||
|
||||
def str_bootcmd(self):
|
||||
cmds = []
|
||||
cmds.append("setenv onl_loadaddr 0x%x"
|
||||
% self.platformConf['loader']['loadaddr'])
|
||||
cmds.append("setenv onl_platform %s"
|
||||
% self.installerConf.installer_platform)
|
||||
itb = self.platformConf['flat_image_tree']['itb']
|
||||
if type(itb) == dict: itb = itb['=']
|
||||
cmds.append("setenv onl_itb %s" % itb)
|
||||
for item in self.platformConf['loader']['setenv']:
|
||||
k, v = list(item.items())[0]
|
||||
cmds.append("setenv %s %s" % (k, v,))
|
||||
cmds.extend(self.platformConf['loader']['nos_bootcmds'])
|
||||
return "; ".join(cmds)
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
Base.__init__(self, *args, **kwargs)
|
||||
|
||||
self.device = self.im.getDevice()
|
||||
|
||||
code = self.assertUnmounted()
|
||||
if code:
    raise SystemExit("cannot install: %s still has mounted filesystems" % self.device)
|
||||
|
||||
self.rawLoaderDevice = None
|
||||
# set to a partition device for raw loader install,
|
||||
# default to None for FS-based install
|
||||
|
||||
def maybeCreateLabel(self):
|
||||
"""Set up an msdos label."""
|
||||
|
||||
self.partedDevice = parted.getDevice(self.device)
|
||||
try:
|
||||
self.partedDisk = parted.newDisk(self.partedDevice)
|
||||
if self.partedDisk.type == 'msdos':
|
||||
self.log.info("disk %s is already msdos", self.device)
|
||||
return 0
|
||||
self.log.warn("disk %s has wrong label %s",
|
||||
self.device, self.partedDisk.type)
|
||||
except parted._ped.PartedException as ex:
|
||||
self.log.error("cannot get partition table from %s: %s",
|
||||
self.device, str(ex))
|
||||
|
||||
self.log.info("creating msdos label on %s")
|
||||
self.partedDisk = parted.freshDisk(self.partedDevice, 'msdos')
|
||||
|
||||
return 0
|
||||
|
||||
def findMsdos(self):
|
||||
"""Backup any existing data.
|
||||
|
||||
The GPT version of this function is more tricky since it needs
|
||||
to save some of the partitions. Here with an msdos label that
|
||||
is on a different block device from u-boot or ONIE, we don't
|
||||
really care.
|
||||
"""
|
||||
|
||||
# optionally back up a config partition
|
||||
# if it's on the boot device
|
||||
for part in self.blkidParts:
|
||||
dev, partno = part.splitDev()
|
||||
if dev == self.device and part.label == 'ONL-CONFIG':
|
||||
self.backupConfig(part.device)
|
||||
|
||||
self.minpart = -1
|
||||
# default, delete all partitions
|
||||
# XXX roth -- tweak this if we intend to save e.g.
|
||||
# a diag partition from the vendor
|
||||
|
||||
return 0
|
||||
|
||||
def installLoader(self):
|
||||
|
||||
c1 = self.im.platformConf['flat_image_tree'].get('itb', None)
|
||||
if type(c1) == dict: c1 = c1.get('=', None)
|
||||
c2 = ("%s.itb"
|
||||
% (self.im.installerConf.installer_platform,))
|
||||
c3 = "onl-loader-fit.itb"
|
||||
|
||||
loaderBasename = None
|
||||
for c in (c1, c2, c3):
|
||||
if c is None: continue
|
||||
if self.installerExists(c):
|
||||
loaderBasename = c
|
||||
break
|
||||
|
||||
if not loaderBasename:
|
||||
self.log.error("The platform loader file is missing.")
|
||||
return 1
|
||||
|
||||
self.log.info("Installing the ONL loader from %s...", loaderBasename)
|
||||
|
||||
if self.rawLoaderDevice is not None:
|
||||
self.log.info("Installing ONL loader %s --> %s...",
|
||||
loaderBasename, self.rawLoaderDevice)
|
||||
self.installerDd(loaderBasename, self.rawLoaderDevice)
|
||||
return 0
|
||||
|
||||
dev = self.blkidParts['ONL-BOOT']
|
||||
self.log.info("Installing ONL loader %s --> %s:%s...",
|
||||
loaderBasename, dev.device, loaderBasename)
|
||||
with MountContext(dev.device, log=self.log) as ctx:
|
||||
dst = os.path.join(ctx.dir, loaderBasename)
|
||||
self.installerCopy(loaderBasename, dst)
|
||||
|
||||
return 0
|
||||
|
||||
def installUbootEnv(self):
|
||||
|
||||
# Special access instructions for initrd
|
||||
off = getattr(self.im.installerConf, 'initrd_offset', None)
|
||||
if off is not None:
|
||||
if self.rawLoaderDevice is not None:
|
||||
a = self.rawLoaderDevice
|
||||
else:
|
||||
a = self.im.installerConf.initrd_archive
|
||||
s = int(self.im.installerConf.initrd_offset)
|
||||
e = s + int(self.im.installerConf.initrd_size) - 1
|
||||
self.im.ubootEnv.onl_installer_initrd = ("%s:%x:%x" % (a, s, e,))
|
||||
else:
|
||||
try:
|
||||
del self.im.installerConf.onl_installer_initrd
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if self.im.isOnie():
|
||||
self.log.info("Setting ONIE nos_bootcmd to boot ONL")
|
||||
self.im.ubootEnv.nos_bootcmd = self.im.str_bootcmd()
|
||||
else:
|
||||
self.log.warn("U-boot boot setting is not changed")
|
||||
|
||||
return 0
|
||||
|
||||
def installUboot(self):
|
||||
|
||||
if self.device is None:
|
||||
self.log.error("missing block device YAML config")
|
||||
return 1
|
||||
st = os.stat(self.device)
|
||||
if not stat.S_ISBLK(st[stat.ST_MODE]):
|
||||
self.log.error("not a block device: %s", self.device)
|
||||
return 1
|
||||
|
||||
code = self.maybeCreateLabel()
|
||||
if code: return code
|
||||
|
||||
self.log.info("Installing to %s", self.device)
|
||||
|
||||
if self.partedDisk.type != 'msdos':
|
||||
self.log.error("not an MSDOS partition table")
|
||||
return 1
|
||||
if self.partedDevice.sectorSize != 512:
|
||||
self.log.error("invalid logical block size")
|
||||
return 1
|
||||
if self.partedDevice.physicalSectorSize != 512:
|
||||
self.log.error("invalid physical block size")
|
||||
return 1
|
||||
|
||||
self.log.info("found a disk with %d blocks",
|
||||
self.partedDevice.getLength())
|
||||
|
||||
code = self.findMsdos()
|
||||
if code: return code
|
||||
|
||||
code = self.deletePartitions()
|
||||
if code: return code
|
||||
|
||||
self.log.info("next usable block is %s", self.nextBlock)
|
||||
|
||||
code = self.partitionParted()
|
||||
if code: return code
|
||||
|
||||
# compute the path to the raw loader partition,
|
||||
# if indicated by the configuration
|
||||
|
||||
self.rawLoaderDevice = None
|
||||
for partIdx, item in enumerate(self.im.platformConf['installer']):
    label, partData = list(item.items())[0]
    if type(partData) == dict:
        fmt = partData.get('format', 'ext4')
    else:
        fmt = 'ext4'
    if label == 'ONL-BOOT' and fmt == 'raw':
        self.rawLoaderDevice = self.device + str(partIdx+1)
        break
|
||||
|
||||
# get a handle to the installer zip
|
||||
p = os.path.join(self.im.installerConf.installer_dir,
|
||||
self.im.installerConf.installer_zip)
|
||||
self.zf = zipfile.ZipFile(p)
|
||||
|
||||
code = self.installSwi()
|
||||
if code: return code
|
||||
|
||||
code = self.installLoader()
|
||||
if code: return code
|
||||
|
||||
if self.rawLoaderDevice is None:
|
||||
code = self.installBootConfig()
|
||||
if code: return code
|
||||
else:
|
||||
self.log.info("ONL-BOOT is a raw partition (%s), skipping boot-config",
|
||||
self.rawLoaderDevice)
|
||||
|
||||
self.log.info("syncing block devices")
|
||||
self.check_call(('sync',))
|
||||
# XXX roth probably not needed
|
||||
|
||||
code = self.installUbootEnv()
|
||||
if code: return code
|
||||
|
||||
return 0
|
||||
|
||||
def run(self):
|
||||
|
||||
if 'flat_image_tree' not in self.im.platformConf:
|
||||
self.log.error("platform config is missing a FIT section")
|
||||
return 1
|
||||
|
||||
return self.installUboot()
|
||||
|
||||
def shutdown(self):
|
||||
Base.shutdown(self)
|
||||
@@ -0,0 +1,240 @@
|
||||
"""BaseRecovery.py
|
||||
|
||||
Base classes for recovery.
|
||||
"""
|
||||
|
||||
import subprocess, os, stat
|
||||
import tempfile
|
||||
import binascii
|
||||
import glob
|
||||
import logging
|
||||
from InstallUtils import TempdirContext, MountContext, SubprocessMixin, ProcMountsParser
|
||||
from InstallUtils import InitrdContext, BlkidParser
|
||||
from ConfUtils import ChrootGrubEnv
|
||||
|
||||
class Base(SubprocessMixin):
|
||||
|
||||
class recovermeta:
|
||||
|
||||
bootConfig = "/mnt/flash/boot-config"
|
||||
bootConfigDfl = "/etc/boot-config.default"
|
||||
|
||||
@property
|
||||
def needRecovery(self):
|
||||
if os.path.exists('/mnt/flash/.notmounted'): return True
|
||||
if os.path.exists('/mnt/flash2/.notmounted'): return True
|
||||
return False
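# Recovery is needed when either flash filesystem failed to mount; the
# '.notmounted' sentinel is assumed to be left in the empty mountpoint by
# the boot-time mount scripts.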
|
||||
|
||||
def __init__(self,
|
||||
ubootEnv=None,
|
||||
log=None):
|
||||
self.platform = self.recovermeta()
|
||||
self.ubootEnv = ubootEnv
|
||||
self.log = log or logging.getLogger(self.__class__.__name__)
|
||||
|
||||
def recoverFull(self):
|
||||
self.log.error("not implemented")
|
||||
return 1
|
||||
|
||||
def recoverConfig(self):
|
||||
if os.path.exists(self.platform.bootConfig): return 0
|
||||
self.copy2(self.platform.bootConfigDfl, self.platform.bootConfig)
|
||||
return 0
|
||||
|
||||
def run(self):
|
||||
|
||||
if self.platform.needRecovery:
|
||||
self.log.info("Attempting recovery")
|
||||
code = self.recoverFull()
|
||||
if code: return code
|
||||
|
||||
code = self.recoverConfig()
|
||||
if code: return code
|
||||
|
||||
return 0
|
||||
|
||||
def umountAny(self, device=None, label=None):
|
||||
p = ProcMountsParser()
|
||||
if label is not None:
|
||||
b = BlkidParser(log=self.log)
|
||||
for e in b.parts:
|
||||
if label == e.label:
|
||||
device = e.device
|
||||
break
|
||||
|
||||
for m in p.mounts:
|
||||
if device is not None and device in m.device:
|
||||
try:
|
||||
self.check_call(('umount', m.device,),
|
||||
vmode=self.V1)
|
||||
except subprocess.CalledProcessError, what:
|
||||
self.log.warn("cannot umount %s: %s",
|
||||
m.device, str(what))
|
||||
return 0
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
class GrubRecovery(Base):
|
||||
|
||||
class recovermeta(Base.recovermeta):
|
||||
pass
|
||||
|
||||
def recoverX86(self):
|
||||
|
||||
def _u(l):
|
||||
self.umountAny(label=l)
|
||||
def _l(l):
|
||||
try:
|
||||
return self.check_output(('blkid', '-L', l,)).strip()
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
def _r(l):
|
||||
_u(l)
|
||||
dev = _l(l)
|
||||
if dev is not None:
|
||||
self.log.info("Recovering %s partition", l)
|
||||
self.check_call(('mkdosfs', '-n', l, dev,),
|
||||
vmode=self.V1)
|
||||
|
||||
_r('FLASH')
|
||||
_r('FLASH2')
|
||||
|
||||
return 0
|
||||
|
||||
def recoverGrubConfig(self):
|
||||
|
||||
with MountContext(label='ONIE-BOOT', log=self.log) as octx:
|
||||
|
||||
pat = "%s/onie/initrd.img*" % octx.dir
|
||||
l = glob.glob(pat)
|
||||
if not l:
|
||||
raise ValueError("cannot find ONIE initrd")
|
||||
initrd = l[0]
|
||||
|
||||
with InitrdContext(initrd=initrd, log=self.log) as ictx:
|
||||
|
||||
# copy the Switch Light grubenv out of its GRUB directory
|
||||
dst = os.path.join(ictx.dir, "tmp/grubenv")
|
||||
with MountContext(label='SL-BOOT', log=self.log) as sctx:
|
||||
src = os.path.join(sctx.dir, "grub/grubenv")
|
||||
self.copy2(src, dst)
|
||||
|
||||
# use the ONIE runtime's GRUB tools to read it
|
||||
grubEnv = ChrootGrubEnv(ictx.dir, mounted=True,
|
||||
bootDir="/",
|
||||
path="/tmp/grubenv",
|
||||
log=self.log)
|
||||
buf = getattr(grubEnv, 'boot_config_default', None)
|
||||
|
||||
if buf is None:
|
||||
raise ValueError("Cannot recover filesystem(s) -- missing boot_config_default.")
|
||||
if buf == "":
|
||||
raise ValueError("Cannot recover filesystem(s) -- empty boot_config_default.")
|
||||
try:
|
||||
buf = buf.decode('base64', 'strict')
|
||||
except binascii.Error:
|
||||
raise ValueError("Cannot recover filesystem(s) -- corrupted boot_config_default.")
|
||||
if "SWI=flash" in buf:
|
||||
raise ValueError("Cannot recover filesystem(s) -- local SWI cannot be recovered.")
|
||||
|
||||
with MountContext(label='FLASH', log=self.log) as ctx:
|
||||
dst = os.path.join(ctx.dir, 'boot-config')
|
||||
with open(dst, "w") as fd:
|
||||
self.log.debug("+ cat > %s", dst)
|
||||
fd.write(buf)
|
||||
|
||||
return 0
|
||||
|
||||
def recoverFull(self):
|
||||
self.log.info("Recovering flash partitions.")
|
||||
|
||||
code = self.recoverX86()
|
||||
if code: return code
|
||||
|
||||
code = self.recoverGrubConfig()
|
||||
if code: return code
|
||||
|
||||
self.check_call(('initmounts',))
|
||||
|
||||
return 0
|
||||
|
||||
class UbootRecovery(Base):
|
||||
|
||||
class recovermeta(Base.recovermeta):
|
||||
|
||||
def __init__(self, ubootEnv=None):
|
||||
self.ubootEnv = ubootEnv
|
||||
|
||||
device = None
|
||||
# fill this in per-platform
|
||||
|
||||
@property
|
||||
def bootConfigEnv(self):
|
||||
if self.ubootEnv is None:
|
||||
raise ValueError("missing u-boot environment tools")
|
||||
buf = getattr(self.ubootEnv, 'boot-config-default', None)
|
||||
if buf is None:
|
||||
raise ValueError("Cannot recover filesystem(s) -- missing boot-config-default.")
|
||||
if buf == "":
|
||||
raise ValueError("Cannot recover filesystem(s) -- empty boot-config-default.")
|
||||
try:
|
||||
buf = buf.decode('base64', 'strict')
|
||||
except binascii.Error:
|
||||
raise ValueError("Cannot recover filesystem(s) -- corrupted boot-config-default.")
|
||||
if "SWI=flash" in buf:
|
||||
raise ValueError("Cannot recover filesystem(s) -- local SWI cannot be recovered.")
|
||||
return buf
|
||||
|
||||
def __init__(self,
|
||||
ubootEnv=None,
|
||||
log=None):
|
||||
self.ubootEnv = ubootEnv
|
||||
self.platform = self.recovermeta(ubootEnv=ubootEnv)
|
||||
self.log = log or logging.getLogger(self.__class__.__name__)
|
||||
|
||||
self.flashDev = self.platform.device + '2'
|
||||
self.flash2Dev = self.platform.device + '3'
|
||||
|
||||
def recoverUboot(self):
|
||||
if not os.path.exists(self.platform.device):
|
||||
self.log.error("missing block device, cannot recover")
|
||||
return 1
|
||||
st = os.stat(self.platform.device)
|
||||
if not stat.S_ISBLK(st[stat.ST_MODE]):
|
||||
self.log.error("invalid block device")
|
||||
return 1
|
||||
|
||||
code = self.umountAny(device=self.platform.device)
|
||||
if code: return code
|
||||
|
||||
self.log.info("Re-formatting %s", self.platform.device)
|
||||
cmd = ('mkdosfs', '-n', 'FLASH', self.flashDev,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
cmd = ('mkdosfs', '-n', 'FLASH2', self.flash2Dev,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
return 0
|
||||
|
||||
def recoverUbootConfig(self):
|
||||
with MountContext(self.flashDev, log=self.log) as ctx:
|
||||
dst = os.path.join(ctx.dir, 'boot-config')
|
||||
with open(dst, "w") as fd:
|
||||
self.log.debug("+ cat > %s", dst)
|
||||
fd.write(self.platform.bootConfigEnv)
|
||||
return 0
|
||||
|
||||
def recoverFull(self):
|
||||
|
||||
code = self.recoverUboot()
|
||||
if code: return code
|
||||
|
||||
code = self.recoverUbootConfig()
|
||||
if code: return code
|
||||
|
||||
self.log.info("syncing block devices")
|
||||
self.check_call(('sync',))
|
||||
# XXX roth probably not needed
|
||||
|
||||
self.check_call(('initmounts',))
|
||||
|
||||
return 0
|
||||
@@ -0,0 +1,390 @@
|
||||
"""ConfUtils.py
|
||||
|
||||
Config interfaces to different backend mechanisms.
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
import subprocess
|
||||
from InstallUtils import SubprocessMixin, ChrootSubprocessMixin, MountContext
|
||||
|
||||
class ConfBase:
|
||||
|
||||
def __init__(self):
|
||||
self._parse()
|
||||
|
||||
def _parse(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def _feedLine(self, line):
|
||||
line = line.strip()
|
||||
if not line: return
|
||||
|
||||
idx = line.find('=')
|
||||
if idx < 0:
|
||||
raise ValueError("invalid line in %s: %s"
|
||||
% (self.path, line,))
|
||||
key, val = line[:idx], line[idx+1:]
|
||||
if val[:1] == '"' and val[-1:] == '"':
|
||||
val = val[1:-1]
|
||||
if val[:1] == "'" and val[-1:] == "'":
|
||||
val = val[1:-1]
|
||||
self.__dict__['_data'][key] = val
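# _feedLine parses simple KEY=VALUE lines (optionally single- or double-quoted),
# the format shared by machine.conf, installer.conf, grub-editenv and
# fw_printenv output, e.g. onie_platform="x86_64-example-r0" (hypothetical value).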
|
||||
|
||||
def __getattr__(self, attr, *args):
|
||||
if len(args) == 1:
|
||||
return self.__dict__['_data'].get(attr, args[0])
|
||||
elif len(args) == 0:
|
||||
try:
|
||||
return self.__dict__['_data'][attr]
|
||||
except KeyError, what:
|
||||
raise AttributeError(str(what))
|
||||
else:
|
||||
raise ValueError("extra arguments")
|
||||
|
||||
def __setattr__(self, attr, val):
|
||||
self.__dict__['_data'][attr] = val
|
||||
|
||||
class ConfFileBase(ConfBase):
|
||||
|
||||
PATH = None
|
||||
# Override me
|
||||
|
||||
def __init__(self, path=None):
|
||||
self.__dict__['path'] = path or self.PATH
|
||||
ConfBase.__init__(self)
|
||||
|
||||
def _parse(self):
|
||||
self.__dict__['_data'] = {}
|
||||
with open(self.path) as fd:
|
||||
for line in fd.xreadlines():
|
||||
self._feedLine(line)
|
||||
|
||||
class MachineConf(ConfFileBase):
|
||||
PATH = "/etc/machine.conf"
|
||||
|
||||
class InstallerConf(ConfFileBase):
|
||||
PATH = "/etc/onl/installer.conf"
|
||||
|
||||
class ConfBuf(ConfBase):
|
||||
|
||||
def __init__(self, buf):
|
||||
self.__dict__['buf'] = buf
|
||||
ConfBase.__init__(self)
|
||||
|
||||
def _parse(self):
|
||||
self.__dict__['_data'] = {}
|
||||
for line in self.buf.splitlines():
|
||||
self._feedLine(line)
|
||||
|
||||
class GrubEnv(SubprocessMixin):
|
||||
|
||||
INSTALL = "grub-install"
|
||||
EDITENV = "grub-editenv"
|
||||
# system default
|
||||
|
||||
ENV_PATH = "/grub/grubenv"
|
||||
# override me
|
||||
|
||||
def __init__(self,
|
||||
bootDir=None, bootPart=None,
|
||||
path=None,
|
||||
log=None):
|
||||
|
||||
if bootDir and bootPart:
|
||||
raise ValueError("cannot specify bootDir and bootPart")
|
||||
if not bootDir and not bootPart:
|
||||
raise ValueError("missing bootDir or bootPart")
|
||||
self.__dict__['bootDir'] = bootDir
|
||||
self.__dict__['bootPart'] = bootPart
|
||||
# location of GRUB boot files (mounted directory or unmounted partition)
|
||||
|
||||
self.__dict__['path'] = path or self.ENV_PATH
|
||||
# path to grubenv, relative to above
|
||||
|
||||
self.__dict__['log'] = log or logging.getLogger("grub")
|
||||
|
||||
def mountCtx(self, device):
|
||||
return MountContext(device, fsType='ext4', log=self.log)
|
||||
|
||||
def asDict(self):
|
||||
if self.bootPart:
|
||||
with self.mountCtx(self.bootPart) as ctx:
|
||||
p = os.path.join(ctx.dir, self.path.lstrip('/'))
|
||||
buf = self.check_output((self.EDITENV, p, 'list',)).strip()
|
||||
else:
|
||||
p = os.path.join(self.bootDir, self.path.lstrip('/'))
|
||||
buf = self.check_output((self.EDITENV, p, 'list',)).strip()
|
||||
cf = ConfBuf(buf)
|
||||
return cf.__dict__['_data']
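# 'grub-editenv <path> list' prints KEY=value lines, which ConfBuf parses
# back into a plain dict (e.g. boot_config_default=<base64 blob>).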
|
||||
|
||||
toDict = asDict
|
||||
|
||||
def __getattr__(self, *args):
|
||||
|
||||
args = list(args)
|
||||
attr = args.pop(0)
|
||||
|
||||
d = self.asDict()
|
||||
if args:
|
||||
return d.get(attr, args[0])
|
||||
try:
|
||||
return d[attr]
|
||||
except KeyError, what:
|
||||
raise AttributeError(str(what))
|
||||
|
||||
def __setattr__(self, attr, val):
|
||||
if self.bootPart:
|
||||
with self.mountCtx(self.bootPart) as ctx:
|
||||
p = os.path.join(ctx.dir, self.path.lstrip('/'))
|
||||
cmd = (self.EDITENV, p, 'set', ("%s=%s" % (attr, val,)),)
|
||||
self.check_call(cmd)
|
||||
else:
|
||||
p = os.path.join(self.bootDir, self.path.lstrip('/'))
|
||||
cmd = (self.EDITENV, p, 'set', ("%s=%s" % (attr, val,)),)
|
||||
self.check_call(cmd)
|
||||
|
||||
def __delattr__(self, attr):
|
||||
if self.bootPart:
|
||||
with self.mountCtx(self.bootPart) as ctx:
|
||||
p = os.path.join(ctx.dir, self.path.lstrip('/'))
|
||||
cmd = (self.EDITENV, p, 'unset', attr,)
|
||||
self.check_call(cmd)
|
||||
else:
|
||||
p = os.path.join(self.bootDir, self.path.lstrip('/'))
|
||||
cmd = (self.EDITENV, p, 'unset', attr,)
|
||||
self.check_call(cmd)
|
||||
|
||||
def install(self, device):
|
||||
if self.bootDir is not None:
|
||||
self.check_call((self.INSTALL, '--boot-directory=' + self.bootDir, device,))
|
||||
elif self.bootPart is not None:
|
||||
with self.mountCtx(self.bootPart) as ctx:
|
||||
self.check_call((self.INSTALL, '--boot-directory=' + ctx.dir, device,))
|
||||
else:
|
||||
self.check_call((self.INSTALL, device,))
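# When a boot partition (rather than a directory) is given, it is mounted
# temporarily and grub-install is pointed at it with --boot-directory so the
# GRUB images land under the ONL-BOOT filesystem's grub/ directory.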
|
||||
|
||||
class ChrootGrubEnv(ChrootSubprocessMixin, GrubEnv):
|
||||
|
||||
def __init__(self,
|
||||
chrootDir,
|
||||
mounted=False,
|
||||
bootDir=None, bootPart=None,
|
||||
path=None,
|
||||
log=None):
|
||||
self.__dict__['chrootDir'] = chrootDir
|
||||
self.__dict__['mounted'] = mounted
|
||||
GrubEnv.__init__(self,
|
||||
bootDir=bootDir, bootPart=bootPart,
|
||||
path=path,
|
||||
log=log)
|
||||
|
||||
def mountCtx(self, device):
|
||||
return MountContext(device,
|
||||
chroot=self.chrootDir, fsType='ext4',
|
||||
log=self.log)
|
||||
|
||||
class ProxyGrubEnv:
|
||||
"""Pretend to manipulate the GRUB environment.
|
||||
|
||||
Instead, write a trace of shell commands to a log
|
||||
so that e.g. the chroot's host can execute it with
|
||||
the proper GRUB runtime.
|
||||
"""
|
||||
|
||||
INSTALL = "grub-install"
|
||||
EDITENV = "grub-editenv"
|
||||
# system defaults
|
||||
|
||||
ENV_PATH = "/grub/grubenv"
|
||||
# override this
|
||||
|
||||
def __init__(self,
|
||||
installerConf,
|
||||
bootDir=None, chroot=True, bootPart=None,
|
||||
path=None,
|
||||
log=None):
|
||||
|
||||
self.__dict__['installerConf'] = installerConf
|
||||
# installer state, to retrieve e.g. chroot directory and trace log
|
||||
|
||||
if bootDir and bootPart:
|
||||
raise ValueError("cannot specify bootDir and bootPart")
|
||||
if not bootDir and not bootPart:
|
||||
raise ValueError("missing bootDir or bootPart")
|
||||
self.__dict__['bootDir'] = bootDir
|
||||
self.__dict__['bootPart'] = bootPart
|
||||
# location of GRUB boot files (mounted directory or unmounted partition)
|
||||
|
||||
self.__dict__['chroot'] = chroot
|
||||
# True if the bootDir is inside the chroot,
|
||||
# else bootDir is in the host's file namespace
|
||||
|
||||
self.__dict__['path'] = path or self.ENV_PATH
|
||||
# path to grubenv, relative to above
|
||||
|
||||
self.__dict__['log'] = log or logging.getLogger("grub")
|
||||
|
||||
def asDict(self):
|
||||
raise NotImplementedError("proxy grubenv list not implemented")
|
||||
|
||||
toDict = asDict
|
||||
|
||||
def __getattr__(self, *args):
|
||||
raise NotImplementedError("proxy grubenv list not implemented")
|
||||
|
||||
def __setattr__(self, attr, val):
|
||||
self.log.warn("deferring commands to %s...", self.installerConf.installer_postinst)
|
||||
|
||||
cmds = []
|
||||
if self.bootDir and self.chroot:
|
||||
p = os.path.join(self.installerConf.installer_chroot,
|
||||
self.bootDir.lstrip('/'),
|
||||
self.path.lstrip('/'))
|
||||
cmds.append(("%s %s set %s=\"%s\"" % (self.EDITENV, p, attr, val,)))
|
||||
elif self.bootDir:
|
||||
p = os.path.join(self.bootDir,
|
||||
self.path.lstrip('/'))
|
||||
cmds.append(("%s %s set %s=\"%s\"" % (self.EDITENV, p, attr, val,)))
|
||||
else:
|
||||
p = ("${mpt}/%s"
|
||||
% (self.path.lstrip('/'),))
|
||||
cmds.append("mpt=$(mktemp -t -d)")
|
||||
cmds.append("mount %s $mpt" % self.bootPart)
|
||||
cmds.append(("sts=0; %s %s set %s=\"%s\" || sts=$?"
|
||||
% (self.EDITENV, p, attr, val,)))
|
||||
cmds.append("umount $mpt")
|
||||
cmds.append("rmdir $mpt")
|
||||
cmds.append("test $sts -eq 0")
|
||||
|
||||
with open(self.installerConf.installer_postinst, "a") as fd:
|
||||
for cmd in cmds:
|
||||
self.log.debug("+ [PROXY] " + cmd)
|
||||
fd.write(cmd)
|
||||
fd.write("\n")
|
||||
|
||||
def __delattr__(self, attr):
|
||||
self.log.warn("deferring commands to %s...", self.installerConf.installer_postinst)
|
||||
|
||||
cmds = []
|
||||
if self.bootDir and self.chroot:
|
||||
p = os.path.join(self.installerConf.installer_chroot,
|
||||
self.bootDir.lstrip('/'),
|
||||
self.path.lstrip('/'))
|
||||
cmds.append(("%s %s unset %s" % (self.EDITENV, p, attr,)))
|
||||
elif self.bootDir:
|
||||
p = os.path.join(self.bootDir,
|
||||
self.path.lstrip('/'))
|
||||
cmds.append(("%s %s unset %s" % (self.EDITENV, p, attr,)))
|
||||
else:
|
||||
p = ("$mpt%s"
|
||||
% (self.path.lstrip('/'),))
|
||||
cmds.append("mpt=$(mktemp -t -d)")
|
||||
cmds.append("mount %s $mpt" % self.bootPart)
|
||||
cmds.append(("sts=0; %s %s unset %s || sts=$?"
|
||||
% (self.EDITENV, p, attr,)))
|
||||
cmds.append("umount $mpt")
|
||||
cmds.append("rmdir $mpt")
|
||||
cmds.append("test $sts -eq 0")
|
||||
|
||||
with open(self.installerConf.installer_postinst, "a") as fd:
|
||||
for cmd in cmds:
|
||||
self.log.debug("+ [PROXY] " + cmd)
|
||||
fd.write(cmd)
|
||||
fd.write("\n")
|
||||
|
||||
def install(self, device):
|
||||
self.log.warn("deferring commands to %s...", self.installerConf.installer_postinst)
|
||||
cmds = []
|
||||
if self.bootDir and self.chroot:
|
||||
p = os.path.join(self.installerConf.installer_chroot,
|
||||
self.bootDir.lstrip('/'))
|
||||
cmds.append(("%s --boot-directory=\"%s\" %s" % (self.INSTALL, p, device,)))
|
||||
elif self.bootDir:
|
||||
p = self.bootDir
|
||||
cmds.append(("%s --boot-directory=\"%s\" %s" % (self.INSTALL, p, device,)))
|
||||
elif self.bootPart:
|
||||
cmds.append("mpt=$(mktemp -t -d)")
|
||||
cmds.append("mount %s $mpt" % self.bootPart)
|
||||
cmds.append(("sts=0; %s --boot-directory=\"$mpt\" %s || sts=$?"
|
||||
% (self.INSTALL, device,)))
|
||||
cmds.append("umount $mpt")
|
||||
cmds.append("rmdir $mpt")
|
||||
cmds.append("test $sts -eq 0")
|
||||
else:
|
||||
cmds.append(("%s %s"
|
||||
% (self.INSTALL, device,)))
|
||||
|
||||
with open(self.installerConf.installer_postinst, "a") as fd:
|
||||
for cmd in cmds:
|
||||
self.log.debug("+ [PROXY] " + cmd)
|
||||
fd.write(cmd)
|
||||
fd.write("\n")
|
||||
|
||||
class UbootEnv(SubprocessMixin):
|
||||
|
||||
# ha ha, loader and SWI use different paths
|
||||
if os.path.exists("/usr/sbin/fw_setenv"):
|
||||
SETENV = "/usr/sbin/fw_setenv"
|
||||
elif os.path.exists("/usr/bin/fw_setenv"):
|
||||
SETENV = "/usr/bin/fw_setenv"
|
||||
else:
|
||||
SETENV = "/bin/false"
|
||||
|
||||
if os.path.exists("/usr/sbin/fw_printenv"):
|
||||
PRINTENV = "/usr/sbin/fw_printenv"
|
||||
elif os.path.exists("/usr/bin/fw_printenv"):
|
||||
PRINTENV = "/usr/bin/fw_printenv"
|
||||
else:
|
||||
PRINTENV = "/bin/false"
|
||||
|
||||
def __init__(self, log=None):
|
||||
self.__dict__['log'] = log or logging.getLogger("u-boot")
|
||||
|
||||
self.__dict__['hasForceUpdate'] = False
|
||||
try:
|
||||
out = self.check_output((self.SETENV, '--help',),
|
||||
stderr=subprocess.STDOUT)
|
||||
if "-f" in out and "Force update" in out:
|
||||
self.__dict__['hasForceUpdate'] = True
|
||||
except subprocess.CalledProcessError:
|
||||
if self.SETENV != '/bin/false':
|
||||
raise
|
||||
|
||||
def __getattr__(self, *args):
|
||||
|
||||
args = list(args)
|
||||
attr = args.pop(0)
|
||||
|
||||
with open(os.devnull, "w") as nfd:
|
||||
try:
|
||||
out = self.check_output((self.PRINTENV, '-n', attr,),
|
||||
stderr=nfd.fileno())
|
||||
except subprocess.CalledProcessError:
|
||||
out = None
|
||||
|
||||
if out is not None: return out
|
||||
|
||||
if args:
|
||||
return args[0]
|
||||
|
||||
raise AttributeError("firmware tag not found")
|
||||
|
||||
def __setattr__(self, attr, val):
|
||||
if self.hasForceUpdate:
|
||||
self.check_call((self.SETENV, '-f', attr, val,))
|
||||
else:
|
||||
self.check_call((self.SETENV, attr, val,))
|
||||
|
||||
def __delattr__(self, attr):
|
||||
|
||||
if self.hasForceUpdate:
|
||||
self.check_call((self.SETENV, '-f', attr,))
|
||||
else:
|
||||
self.check_call((self.SETENV, attr,))
|
||||
|
||||
def asDict(self):
|
||||
buf = self.check_output((self.PRINTENV,)).strip()
|
||||
return ConfBuf(buf).__dict__['_data']
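# 'fw_printenv' with no arguments dumps the whole U-Boot environment as
# KEY=value lines, which ConfBuf turns into a plain dict.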
|
||||
|
||||
toDict = asDict
|
||||
@@ -0,0 +1,578 @@
|
||||
"""Fit.py
|
||||
|
||||
Parse FIT files.
|
||||
"""
|
||||
|
||||
import os, sys
|
||||
import logging
|
||||
import struct
|
||||
import argparse
|
||||
import time
|
||||
|
||||
class FdtProperty:
|
||||
def __init__(self, name, offset, sz):
|
||||
self.name = name
|
||||
self.offset = offset
|
||||
self.sz = sz
|
||||
|
||||
class FdtNode:
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.properties = {}
|
||||
self.nodes = {}
|
||||
|
||||
class Parser:
|
||||
|
||||
FDT_MAGIC = 0xd00dfeed
|
||||
|
||||
FDT_BEGIN_NODE = 1
|
||||
FDT_END_NODE = 2
|
||||
FDT_PROP = 3
|
||||
FDT_NOP = 4
|
||||
FDT_END = 9
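# Token values from the flattened device tree (FDT/DTB) structure block:
# a 4-byte-aligned stream of BEGIN_NODE/PROP/END_NODE records terminated
# by FDT_END, which _parseStream walks below.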
|
||||
|
||||
def __init__(self, path=None, stream=None, log=None):
|
||||
self.log = log or logging.getLogger(self.__class__.__name__)
|
||||
self.path = path
|
||||
self.stream = stream
|
||||
self.rootNodes = {}
|
||||
self._parse()
|
||||
|
||||
def _parse(self):
|
||||
if self.stream is not None:
|
||||
try:
|
||||
pos = self.stream.tell()
|
||||
self._parseStream(self.stream)
|
||||
finally:
|
||||
self.stream.seek(pos, 0)
|
||||
elif self.path is not None:
|
||||
with open(self.path) as fd:
|
||||
self._parseStream(fd)
|
||||
else:
|
||||
raise ValueError("missing file or stream")
|
||||
|
||||
def _parseStream(self, fd):
|
||||
strings = {}
|
||||
|
||||
buf = fd.read(40)
|
||||
hdr = list(struct.unpack(">10I", buf))
|
||||
magic = hdr.pop(0)
|
||||
if magic != self.FDT_MAGIC:
|
||||
raise ValueError("missing magic")
|
||||
self.fdtSize = hdr.pop(0)    # totalsize
self.structPos = hdr.pop(0)  # off_dt_struct
self.stringPos = hdr.pop(0)  # off_dt_strings
hdr.pop(0)                   # off_mem_rsvmap (unused here)
self.version = hdr.pop(0)
if self.version < 17:
    raise ValueError("invalid format version")
hdr.pop(0)                   # last compatible version
hdr.pop(0)                   # boot cpu
self.stringSize = hdr.pop(0)
self.structSize = hdr.pop(0)
|
||||
|
||||
fd.seek(self.structPos, 0)
|
||||
|
||||
def _align():
|
||||
pos = fd.tell()
|
||||
pos = (pos+3) & ~3
|
||||
fd.seek(pos, 0)
|
||||
|
||||
def _label():
|
||||
buf = ""
|
||||
while True:
|
||||
c = fd.read(1)
|
||||
if c == '\x00': break
|
||||
if c:
|
||||
buf += c
|
||||
return buf
|
||||
|
||||
def _string(off):
|
||||
if off in strings:
|
||||
return strings[off]
|
||||
pos = fd.tell()
|
||||
fd.seek(self.stringPos, 0)
|
||||
fd.seek(off, 1)
|
||||
buf = _label()
|
||||
strings[off] = buf
fd.seek(pos)
return buf
|
||||
|
||||
nodeStack = []
|
||||
|
||||
while True:
|
||||
buf = fd.read(4)
|
||||
s = list(struct.unpack(">I", buf))
|
||||
tag = s.pop(0)
|
||||
|
||||
if tag == self.FDT_BEGIN_NODE:
|
||||
name = _label()
|
||||
_align()
|
||||
|
||||
newNode = FdtNode(name)
|
||||
|
||||
if nodeStack:
|
||||
if name in nodeStack[-1].nodes:
|
||||
raise ValueError("duplicate node")
|
||||
nodeStack[-1].nodes[name] = newNode
|
||||
nodeStack.append(newNode)
|
||||
else:
|
||||
if name in self.rootNodes:
|
||||
raise ValueError("duplicate node")
|
||||
self.rootNodes[name] = newNode
|
||||
nodeStack.append(newNode)
|
||||
|
||||
continue
|
||||
|
||||
if tag == self.FDT_PROP:
|
||||
buf = fd.read(8)
|
||||
s = list(struct.unpack(">2I", buf))
|
||||
plen = s.pop(0)
|
||||
nameoff = s.pop(0)
|
||||
name = _string(nameoff)
|
||||
pos = fd.tell()
|
||||
fd.seek(plen, 1)
|
||||
_align()
|
||||
|
||||
newProp = FdtProperty(name, pos, plen)
|
||||
|
||||
if nodeStack:
|
||||
if name in nodeStack[-1].properties:
|
||||
raise ValueError("duplicate property")
|
||||
nodeStack[-1].properties[name] = newProp
|
||||
else:
|
||||
raise ValueError("property with no node")
|
||||
|
||||
continue
|
||||
|
||||
if tag == self.FDT_END_NODE:
|
||||
if nodeStack:
|
||||
nodeStack.pop(-1)
|
||||
else:
|
||||
raise ValueError("missing begin node")
|
||||
continue
|
||||
|
||||
if tag == self.FDT_NOP:
|
||||
print "NOP"
|
||||
continue
|
||||
|
||||
if tag == self.FDT_END:
|
||||
if nodeStack:
|
||||
raise ValueError("missing end node(s)")
|
||||
break
|
||||
|
||||
raise ValueError("invalid tag %d" % tag)
|
||||
|
||||
def report(self, stream=sys.stdout):
|
||||
q = [(x, "") for x in self.rootNodes.values()]
|
||||
while q:
|
||||
n, pfx = q.pop(0)
|
||||
|
||||
name = n.name or "/"
|
||||
stream.write("%s%s\n" % (pfx, name,))
|
||||
|
||||
if n.properties:
|
||||
stream.write("\n")
|
||||
for p in n.properties.values():
|
||||
stream.write("%s %s (%d bytes)\n"
|
||||
% (pfx, p.name, p.sz,))
|
||||
if n.properties:
|
||||
stream.write("\n")
|
||||
|
||||
pfx2 = pfx + " "
|
||||
q[0:0] = [(x, pfx2) for x in n.nodes.values()]
|
||||
|
||||
def getNode(self, path):
|
||||
if path == '/':
|
||||
return self.rootNodes.get('', None)
|
||||
|
||||
els = path.split('/')
|
||||
n = None
|
||||
while els:
|
||||
b = els.pop(0)
|
||||
if n is None:
|
||||
if b not in self.rootNodes: return None
|
||||
n = self.rootNodes[b]
|
||||
else:
|
||||
if b not in n.nodes: return None
|
||||
n = n.nodes[b]
|
||||
return n
|
||||
|
||||
def getNodeProperty(self, node, propName):
|
||||
if propName not in node.properties: return None
|
||||
prop = node.properties[propName]
|
||||
def _get(fd):
|
||||
fd.seek(self.structPos, 0)
|
||||
fd.seek(prop.offset)
|
||||
buf = fd.read(prop.sz)
|
||||
if buf[-1] == '\x00':
|
||||
return buf[:-1]
|
||||
return buf
|
||||
if self.stream is not None:
|
||||
return _get(self.stream)
|
||||
else:
|
||||
with open(self.path) as fd:
|
||||
return _get(fd)
|
||||
|
||||
def dumpNodeProperty(self, node, propIsh, outPath):
|
||||
if isinstance(propIsh, FdtProperty):
|
||||
prop = propIsh
|
||||
else:
|
||||
if propIsh not in node.properties:
|
||||
raise ValueError("missing property")
|
||||
prop = node.properties[propIsh]
|
||||
def _dump(fd):
|
||||
with open(outPath, "w") as wfd:
|
||||
fd.seek(prop.offset, 0)
|
||||
buf = fd.read(prop.sz)
|
||||
wfd.write(buf)
|
||||
if self.stream is not None:
|
||||
try:
|
||||
pos = self.stream.tell()
|
||||
_dump(self.stream)
|
||||
finally:
|
||||
self.stream.seek(pos, 0)
|
||||
else:
|
||||
with open(self.path) as fd:
|
||||
_dump(fd)
|
||||
|
||||
def getInitrdNode(self, profile=None):
|
||||
"""U-boot mechanism to retrieve boot profile."""
|
||||
|
||||
node = self.getNode('/configurations')
|
||||
if node is None:
|
||||
self.log.warn("missing /configurations node")
|
||||
return None
|
||||
if profile is not None:
|
||||
if profile not in node.nodes:
|
||||
self.log.warn("missing profile %s", profile)
|
||||
return None
|
||||
node = node.nodes[profile]
|
||||
elif 'default' in node.properties:
|
||||
pf = self.getNodeProperty(node, 'default')
|
||||
self.log.debug("default profile is %s", pf)
|
||||
node = node.nodes[pf]
|
||||
else:
|
||||
pf = node.nodes.keys()[0]
|
||||
self.log.debug("using profile %s", pf)
|
||||
node = node.nodes[pf]
|
||||
|
||||
if 'ramdisk' not in node.properties:
|
||||
self.log.warn("ramdisk property not found")
|
||||
return None
|
||||
rdName = self.getNodeProperty(node, 'ramdisk')
|
||||
|
||||
self.log.debug("retrieving ramdisk %s", rdName)
|
||||
node = self.getNode('/images/' + rdName)
|
||||
return node
|
||||
|
||||
class DumpRunner:
|
||||
|
||||
def __init__(self, stream,
|
||||
log=None):
|
||||
self.log = log or logging.getLogger(self.__class__.__name__)
|
||||
self.stream = stream
|
||||
|
||||
def run(self):
|
||||
p = Parser(stream=self.stream, log=self.log)
|
||||
p.report()
|
||||
return 0
|
||||
|
||||
def shutdown(self):
|
||||
stream, self.stream = self.stream, None
|
||||
if stream is not None: stream.close()
|
||||
|
||||
class ExtractBase:
|
||||
|
||||
def __init__(self, stream,
|
||||
initrd=False, profile=None, path=None,
|
||||
property=None,
|
||||
log=None):
|
||||
self.log = log or logging.getLogger(self.__class__.__name__)
|
||||
self.stream = stream
|
||||
self.initrd = initrd
|
||||
self.profile = profile
|
||||
self.path = path
|
||||
self.property = property
|
||||
|
||||
self.parser = None
|
||||
self.node = None
|
||||
self.dataProp = None
|
||||
|
||||
def run(self):
|
||||
self.parser = Parser(stream=self.stream, log=self.log)
|
||||
if self.path is not None:
|
||||
self.node = self.parser.getNode(self.path)
|
||||
if self.node is None:
|
||||
self.log.error("cannot find path")
|
||||
return 1
|
||||
elif self.initrd:
|
||||
self.node = self.parser.getInitrdNode(profile=self.profile)
|
||||
if self.node is None:
|
||||
self.log.error("cannot find initrd")
|
||||
return 1
|
||||
else:
|
||||
self.log.error("missing path or initrd")
|
||||
return 1
|
||||
|
||||
def _t(n):
|
||||
if n is None: return
|
||||
self.dataProp = self.dataProp or self.node.properties.get(n, None)
|
||||
_t(self.property)
|
||||
_t('data')
|
||||
_t('value')
|
||||
if self.dataProp is None:
|
||||
self.log.error("cannot find %s property", self.property)
|
||||
return 1
|
||||
|
||||
return self._handleParsed()
|
||||
|
||||
def _handleParsed(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
stream, self.stream = self.stream, None
|
||||
if stream is not None: stream.close()
|
||||
|
||||
class ExtractRunner(ExtractBase):
|
||||
|
||||
def __init__(self, stream,
|
||||
outStream=None,
|
||||
initrd=False, profile=None, path=None,
|
||||
property=None,
|
||||
text=False, numeric=False, timestamp=False, hex=False,
|
||||
log=None):
|
||||
ExtractBase.__init__(self, stream,
|
||||
initrd=initrd, profile=profile,
|
||||
path=path,
|
||||
property=property,
|
||||
log=log)
|
||||
self.outStream = outStream
|
||||
self.text = text
|
||||
self.numeric = numeric
|
||||
self.timestamp = timestamp
|
||||
self.hex = hex
|
||||
|
||||
def _handleParsed(self):
|
||||
if (self.numeric or self.timestamp) and self.dataProp.sz != 4:
|
||||
self.log.error("invalid size for number")
|
||||
return 1
|
||||
def _dump(rfd, wfd):
|
||||
rfd.seek(self.dataProp.offset, 0)
|
||||
buf = rfd.read(self.dataProp.sz)
|
||||
if self.text:
|
||||
if buf[-1:] != '\x00':
|
||||
self.log.error("missing NUL terminator")
|
||||
return 1
|
||||
wfd.write(buf[:-1])
|
||||
return 0
|
||||
if self.numeric:
|
||||
n = struct.unpack(">I", buf)[0]
|
||||
wfd.write(str(n))
|
||||
return 0
|
||||
if self.timestamp:
|
||||
n = struct.unpack(">I", buf)[0]
|
||||
wfd.write(time.ctime(n))
|
||||
return 0
|
||||
if self.hex:
|
||||
for c in buf:
|
||||
wfd.write("%02x" % ord(c))
|
||||
return 0
|
||||
wfd.write(buf)
|
||||
return 0
|
||||
if self.outStream is not None:
|
||||
return _dump(self.stream, self.outStream)
|
||||
else:
|
||||
return _dump(self.stream, sys.stdout)
|
||||
|
||||
class OffsetRunner(ExtractBase):
|
||||
|
||||
def __init__(self, stream,
|
||||
initrd=False, profile=None, path=None,
|
||||
property=None,
|
||||
log=None):
|
||||
ExtractBase.__init__(self, stream,
|
||||
initrd=initrd, profile=profile,
|
||||
path=path,
|
||||
property=property,
|
||||
log=log)
|
||||
|
||||
def _handleParsed(self):
|
||||
start = self.dataProp.offset
|
||||
self.log.debug("first byte is %d", start)
|
||||
end = start + self.dataProp.sz - 1
|
||||
self.log.debug("data size is %d", self.dataProp.sz)
|
||||
self.log.debug("last byte is %d", end)
|
||||
sys.stdout.write("%s %s\n" % (start, end,))
|
||||
return 0
|
||||
|
||||
USAGE = """\
|
||||
pyfit [OPTIONS] dump|extract|offset ...
|
||||
"""
|
||||
|
||||
EPILOG = """\
|
||||
Payload for 'offset' and 'extract' is specified as a given
|
||||
PROPERTY for a tree node at PATH.
|
||||
|
||||
Alternately, the initrd/ramdisk can be specified with '--initrd',
|
||||
using the PROFILE machine configuration. If no PROFILE is specified,
|
||||
the built-in default configuration from the FDT is used.
|
||||
"""
|
||||
|
||||
DESC="""\
|
||||
Extract or examine FIT file contents.
|
||||
"""
|
||||
|
||||
DUMP_USAGE = """\
|
||||
pyfit [OPTIONS] dump FIT-FILE
|
||||
"""
|
||||
|
||||
EXTRACT_USAGE = """\
|
||||
pyfit [OPTIONS] extract [OPTIONS] FIT-FILE
|
||||
"""
|
||||
|
||||
EXTRACT_EPILOG = """\
|
||||
Extracts payload to OUTPUT or to stdout if not specified.
|
||||
|
||||
Output can be optionally reformatted
|
||||
as a NUL-terminated string ('--text'),
|
||||
as a decimal number ('--numeric'),
|
||||
as a UNIX timestamp ('--timestamp'),
|
||||
or as hex data ('--hex').
|
||||
|
||||
Numbers and timestamps must be 4-byte payloads.
|
||||
"""
|
||||
|
||||
OFFSET_USAGE = """\
|
||||
pyfit [OPTIONS] offset [OPTIONS] FIT-FILE
|
||||
"""
|
||||
|
||||
OFFSET_EPILOG = """\
|
||||
Outputs the first and last byte offsets, inclusive, containing the
|
||||
payload.
|
||||
"""
|
||||
|
||||
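The runner classes above can also be driven programmatically. The following is a minimal sketch, not part of the commit itself; the FIT image path, node path and property name are placeholders:

    import logging

    log = logging.getLogger("pyfit-example")

    # Print the first and last byte offsets of the default initrd payload.
    r = OffsetRunner(open("/tmp/example.itb"), initrd=True, log=log)
    r.run()
    r.shutdown()

    # Write the 'description' property of the (placeholder) /images/kernel
    # node to stdout as a NUL-stripped string.
    r = ExtractRunner(open("/tmp/example.itb"),
                      path="/images/kernel", property="description",
                      text=True, log=log)
    r.run()
    r.shutdown()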
class App:
|
||||
|
||||
def __init__(self, log=None):
|
||||
self.log = log or logging.getLogger("pyfit")
|
||||
|
||||
def run(self):
|
||||
|
||||
ap = argparse.ArgumentParser(usage=USAGE,
|
||||
description=DESC,
|
||||
epilog=EPILOG)
|
||||
ap.add_argument('-q', '--quiet', action='store_true',
|
||||
help="Suppress log messages")
|
||||
ap.add_argument('-v', '--verbose', action='store_true',
|
||||
help="Add more logging")
|
||||
|
||||
sp = ap.add_subparsers()
|
||||
|
||||
apd = sp.add_parser('dump',
|
||||
help="Dump tree structure",
|
||||
usage=DUMP_USAGE)
|
||||
apd.set_defaults(mode='dump')
|
||||
apd.add_argument('fit-file', type=open,
|
||||
help="FIT file")
|
||||
|
||||
apx = sp.add_parser('extract',
|
||||
help="Extract items",
|
||||
usage=EXTRACT_USAGE,
|
||||
epilog=EXTRACT_EPILOG)
|
||||
apx.set_defaults(mode='extract')
|
||||
apx.add_argument('fit-file', type=open,
|
||||
help="FIT file")
|
||||
apx.add_argument('-o', '--output',
|
||||
type=argparse.FileType('wb', 0),
|
||||
help="File destination")
|
||||
apx.add_argument('--initrd', action="store_true",
|
||||
help="Extract platform initrd")
|
||||
apx.add_argument('--profile', type=str,
|
||||
help="Platform profile for initrd selection")
|
||||
apx.add_argument('--path', type=str,
|
||||
help="Tree path to extract")
|
||||
apx.add_argument('--property', type=str,
|
||||
help="Node property to extract")
|
||||
apx.add_argument('--text', action='store_true',
|
||||
help="Format property as text")
|
||||
apx.add_argument('--numeric', action='store_true',
|
||||
help="Format property as a number")
|
||||
apx.add_argument('--hex', action='store_true',
|
||||
help="Format property as hex")
|
||||
apx.add_argument('--timestamp', action='store_true',
|
||||
help="Format property as a date")
|
||||
|
||||
apo = sp.add_parser('offset',
|
||||
help="Extract item offset",
|
||||
usage=OFFSET_USAGE,
|
||||
epilog=OFFSET_EPILOG)
|
||||
apo.set_defaults(mode='offset')
|
||||
apo.add_argument('fit-file', type=open,
|
||||
help="FIT file")
|
||||
apo.add_argument('--initrd', action="store_true",
|
||||
help="Extract platform initrd")
|
||||
apo.add_argument('--profile', type=str,
|
||||
help="Platform profile for initrd selection")
|
||||
apo.add_argument('--path', type=str,
|
||||
help="Tree path to extract")
|
||||
apo.add_argument('--property', type=str,
|
||||
help="Node property to extract")
|
||||
|
||||
try:
|
||||
args = ap.parse_args()
|
||||
except SystemExit, what:
|
||||
return what.code
|
||||
|
||||
if args.quiet:
|
||||
self.log.setLevel(logging.ERROR)
|
||||
if args.verbose:
|
||||
self.log.setLevel(logging.DEBUG)
|
||||
|
||||
if args.mode == 'dump':
|
||||
r = DumpRunner(getattr(args, 'fit-file'), log=self.log)
|
||||
elif args.mode == 'extract':
|
||||
r = ExtractRunner(getattr(args, 'fit-file'),
|
||||
outStream=args.output,
|
||||
path=args.path,
|
||||
initrd=args.initrd, profile=args.profile,
|
||||
property=args.property,
|
||||
text=args.text, numeric=args.numeric,
|
||||
timestamp=args.timestamp, hex=args.hex,
|
||||
log=self.log)
|
||||
elif args.mode == 'offset':
|
||||
r = OffsetRunner(getattr(args, 'fit-file'),
|
||||
path=args.path,
|
||||
initrd=args.initrd, profile=args.profile,
|
||||
property=args.property,
|
||||
log=self.log)
|
||||
else:
|
||||
self.log.error("invalid mode")
|
||||
return 1
|
||||
|
||||
try:
|
||||
code = r.run()
|
||||
except:
|
||||
self.log.exception("runner failed")
|
||||
code = 1
|
||||
r.shutdown()
|
||||
return code
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def main(cls):
|
||||
logging.basicConfig()
|
||||
logger = logging.getLogger("pyfit")
|
||||
app = cls(log=logger)
|
||||
try:
|
||||
code = app.run()
|
||||
except:
|
||||
logger.exception("app failed")
|
||||
code = 1
|
||||
app.shutdown()
|
||||
sys.exit(code)
|
||||
|
||||
main = App.main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,871 @@
|
||||
"""InstallUtils.py
|
||||
|
||||
"""
|
||||
|
||||
import os, sys
|
||||
import stat
|
||||
import logging
|
||||
import subprocess
|
||||
import tempfile
|
||||
import string
|
||||
import shutil
|
||||
|
||||
class SubprocessMixin:
|
||||
|
||||
V1 = "V1"
|
||||
V2 = "V2"
|
||||
|
||||
def check_call(self, *args, **kwargs):
|
||||
args = list(args)
|
||||
kwargs = dict(kwargs)
|
||||
|
||||
cwd = kwargs.pop('cwd', None)
|
||||
if cwd is not None:
|
||||
self.log.debug("+ cd " + cwd)
|
||||
|
||||
if args:
|
||||
cmd = args.pop(0)
|
||||
else:
|
||||
cmd = kwargs.pop('cmd')
|
||||
|
||||
vmode = kwargs.pop('vmode', None)
|
||||
if vmode == self.V1 and self.log.isEnabledFor(logging.DEBUG):
|
||||
if isinstance(cmd, basestring):
|
||||
raise ValueError("vmode=V1 requires a list")
|
||||
cmd = list(cmd)
|
||||
cmd[1:1] = ['-v',]
|
||||
if vmode == self.V2 and self.log.isEnabledFor(logging.DEBUG):
|
||||
stdout = kwargs.pop('stdout', None)
|
||||
stderr = kwargs.pop('stderr', None)
|
||||
if stdout is not None:
|
||||
raise ValueError("vmode=V2 conflicts with stdout")
|
||||
if stderr is not None and stderr != subprocess.STDOUT:
|
||||
raise ValueError("vmode=V2 conflicts with stderr")
|
||||
fno, v2Out = tempfile.mkstemp(prefix='subprocess-',
|
||||
suffix='out')
|
||||
kwargs['stdout'] = fno
|
||||
kwargs['stderr'] = subprocess.STDOUT
|
||||
|
||||
if isinstance(cmd, basestring):
|
||||
self.log.debug("+ " + cmd)
|
||||
else:
|
||||
self.log.debug("+ " + " ".join(cmd))
|
||||
|
||||
if vmode == self.V2 and self.log.isEnabledFor(logging.DEBUG):
|
||||
try:
|
||||
subprocess.check_call(cmd, *args, cwd=cwd, **kwargs)
|
||||
finally:
|
||||
with open(v2Out) as fd:
|
||||
sys.stderr.write(fd.read())
|
||||
os.unlink(v2Out)
|
||||
else:
|
||||
subprocess.check_call(cmd, *args, cwd=cwd, **kwargs)
|
||||
|
||||
def check_output(self, *args, **kwargs):
|
||||
args = list(args)
|
||||
kwargs = dict(kwargs)
|
||||
|
||||
cwd = kwargs.pop('cwd', None)
|
||||
if cwd is not None:
|
||||
self.log.debug("+ cd " + cwd)
|
||||
|
||||
if args:
|
||||
cmd = args.pop(0)
|
||||
else:
|
||||
cmd = kwargs.pop('cmd')
|
||||
|
||||
vmode = kwargs.pop('vmode', None)
|
||||
if vmode == self.V1 and self.log.isEnabledFor(logging.DEBUG):
|
||||
if isinstance(cmd, basestring):
|
||||
raise ValueError("vmode=V1 requires a list")
|
||||
cmd = list(cmd)
|
||||
cmd[1:1] = ['-v',]
|
||||
if vmode == self.V2 and self.log.isEnabledFor(logging.DEBUG):
|
||||
stdout = kwargs.pop('stdout', None)
|
||||
stderr = kwargs.pop('stderr', None)
|
||||
if stdout is not None:
|
||||
raise ValueError("vmode=V2 conflicts with stdout")
|
||||
if stderr is not None and stderr != subprocess.STDOUT:
|
||||
raise ValueError("vmode=V2 conflicts with stderr")
|
||||
fno, v2Out = tempfile.mkstemp(prefix='subprocess-',
|
||||
suffix='out')
|
||||
kwargs['stderr'] = fno
|
||||
|
||||
if isinstance(cmd, basestring):
|
||||
self.log.debug("+ " + cmd)
|
||||
else:
|
||||
self.log.debug("+ " + " ".join(cmd))
|
||||
|
||||
if vmode == self.V2 and self.log.isEnabledFor(logging.DEBUG):
|
||||
try:
|
||||
return subprocess.check_output(cmd, *args, cwd=cwd, **kwargs)
|
||||
finally:
|
||||
with open(v2Out) as fd:
|
||||
sys.stderr.write(fd.read())
|
||||
os.unlink(v2Out)
|
||||
else:
|
||||
return subprocess.check_output(cmd, *args, cwd=cwd, **kwargs)
|
||||
|
||||
def rmdir(self, path):
|
||||
self.log.debug("+ /bin/rmdir %s", path)
|
||||
os.rmdir(path)
|
||||
|
||||
def unlink(self, path):
|
||||
self.log.debug("+ /bin/rm %s", path)
|
||||
os.unlink(path)
|
||||
|
||||
def rmtree(self, path):
|
||||
self.log.debug("+ /bin/rm -fr %s", path)
|
||||
shutil.rmtree(path)
|
||||
|
||||
def mkdtemp(self, *args, **kwargs):
|
||||
path = tempfile.mkdtemp(*args, **kwargs)
|
||||
self.log.debug("+ /bin/mkdir %s", path)
|
||||
return path
|
||||
|
||||
def copy2(self, src, dst):
|
||||
self.log.debug("+ /bin/cp -a %s %s", src, dst)
|
||||
shutil.copy2(src, dst)
|
||||
|
||||
def copyfile(self, src, dst):
|
||||
self.log.debug("+ /bin/cp %s %s", src, dst)
|
||||
shutil.copyfile(src, dst)
|
||||
|
||||
def mkdir(self, path):
|
||||
self.log.debug("+ /bin/mkdir %s", path)
|
||||
os.mkdir(path)
|
||||
|
||||
def makedirs(self, path):
|
||||
self.log.debug("+ /bin/mkdir -p %s", path)
|
||||
os.makedirs(path)
|
||||
|
||||
def symlink(self, tgt, dst):
|
||||
self.log.debug("+ /bin/ln -s %s %s", tgt, dst)
|
||||
os.symlink(tgt, dst)
|
||||
|
||||
def mkdosfs(self, dev, label=None):
|
||||
if label is not None:
|
||||
cmd = ('mkdosfs', '-n', label, dev,)
|
||||
else:
|
||||
cmd = ('mkdosfs', dev,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
def mke2fs(self, dev, label=None):
|
||||
if label is not None:
|
||||
cmd = ('mkfs.ext2', '-L', label, dev,)
|
||||
else:
|
||||
cmd = ('mkfs.ext2', dev,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
def mke4fs(self, dev, label=None, huge_file=True):
|
||||
if label is not None:
|
||||
cmd = ['mkfs.ext4', '-L', label, dev,]
|
||||
else:
|
||||
cmd = ['mkfs.ext4', dev,]
|
||||
|
||||
if not huge_file:
|
||||
cmd[1:1] = ['-O', '^huge_file',]
|
||||
# hack needed for some old ONIE kernels
|
||||
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
def mkfs(self, dev, fstype):
|
||||
mkfs = 'mkfs.%s' % fstype
|
||||
cmd = (mkfs, dev,)
|
||||
|
||||
# 'mkfs -h' says to use '-V' for verbose,
|
||||
# don't believe it
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
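The vmode handling above only takes effect when debug logging is enabled: V1 splices a '-v' flag into list-style commands, while V2 captures combined output to a temporary file and replays it on stderr afterwards. A minimal sketch of a consumer class; the class name and device label are made up:

    import logging

    class Formatter(SubprocessMixin):

        def __init__(self, log=None):
            self.log = log or logging.getLogger("formatter")

        def format(self, device):
            # With debug logging enabled, V1 makes this run
            # "mkfs.ext4 -v -L ONL-DATA <device>".
            self.mke4fs(device, label="ONL-DATA")
            # With debug logging enabled, V2 captures stdout/stderr to a
            # temporary file and echoes it to stderr after the command exits.
            self.check_call(('sync',), vmode=self.V2)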
class TempdirContext(SubprocessMixin):
|
||||
|
||||
def __init__(self, prefix=None, suffix=None, chroot=None, log=None):
|
||||
self.prefix = prefix
|
||||
self.suffix = suffix
|
||||
self.chroot = chroot
|
||||
self.dir = None
|
||||
self.hostDir = None
|
||||
self.log = log or logging.getLogger("mount")
|
||||
|
||||
def __enter__(self):
|
||||
if self.chroot is not None:
|
||||
self.hostDir = self.mkdtemp(prefix=self.prefix,
|
||||
suffix=self.suffix,
|
||||
dir=self.chroot + "/tmp")
|
||||
self.dir = self.hostDir[len(self.chroot):]
|
||||
else:
|
||||
self.dir = self.hostDir = self.mkdtemp(prefix=self.prefix,
|
||||
suffix=self.suffix)
|
||||
return self
|
||||
|
||||
def __exit__(self, type, value, tb):
|
||||
if self.hostDir: self.rmtree(self.hostDir)
|
||||
return False
|
||||
|
||||
class MountContext(SubprocessMixin):
|
||||
|
||||
def __init__(self, device=None, chroot=None, label=None, fsType=None, log=None):
|
||||
self.device = device
|
||||
self.chroot = chroot
|
||||
self.label = label
|
||||
self.fsType = fsType
|
||||
self.dir = None
|
||||
self.hostDir = None
|
||||
self.mounted = False
|
||||
self.log = log or logging.getLogger("mount")
|
||||
|
||||
if self.device and self.label:
|
||||
raise ValueError("cannot specify device and label")
|
||||
if not self.device and not self.label:
|
||||
raise ValueError("no device or label specified")
|
||||
|
||||
def __enter__(self):
|
||||
dev = self.device
|
||||
if dev is None:
|
||||
try:
|
||||
dev = self.check_output(('blkid', '-L', self.label,)).strip()
|
||||
except subprocess.CalledProcessError, what:
|
||||
raise ValueError("cannot find label %s: %s"
|
||||
% (self.label, str(what),))
|
||||
|
||||
if self.chroot is not None:
|
||||
self.hostDir = self.mkdtemp(prefix="mount-",
|
||||
suffix=".d",
|
||||
dir=self.chroot + "/tmp")
|
||||
self.dir = self.hostDir[len(self.chroot):]
|
||||
else:
|
||||
self.dir = self.hostDir = self.mkdtemp(prefix="mount-",
|
||||
suffix=".d")
|
||||
|
||||
if self.fsType is not None:
|
||||
cmd = ('mount', '-t', self.fsType, dev, self.hostDir,)
|
||||
else:
|
||||
cmd = ('mount', dev, self.hostDir,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
self.mounted = True
|
||||
return self
|
||||
|
||||
def __exit__(self, type, value, tb):
|
||||
|
||||
mounted = False
|
||||
if self.mounted:
|
||||
p = ProcMountsParser()
|
||||
for e in p.mounts:
|
||||
if e.dir == self.hostDir:
|
||||
mounted = True
|
||||
break
|
||||
# really mounted?
|
||||
# maybe unmounted e.g. if inside a chroot
|
||||
if mounted:
|
||||
cmd = ('umount', self.hostDir,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
self.rmdir(self.hostDir)
|
||||
return False
|
||||
|
||||
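A short usage sketch of MountContext as written above; the label and file name are placeholders:

    import logging, os

    log = logging.getLogger("example")

    # Mount the filesystem labeled ONL-BOOT at a temporary directory,
    # look for a file, and let __exit__ unmount and remove the directory.
    with MountContext(label="ONL-BOOT", log=log) as ctx:
        src = os.path.join(ctx.dir, "boot-config")
        if os.path.exists(src):
            log.info("found %s", src)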
class BlkidEntry:
|
||||
|
||||
def __init__(self, device, **kwargs):
|
||||
|
||||
self.device = device
|
||||
|
||||
kwargs = dict(kwargs)
|
||||
self.label = kwargs.pop('label', None)
|
||||
self.uuid = kwargs.pop('uuid', None)
|
||||
self.fsType = kwargs.pop('fsType', None)
|
||||
|
||||
@classmethod
|
||||
def fromLine(cls, line):
|
||||
line = line.strip()
|
||||
p = line.find(':')
|
||||
if p < 0:
|
||||
raise ValueError("invalid blkid output %s"
|
||||
% line)
|
||||
dev, line = line[:p], line[p+1:].strip()
|
||||
|
||||
attrs = {}
|
||||
while line:
|
||||
p = line.find('=')
|
||||
if p < 0:
|
||||
raise ValueError("invalid blkid output %s"
|
||||
% line)
|
||||
key = line[:p].lower()
|
||||
if line[p+1:p+2] == "'":
|
||||
q = line.find("'", p+2)
|
||||
if q < 0:
|
||||
val, line = line[p+1:], ""
|
||||
else:
|
||||
val, line = line[p+2:q], line[q+1:].strip()
|
||||
elif line[p+1:p+2] == '"':
|
||||
q = line.find('"', p+2)
|
||||
if q < 0:
|
||||
val, line = line[p+1:], ""
|
||||
else:
|
||||
val, line = line[p+2:q], line[q+1:].strip()
|
||||
else:
|
||||
q = line.find(" ", p+1)
|
||||
if q < 0:
|
||||
val, line = line[p+1:], ""
|
||||
else:
|
||||
val, line = line[p+1:q], line[q+1:].strip()
|
||||
|
||||
if key == 'type': key = 'fsType'
|
||||
attrs[key] = val
|
||||
|
||||
return cls(dev, **attrs)
|
||||
|
||||
def splitDev(self):
|
||||
dev, part = self.device, ""
|
||||
while dev[-1:] in string.digits:
|
||||
dev, part = dev[:-1], dev[-1] + part
|
||||
return dev, part
|
||||
|
||||
def isOnieReserved(self):
|
||||
if self.label is None: return False
|
||||
|
||||
if 'GRUB' in self.label: return True
|
||||
if 'ONIE-BOOT' in self.label: return True
|
||||
if 'DIAG' in self.label: return True
|
||||
|
||||
return False
|
||||
|
||||
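For reference, fromLine() consumes single-line blkid output; a sketch of the mapping with a made-up device, UUID and label:

    # Illustrative only; the device name, UUID and label are made up.
    line = '/dev/sda3: LABEL="ONL-BOOT" UUID="0000-1111" TYPE="ext4"'
    e = BlkidEntry.fromLine(line)
    # e.device          -> '/dev/sda3'
    # e.label           -> 'ONL-BOOT'
    # e.fsType          -> 'ext4'   (the TYPE key is renamed to fsType)
    # e.splitDev()      -> ('/dev/sda', '3')
    # e.isOnieReserved() -> False   (no GRUB/ONIE-BOOT/DIAG in the label)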
class BlkidParser(SubprocessMixin):
|
||||
|
||||
def __init__(self, log=None):
|
||||
self.log = log or logging.getLogger("blkid")
|
||||
self.parse()
|
||||
|
||||
def parse(self):
|
||||
cmd = ('blkid',)
|
||||
lines = self.check_output(cmd).splitlines()
|
||||
self.parts = [BlkidEntry.fromLine(line) for line in lines]
|
||||
|
||||
def __getitem__(self, idxOrName):
|
||||
if type(idxOrName) == int:
|
||||
return self.parts[idxOrName]
|
||||
for part in self.parts:
|
||||
if part.label == idxOrName: return part
|
||||
if part.uuid == idxOrName: return part
|
||||
raise IndexError("cannot find partition %s" % repr(idxOrName))
|
||||
|
||||
def __len__(self):
|
||||
return len(self.parts)
|
||||
|
||||
class ProcMtdEntry:
|
||||
|
||||
def __init__(self,
|
||||
charDevice, blockDevice,
|
||||
offset, size, eraseSize,
|
||||
label=None):
|
||||
|
||||
self.charDevice = charDevice
|
||||
self.blockDevice = blockDevice
|
||||
self.offset = offset
|
||||
self.size = size
|
||||
self.eraseSize = eraseSize
|
||||
self.label = label
|
||||
|
||||
@classmethod
|
||||
def fromLine(cls, line, offset=0):
|
||||
buf = line.strip()
|
||||
p = buf.find(':')
|
||||
if p < 0:
|
||||
raise ValueError("invalid /proc/mtd entry %s"
|
||||
% line)
|
||||
dev, buf = buf[:p], buf[p+1:].strip()
|
||||
dev = '/dev/' + dev
|
||||
if not os.path.exists(dev):
|
||||
raise ValueError("invalid /proc/mtd entry %s (missing device)"
|
||||
% line)
|
||||
st = os.stat(dev)
|
||||
if stat.S_ISBLK(st.st_mode):
|
||||
cdev, bdev = None, dev
|
||||
elif stat.S_ISCHR(st.st_mode):
|
||||
cdev, bdev = dev, None
|
||||
else:
|
||||
cdev, bdev = None, None
|
||||
|
||||
if cdev and not bdev:
|
||||
if cdev.startswith("/dev/mtd") and not cdev.startswith("/dev/mtdblock"):
|
||||
bdev = "/dev/mtdblock" + cdev[8:]
|
||||
if not os.path.exists(bdev):
|
||||
raise ValueError("invalid /proc/mtd entry %s (cannot find block device)"
|
||||
% line)
|
||||
st = os.stat(bdev)
|
||||
if not stat.S_ISBLK(st.st_mode):
|
||||
raise ValueError("invalid /proc/mtd entry %s (cannot find block device)"
|
||||
% line)
|
||||
else:
|
||||
raise ValueError("invalid /proc/mtd entry %s (cannot find block device)"
|
||||
% line)
|
||||
elif not bdev:
|
||||
raise ValueError("invalid /proc/mtd entry %s (not a block or char device)"
|
||||
% line)
|
||||
|
||||
p = buf.find(" ")
|
||||
if p < 0:
|
||||
raise ValueError("invalid /proc/mtd entry %s (missing size)"
|
||||
% line)
|
||||
sz, buf = buf[:p], buf[p+1:].strip()
|
||||
sz = int(sz, 16)
|
||||
|
||||
if not buf:
|
||||
raise ValueError("invalid /proc/mtd entry %s (missing erase size)"
|
||||
% line)
|
||||
p = buf.find(" ")
|
||||
if p < 0:
|
||||
esz, buf = buf, ""
|
||||
else:
|
||||
esz, buf = buf[:p], buf[p+1:].strip()
|
||||
esz = int(esz, 16)
|
||||
|
||||
if not buf:
|
||||
label = None
|
||||
elif len(buf) > 1 and buf[0:1] == "'" and buf[-1:] == "'":
|
||||
label = buf[1:-1]
|
||||
elif len(buf) > 1 and buf[0:1] == '"' and buf[-1:] == '"':
|
||||
label = buf[1:-1]
|
||||
else:
|
||||
label = buf
|
||||
|
||||
return cls(cdev, bdev, offset, sz, esz, label=label)
|
||||
|
||||
class ProcMtdParser():
|
||||
|
||||
def __init__(self, log=None):
|
||||
self.log = log or logging.getLogger("blkid")
|
||||
self.parse()
|
||||
|
||||
def parse(self):
|
||||
self.parts = []
|
||||
offset = 0
|
||||
if os.path.exists("/proc/mtd"):
|
||||
with open("/proc/mtd") as fd:
|
||||
for line in fd.xreadlines():
|
||||
if line.startswith("dev:"):
|
||||
pass
|
||||
else:
|
||||
part = ProcMtdEntry.fromLine(line, offset=offset)
|
||||
offset += part.size
|
||||
self.parts.append(part)
|
||||
|
||||
def __getitem__(self, idxOrName):
|
||||
if type(idxOrName) == int:
|
||||
return self.parts[idxOrName]
|
||||
for part in self.parts:
|
||||
if part.label == idxOrName: return part
|
||||
raise IndexError("cannot find MTD partition %s" % repr(idxOrName))
|
||||
|
||||
def __len__(self):
|
||||
return len(self.parts)
|
||||
|
||||
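For reference, /proc/mtd entries look like 'mtd0: 00100000 00020000 "name"'; a sketch of how fromLine() maps such a line, with made-up values. Note that the corresponding /dev nodes must exist, since fromLine() stats them:

    # Illustrative only; requires /dev/mtd0 and /dev/mtdblock0 to exist.
    line = 'mtd0: 00100000 00020000 "onie"'
    e = ProcMtdEntry.fromLine(line, offset=0)
    # e.charDevice  -> '/dev/mtd0'         (character device)
    # e.blockDevice -> '/dev/mtdblock0'    (derived block device)
    # e.size        -> 0x00100000, e.eraseSize -> 0x00020000 (bytes)
    # e.label       -> 'onie'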
class PartedDiskEntry:
|
||||
|
||||
def __init__(self, device, blocks, lbsz, pbsz,
|
||||
model=None, typ=None, flags=[]):
|
||||
self.device = device
|
||||
|
||||
self.blocks = blocks
|
||||
self.lbsz = lbsz
|
||||
self.pbsz = pbsz
|
||||
|
||||
self.model = model
|
||||
self.typ = typ
|
||||
self.flags = flags
|
||||
|
||||
@classmethod
|
||||
def fromLine(cls, line):
|
||||
|
||||
line = line.strip()
|
||||
if not line.endswith(';'):
|
||||
raise ValueError("invalid parted line %s" % line)
|
||||
line = line[:-1]
|
||||
rec = line.split(':')
|
||||
|
||||
def _s():
|
||||
secs = rec.pop(0)
|
||||
if secs[-1:] != 's':
|
||||
raise ValueError("invalid sector count %s" % secs)
|
||||
return int(secs[:-1])
|
||||
|
||||
dev = rec.pop(0)
|
||||
blocks = _s()
|
||||
model = rec.pop(0) or None
|
||||
lbsz = int(rec.pop(0), 10)
|
||||
pbsz = int(rec.pop(0), 10)
|
||||
typ = rec.pop(0)
|
||||
label = rec.pop(0) or None
|
||||
flags = rec.pop(0)
|
||||
flags = [x.strip() for x in flags.split(',')]
|
||||
|
||||
if rec:
|
||||
raise ValueError("invalid parted line %s" % line)
|
||||
|
||||
return cls(dev, blocks, lbsz, pbsz,
|
||||
model=model, typ=typ,
|
||||
flags=flags)
|
||||
|
||||
class PartedPartEntry:
|
||||
|
||||
def __init__(self, part, start, end, sz,
|
||||
fs=None, label=None, flags=[]):
|
||||
self.part = part
|
||||
self.start = start
|
||||
self.end = end
|
||||
self.sz = sz
|
||||
self.fs = fs
|
||||
self.label = label
|
||||
self.flags = flags
|
||||
|
||||
@classmethod
|
||||
def fromLine(cls, line):
|
||||
|
||||
line = line.strip()
|
||||
if not line.endswith(';'):
|
||||
raise ValueError("invalid parted line %s" % line)
|
||||
line = line[:-1]
|
||||
rec = line.split(':')
|
||||
|
||||
def _s():
|
||||
secs = rec.pop(0)
|
||||
if secs[-1:] != 's':
|
||||
raise ValueError("invalid sector count %s" % secs)
|
||||
return int(secs[:-1])
|
||||
|
||||
part = int(rec.pop(0), 10)
|
||||
if part < 1:
|
||||
raise ValueError("invalid partition %d" % part)
|
||||
start = _s()
|
||||
end = _s()
|
||||
sz = _s()
|
||||
fs = rec.pop(0) or None
|
||||
label = rec.pop(0) or None
|
||||
flags = rec.pop(0)
|
||||
flags = [x.strip() for x in flags.split(',')]
|
||||
|
||||
if rec:
|
||||
raise ValueError("invalid parted line %s" % line)
|
||||
|
||||
return cls(part, start, end, sz,
|
||||
fs=fs, label=label,
|
||||
flags=flags)
|
||||
|
||||
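These two entry classes consume the machine-readable output of 'parted -m DEVICE unit s print', as issued by PartedParser below. A sketch of the two line shapes, with made-up values:

    # Illustrative only; device, sizes and labels are made up.
    disk = PartedDiskEntry.fromLine(
        "/dev/sda:234441648s:scsi:512:512:gpt:Example Disk:;")
    part = PartedPartEntry.fromLine(
        "1:2048s:526335s:524288s:ext4:ONL-BOOT:boot;")
    # disk.blocks -> 234441648, disk.lbsz/pbsz -> 512/512, disk.typ -> 'gpt'
    # part.part -> 1, part.start/end/sz -> 2048/526335/524288 sectors
    # part.fs -> 'ext4', part.label -> 'ONL-BOOT', part.flags -> ['boot']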
class PartedParser(SubprocessMixin):
|
||||
|
||||
def __init__(self, device, log=None):
|
||||
self.device = device
|
||||
self.log = log or logging.getLogger("parted")
|
||||
self.parse()
|
||||
|
||||
def parse(self):
|
||||
|
||||
cmd = ('parted', '-m', self.device,
|
||||
'unit', 's',
|
||||
'print',)
|
||||
lines = self.check_output(cmd).splitlines()
|
||||
self.disk = None
|
||||
parts = {}
|
||||
for line in lines:
|
||||
if line.startswith('/dev/'):
|
||||
self.disk = PartedDiskEntry.fromLine(line)
|
||||
elif line[0:1] in string.digits:
|
||||
ent = PartedPartEntry.fromLine(line)
|
||||
if ent.part in parts:
|
||||
raise ValueError("duplicate partition")
|
||||
parts[ent.part] = ent
|
||||
|
||||
self.parts = []
|
||||
for partno in sorted(parts.keys()):
|
||||
self.parts.append(parts[partno])
|
||||
|
||||
if self.disk is None:
|
||||
raise ValueError("no partition table found")
|
||||
|
||||
def __len__(self):
|
||||
return len(self.parts)
|
||||
|
||||
class ProcMountsEntry:
|
||||
|
||||
def __init__(self, device, dir, fsType, flags={}):
|
||||
self.device = device
|
||||
self.dir = dir
|
||||
self.fsType = fsType
|
||||
self.flags = flags
|
||||
|
||||
@classmethod
|
||||
def fromLine(cls, line):
|
||||
buf = line.strip()
|
||||
|
||||
idx = buf.find(' ')
|
||||
if idx < 0:
|
||||
raise ValueError("invalid /proc/mounts line %s", line)
|
||||
|
||||
device, buf = buf[:idx], buf[idx+1:].strip()
|
||||
|
||||
idx = buf.find(' ')
|
||||
if idx < 0:
|
||||
raise ValueError("invalid /proc/mounts line %s", line)
|
||||
|
||||
dir, buf = buf[:idx], buf[idx+1:].strip()
|
||||
|
||||
idx = buf.find(' ')
|
||||
if idx < 0:
|
||||
raise ValueError("invalid /proc/mounts line %s", line)
|
||||
|
||||
fsType, buf = buf[:idx], buf[idx+1:].strip()
|
||||
|
||||
idx = buf.rfind(' ')
|
||||
if idx < 0:
|
||||
raise ValueError("invalid /proc/mounts line %s", line)
|
||||
|
||||
buf, _ = buf[:idx], buf[idx+1:].strip()
|
||||
|
||||
idx = buf.rfind(' ')
|
||||
if idx < 0:
|
||||
buf = ""
|
||||
else:
|
||||
buf, _ = buf[:idx], buf[idx+1:].strip()
|
||||
|
||||
flags = {}
|
||||
if buf:
|
||||
for flag in buf.split(','):
|
||||
idx = flag.find('=')
|
||||
if idx > -1:
|
||||
key, val = flag[:idx], flag[idx+1:]
|
||||
else:
|
||||
key, val = flag, True
|
||||
flags[key] = val
|
||||
|
||||
return cls(device, dir, fsType, flags)
|
||||
|
||||
class ProcMountsParser:
|
||||
|
||||
def __init__(self):
|
||||
self.parse()
|
||||
|
||||
def parse(self):
|
||||
self.mounts = []
|
||||
with open("/proc/mounts") as fd:
|
||||
for line in fd.readlines():
|
||||
self.mounts.append(ProcMountsEntry.fromLine(line))
|
||||
|
||||
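ProcMountsParser is what the rest of the installer code uses to decide whether a device is already mounted; a short sketch with a placeholder device path:

    # Illustrative only; '/dev/sda2' is a placeholder device.
    pm = ProcMountsParser()
    mounted = [m for m in pm.mounts if m.device == "/dev/sda2"]
    if mounted:
        print("%s is mounted at %s as %s"
              % (mounted[0].device, mounted[0].dir, mounted[0].fsType))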
class InitrdContext(SubprocessMixin):
|
||||
|
||||
def __init__(self, initrd=None, dir=None, log=None):
|
||||
if initrd is None and dir is None:
|
||||
raise ValueError("missing initrd or initrd dir")
|
||||
if initrd and dir:
|
||||
raise ValueError("cannot specify initrd and initrd dir")
|
||||
self.initrd = initrd
|
||||
self.dir = dir
|
||||
self.hlog = log or logging.getLogger("mount")
|
||||
self.ilog = self.hlog.getChild("initrd")
|
||||
self.ilog.setLevel(logging.INFO)
|
||||
self.log = self.hlog
|
||||
|
||||
def _unpack(self):
|
||||
self.dir = self.mkdtemp(prefix="chroot-",
|
||||
suffix=".d")
|
||||
with open(self.initrd) as fd:
|
||||
mbuf = fd.read(1024)
|
||||
if mbuf[0:2] == "\x1f\x8b":
|
||||
c1 = ('gzip', '-dc', self.initrd,)
|
||||
elif mbuf[0:2] == "BZ":
|
||||
c1 = ('bzip2', '-dc', self.initrd,)
|
||||
elif mbuf[0:6] == "\xfd7zXZ\x00":
|
||||
c1 = ('xz', '-dc', self.initrd,)
|
||||
else:
|
||||
raise ValueError("cannot decode initrd")
|
||||
c2 = ('cpio', '-imd',)
|
||||
self.log.debug("+ %s | %s",
|
||||
" ".join(c1), " ".join(c2))
|
||||
try:
|
||||
p1 = subprocess.Popen(c1,
|
||||
stdout=subprocess.PIPE)
|
||||
except OSError as ex:
|
||||
self.log.exception("command not found: %s" % c1[0])
|
||||
raise ValueError("cannot start pipe")
|
||||
try:
|
||||
if self.log.isEnabledFor(logging.DEBUG):
|
||||
p2 = subprocess.Popen(c2,
|
||||
cwd=self.dir,
|
||||
stdin=p1.stdout)
|
||||
else:
|
||||
p2 = subprocess.Popen(c2,
|
||||
cwd=self.dir,
|
||||
stdin=p1.stdout,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT)
|
||||
except OSError as ex:
|
||||
self.log.exception("cannot start command: %s" % c2[0])
|
||||
raise ValueError("cannot start pipe")
|
||||
c1 = p1.wait()
|
||||
out, _ = p2.communicate()
|
||||
c2 = p2.wait()
|
||||
if c2 and out:
|
||||
sys.stderr.write(out)
|
||||
if c1 or c2:
|
||||
raise ValueError("initrd unpack failed")
|
||||
|
||||
def _prepDirs(self):
|
||||
|
||||
dev2 = os.path.join(self.dir, "dev")
|
||||
if not os.path.exists(dev2):
|
||||
self.mkdir(dev2)
|
||||
|
||||
for e in os.listdir(dev2):
|
||||
dst = os.path.join(dev2, e)
|
||||
if os.path.islink(dst):
|
||||
self.unlink(dst)
|
||||
elif os.path.isdir(dst):
|
||||
self.rmtree(dst)
|
||||
else:
|
||||
self.unlink(dst)
|
||||
|
||||
for e in os.listdir("/dev"):
|
||||
src = os.path.join("/dev", e)
|
||||
dst = os.path.join(dev2, e)
|
||||
if os.path.islink(src):
|
||||
self.symlink(os.readlink(src), dst)
|
||||
elif os.path.isdir(src):
|
||||
self.mkdir(dst)
|
||||
elif os.path.isfile(src):
|
||||
self.copy2(src, dst)
|
||||
else:
|
||||
st = os.stat(src)
|
||||
if stat.S_ISBLK(st.st_mode):
|
||||
maj, min = os.major(st.st_rdev), os.minor(st.st_rdev)
|
||||
self.log.debug("+ mknod %s b %d %d", dst, maj, min)
|
||||
os.mknod(dst, st.st_mode, st.st_rdev)
|
||||
elif stat.S_ISCHR(st.st_mode):
|
||||
maj, min = os.major(st.st_rdev), os.minor(st.st_rdev)
|
||||
self.log.debug("+ mknod %s c %d %d", dst, maj, min)
|
||||
os.mknod(dst, st.st_mode, st.st_rdev)
|
||||
else:
|
||||
self.log.debug("skipping device %s", src)
|
||||
|
||||
dst = os.path.join(self.dir, "dev/pts")
|
||||
if not os.path.exists(dst):
|
||||
self.mkdir(dst)
|
||||
|
||||
if 'TMPDIR' in os.environ:
|
||||
dst = self.dir + os.environ['TMPDIR']
|
||||
if not os.path.exists(dst):
|
||||
self.makedirs(dst)
|
||||
|
||||
def __enter__(self):
|
||||
|
||||
if self.initrd is not None:
|
||||
|
||||
self.log.debug("extracting initrd %s", self.initrd)
|
||||
self._unpack()
|
||||
|
||||
self.log.debug("preparing chroot in %s", self.dir)
|
||||
try:
|
||||
self.log = self.ilog
|
||||
self._prepDirs()
|
||||
finally:
|
||||
self.log = self.hlog
|
||||
|
||||
dst = os.path.join(self.dir, "proc")
|
||||
cmd = ('mount', '-t', 'proc', 'proc', dst,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
dst = os.path.join(self.dir, "sys")
|
||||
cmd = ('mount', '-t', 'sysfs', 'sysfs', dst,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
dst = os.path.join(self.dir, "dev/pts")
|
||||
cmd = ('mount', '-t', 'devpts', 'devpts', dst,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
return self
|
||||
|
||||
def __exit__(self, type, value, tb):
|
||||
|
||||
p = ProcMountsParser()
|
||||
dirs = [e.dir for e in p.mounts if e.dir.startswith(self.dir)]
|
||||
|
||||
# XXX probably also kill files here
|
||||
|
||||
# umount any nested mounts
|
||||
self.log.debug("un-mounting mounts points in chroot %s", self.dir)
|
||||
dirs.sort(reverse=True)
|
||||
for p in dirs:
|
||||
cmd = ('umount', p,)
|
||||
self.check_call(cmd, vmode=self.V1)
|
||||
|
||||
if self.initrd is not None:
|
||||
self.log.debug("cleaning up chroot in %s", self.dir)
|
||||
self.rmtree(self.dir)
|
||||
else:
|
||||
self.log.debug("saving chroot in %s", self.dir)
|
||||
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def mkChroot(cls, initrd, log=None):
|
||||
with InitrdContext(initrd=initrd, log=log) as ctx:
|
||||
initrdDir = ctx.dir
|
||||
ctx.initrd = None
|
||||
# save the unpacked directory, do not clean it up
|
||||
# (it's inside this chroot anyway)
|
||||
return initrdDir
|
||||
|
||||
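A short usage sketch of InitrdContext as defined above; the initrd path and the command run inside the chroot are placeholders:

    import logging, subprocess

    log = logging.getLogger("example")
    with InitrdContext(initrd="/tmp/initrd.img", log=log) as ctx:
        # ctx.dir now holds the unpacked initrd with /dev populated and
        # /proc, /sys and /dev/pts mounted, ready to be used as a chroot.
        subprocess.check_call(('chroot', ctx.dir, '/bin/true'))
    # on exit the nested mounts are unmounted and the tree is removed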
class ChrootSubprocessMixin:
|
||||
|
||||
chrootDir = None
|
||||
mounted = False
|
||||
# initialize this in a concrete class
|
||||
|
||||
def check_call(self, *args, **kwargs):
|
||||
args = list(args)
|
||||
kwargs = dict(kwargs)
|
||||
|
||||
cwd = kwargs.pop('cwd', None)
|
||||
if cwd is not None:
|
||||
self.log.debug("+ cd " + cwd)
|
||||
|
||||
if args:
|
||||
cmd = args.pop(0)
|
||||
else:
|
||||
cmd = kwargs.pop('cmd')
|
||||
if isinstance(cmd, basestring):
|
||||
cmd = ('chroot', self.chrootDir,
|
||||
'/bin/sh', '-c', 'IFS=;' + cmd,)
|
||||
else:
|
||||
cmd = ['chroot', self.chrootDir,] + list(cmd)
|
||||
|
||||
if not self.mounted:
|
||||
with InitrdContext(dir=self.chrootDir, log=self.log) as ctx:
|
||||
self.log.debug("+ " + " ".join(cmd))
|
||||
subprocess.check_call(cmd, *args, cwd=cwd, **kwargs)
|
||||
else:
|
||||
self.log.debug("+ " + " ".join(cmd))
|
||||
subprocess.check_call(cmd, *args, cwd=cwd, **kwargs)
|
||||
|
||||
def check_output(self, *args, **kwargs):
|
||||
args = list(args)
|
||||
kwargs = dict(kwargs)
|
||||
|
||||
cwd = kwargs.pop('cwd', None)
|
||||
if cwd is not None:
|
||||
self.log.debug("+ cd " + cwd)
|
||||
|
||||
if args:
|
||||
cmd = args.pop(0)
|
||||
else:
|
||||
cmd = kwargs.pop('cmd')
|
||||
if isinstance(cmd, basestring):
|
||||
cmd = ('chroot', self.chrootDir,
|
||||
'/bin/sh', '-c', 'IFS=;' + cmd,)
|
||||
else:
|
||||
cmd = ['chroot', self.chrootDir,] + list(cmd)
|
||||
|
||||
if not self.mounted:
|
||||
with InitrdContext(dir=self.chrootDir, log=self.log) as ctx:
|
||||
self.log.debug("+ " + " ".join(cmd))
|
||||
return subprocess.check_output(cmd, *args, cwd=cwd, **kwargs)
|
||||
else:
|
||||
self.log.debug("+ " + " ".join(cmd))
|
||||
return subprocess.check_output(cmd, *args, cwd=cwd, **kwargs)
|
||||
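ChrootSubprocessMixin deliberately leaves chrootDir and mounted to the concrete class. A minimal sketch of such a class; the class name, chroot path and command are made up:

    import logging

    class LoaderShell(ChrootSubprocessMixin):

        def __init__(self, chrootDir, log=None):
            self.chrootDir = chrootDir   # chroot tree prepared elsewhere
            self.mounted = False         # let the mixin set up /proc, /sys, /dev/pts
            self.log = log or logging.getLogger("loader-shell")

        def osRelease(self):
            # wraps "chroot <chrootDir> cat /etc/os-release"
            return self.check_output(('cat', '/etc/os-release'))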
@@ -0,0 +1,81 @@
|
||||
"""RecoverApp.py
|
||||
|
||||
Application-level code for Switch Light recovery.
|
||||
"""
|
||||
|
||||
import os, sys
|
||||
import imp
|
||||
import logging
|
||||
from ConfUtils import UbootEnv
|
||||
|
||||
class App:
|
||||
|
||||
def __init__(self, log=None):
|
||||
|
||||
if log is not None:
|
||||
self.log = log
|
||||
else:
|
||||
self.log = logging.getLogger(self.__class__.__name__)
|
||||
|
||||
self.recovery = None
|
||||
|
||||
def run(self):
|
||||
|
||||
if os.path.exists(UbootEnv.SETENV):
|
||||
self.ubootEnv = UbootEnv(log=self.log.getChild("u-boot"))
|
||||
else:
|
||||
self.ubootEnv = None
|
||||
|
||||
# load the platform-specific blob
|
||||
if os.path.exists("/etc/onl/platform"):
|
||||
with open("/etc/onl/platform") as fd:
|
||||
plat = fd.read().strip()
|
||||
else:
|
||||
self.log.error("cannot recover non-ONL platform")
|
||||
return 1
|
||||
|
||||
p = ("/lib/platform-config/%s/python/recover.py"
|
||||
% (plat,))
|
||||
if not os.path.exists(p):
|
||||
self.log.error("missing recover profile %s", p)
|
||||
return 1
|
||||
mod = imp.load_source("platform_recover", p)
|
||||
|
||||
# run the platform-specific installer
|
||||
self.recovery = mod.Recovery(ubootEnv=self.ubootEnv,
|
||||
log=self.log)
|
||||
try:
|
||||
code = self.recovery.run()
|
||||
except:
|
||||
self.log.exception("recovery failed")
|
||||
code = 1
|
||||
if code: return code
|
||||
|
||||
return 0
|
||||
|
||||
def shutdown(self):
|
||||
|
||||
recovery, self.recovery = self.recovery, None
|
||||
if recovery is not None:
|
||||
recovery.shutdown()
|
||||
|
||||
@classmethod
|
||||
def main(cls):
|
||||
|
||||
logging.basicConfig()
|
||||
logger = logging.getLogger("recover")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
|
||||
app = cls(log=logger)
|
||||
try:
|
||||
code = app.run()
|
||||
except:
|
||||
logger.exception("runner failed")
|
||||
code = 1
|
||||
app.shutdown()
|
||||
sys.exit(code)
|
||||
|
||||
main = App.main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
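RecoverApp loads a per-platform recover.py via imp.load_source and expects it to expose a Recovery class taking ubootEnv and log keyword arguments, with run() and shutdown() methods. A hypothetical minimal sketch of such a module; nothing here is shipped by this commit:

    # /lib/platform-config/<platform>/python/recover.py (hypothetical sketch)
    import logging

    class Recovery(object):

        def __init__(self, ubootEnv=None, log=None):
            self.ubootEnv = ubootEnv
            self.log = log or logging.getLogger("recover")

        def run(self):
            # platform-specific recovery steps would go here
            self.log.info("nothing to recover on this platform")
            return 0

        def shutdown(self):
            pass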
@@ -0,0 +1,303 @@
|
||||
"""ShellApp.py
|
||||
"""
|
||||
|
||||
import os, sys
|
||||
import glob
|
||||
import tempfile
|
||||
import logging
|
||||
import subprocess
|
||||
import argparse
|
||||
import string
|
||||
import struct
|
||||
from InstallUtils import InitrdContext, MountContext
|
||||
from InstallUtils import SubprocessMixin
|
||||
from InstallUtils import ProcMountsParser, ProcMtdParser
|
||||
from InstallUtils import BlkidParser
|
||||
import Fit
|
||||
|
||||
import onl.platform.current
|
||||
|
||||
class AppBase(SubprocessMixin):
|
||||
|
||||
@property
|
||||
def PROG(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def __init__(self, command=None, log=None):
|
||||
|
||||
if log is not None:
|
||||
self.log = log
|
||||
else:
|
||||
self.log = logging.getLogger(self.__class__.__name__)
|
||||
self.command = command
|
||||
|
||||
def _runInitrdShell(self, initrd):
|
||||
with InitrdContext(initrd=initrd, log=self.log) as ctx:
|
||||
if self.command is not None:
|
||||
cmd = ('chroot', ctx.dir,
|
||||
'/bin/sh', '-c', 'IFS=;' + self.command)
|
||||
else:
|
||||
cmd = ('chroot', ctx.dir,
|
||||
'/bin/sh', '-i')
|
||||
try:
|
||||
self.check_call(cmd)
|
||||
except subprocess.CalledProcessError, what:
|
||||
pass
|
||||
return 0
|
||||
|
||||
def _runFitShell(self, device):
|
||||
self.log.debug("parsing FIT image in %s", device)
|
||||
p = Fit.Parser(path=device, log=self.log)
|
||||
node = p.getInitrdNode()
|
||||
if node is None:
|
||||
self.log.error("cannot find initrd node in FDT")
|
||||
return 1
|
||||
prop = node.properties.get('data', None)
|
||||
if prop is None:
|
||||
self.log.error("cannot find initrd data property in FDT")
|
||||
return 1
|
||||
with open(device) as fd:
|
||||
self.log.debug("reading initrd at [%x:%x]",
|
||||
prop.offset, prop.offset+prop.sz)
|
||||
fd.seek(prop.offset, 0)
|
||||
buf = fd.read(prop.sz)
|
||||
try:
|
||||
fno, initrd = tempfile.mkstemp(prefix="initrd-",
|
||||
suffix=".img")
|
||||
self.log.debug("+ cat > %s", initrd)
|
||||
with os.fdopen(fno, "w") as fd:
|
||||
fd.write(buf)
|
||||
return self._runInitrdShell(initrd)
|
||||
finally:
|
||||
self.unlink(initrd)
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def main(cls):
|
||||
|
||||
logging.basicConfig()
|
||||
logger = logging.getLogger(cls.PROG)
|
||||
logger.setLevel(logging.INFO)
|
||||
|
||||
ap = argparse.ArgumentParser(prog=cls.PROG)
|
||||
ap.add_argument('-v', '--verbose', action='store_true',
|
||||
help='Enable verbose logging')
|
||||
ap.add_argument('-q', '--quiet', action='store_true',
|
||||
help='Suppress logging')
|
||||
ap.add_argument('-c', type=str, dest='command',
|
||||
help='Run a batch command')
|
||||
|
||||
try:
|
||||
args = ap.parse_args()
|
||||
except SystemExit, what:
|
||||
sys.exit(what.code)
|
||||
|
||||
if args.verbose:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
if args.quiet:
|
||||
logger.setLevel(logging.ERROR)
|
||||
|
||||
app = cls(command=args.command, log=logger)
|
||||
try:
|
||||
code = app.run()
|
||||
except:
|
||||
logger.exception("runner failed")
|
||||
code = 1
|
||||
app.shutdown()
|
||||
sys.exit(code)
|
||||
|
||||
class Onie(AppBase):
|
||||
|
||||
PROG = "onie-shell"
|
||||
|
||||
def run(self):
|
||||
|
||||
self.pm = ProcMountsParser()
|
||||
self.blkid = BlkidParser(log=self.log.getChild("blkid"))
|
||||
self.mtd = ProcMtdParser(log=self.log.getChild("mtd"))
|
||||
|
||||
def _g(d):
|
||||
pat = os.path.join(d, "onie/initrd.img*")
|
||||
l = glob.glob(pat)
|
||||
if l: return l[0]
|
||||
return None
|
||||
|
||||
# try to find a mounted, labeled partition
|
||||
try:
|
||||
dev = self.blkid['ONIE-BOOT'].device
|
||||
except IndexError:
|
||||
dev = None
|
||||
if dev is not None:
|
||||
self.log.debug("found ONIE boot device %s", dev)
|
||||
|
||||
parts = [p for p in self.pm.mounts if p.device == dev]
|
||||
if parts:
|
||||
onieDir = parts[0].dir
|
||||
self.log.debug("found ONIE boot mounted at %s", onieDir)
|
||||
initrd = _g(onieDir)
|
||||
if initrd is None:
|
||||
self.log.warn("cannot find ONIE initrd on %s", onieDir)
|
||||
else:
|
||||
self.log.debug("found ONIE initrd at %s", initrd)
|
||||
return self._runInitrdShell(initrd)
|
||||
|
||||
with MountContext(dev, log=self.log) as ctx:
|
||||
initrd = _g(ctx.dir)
|
||||
if initrd is None:
|
||||
self.log.warn("cannot find ONIE initrd on %s", dev)
|
||||
else:
|
||||
self.log.debug("found ONIE initrd at %s", initrd)
|
||||
return self._runInitrdShell(initrd)
|
||||
|
||||
self.log.warn("cannot find an ONIE initrd")
|
||||
return 1
|
||||
|
||||
# try to find onie initrd on a mounted fs (GRUB);
|
||||
# for ONIE images this is usually /mnt/onie-boot
|
||||
for part in self.pm.mounts:
|
||||
if not part.device.startswith('/dev/'): continue
|
||||
initrd = _g(part.dir)
|
||||
if initrd is None:
|
||||
self.log.debug("cannot find ONIE initrd on %s (%s)",
|
||||
part.device, part.dir)
|
||||
else:
|
||||
self.log.debug("found ONIE initrd at %s", initrd)
|
||||
return self._runInitrdShell(initrd)
|
||||
|
||||
# grovel through MTD devices (u-boot)
|
||||
parts = [p for p in self.mtd.parts if p.label == "onie"]
|
||||
if parts:
|
||||
part = parts[0]
|
||||
self.log.debug("found ONIE MTD device %s",
|
||||
part.charDevice or part.blockDevice)
|
||||
return self._runFitShell(part.blockDevice)
|
||||
elif self.mtd.parts:
|
||||
self.log.error("cannot find ONIE MTD device")
|
||||
return 1
|
||||
|
||||
self.log.error("cannot find ONIE initrd")
|
||||
return 1
|
||||
|
||||
class Loader(AppBase):
|
||||
|
||||
PROG = "loader-shell"
|
||||
|
||||
def runGrub(self):
|
||||
|
||||
try:
|
||||
dev = self.blkid['ONL-BOOT'].device
|
||||
except IndexError:
|
||||
dev = None
|
||||
if dev is None:
|
||||
self.log.error("cannot find GRUB partition %s", dev)
|
||||
return 1
|
||||
|
||||
initrd = self.pc['grub']['initrd']
|
||||
if type(initrd) == dict: initrd = initrd['=']
|
||||
|
||||
parts = [p for p in self.pm.mounts if p.device == dev]
|
||||
if parts:
|
||||
grubDir = parts[0].dir
|
||||
self.log.debug("found loader device %s mounted at %s",
|
||||
dev, grubDir)
|
||||
p = os.path.join(grubDir, initrd)
|
||||
if not os.path.exists(p):
|
||||
self.log.error("cannot find initrd %s", p)
|
||||
return 1
|
||||
self.log.debug("found loader initrd at %s", p)
|
||||
return self._runInitrdShell(p)
|
||||
|
||||
with MountContext(dev, log=self.log) as ctx:
|
||||
p = os.path.join(ctx.dir, initrd)
|
||||
if not os.path.exists(p):
|
||||
self.log.error("cannot find initrd %s:%s", dev, p)
|
||||
return 1
|
||||
self.log.debug("found loader initrd at %s:%s", dev, p)
|
||||
return self._runInitrdShell(p)
|
||||
|
||||
def runUboot(self):
|
||||
|
||||
dev = self.pc['loader']['device']
|
||||
self.log.info("found loader device %s", dev)
|
||||
|
||||
parts = self.pc['installer']
|
||||
bootPart = None
|
||||
bootPartno = None
|
||||
for idx, part in enumerate(self.pc['installer']):
|
||||
label, pdata = list(part.items())[0]
|
||||
if label == 'ONL-BOOT':
|
||||
bootPart = pdata
|
||||
bootPartno = idx + 1
|
||||
break
|
||||
if bootPart is None:
|
||||
self.log.info("cannot find ONL-BOOT declaration")
|
||||
return 1
|
||||
|
||||
fmt = bootPart.get('format', 'ext2')
|
||||
if fmt == 'raw':
|
||||
bootDevice = dev + str(bootPartno)
|
||||
else:
|
||||
bootDevice = self.blkid['ONL-BOOT'].device
|
||||
|
||||
# run from a raw partition
|
||||
if fmt == 'raw':
|
||||
self.log.info("found (raw) boot partition %s", bootDevice)
|
||||
return self._runFitShell(bootDevice)
|
||||
|
||||
l = []
|
||||
|
||||
p = self.pc['flat_image_tree']['itb']
|
||||
if type(p) == dict: p = p['=']
|
||||
if p not in l: l.append(p)
|
||||
|
||||
p = self.platform.platform() + '.itb'
|
||||
if p not in l: l.append(p)
|
||||
|
||||
p = 'onl-loader-fit.itb'
|
||||
if p not in l: l.append(p)
|
||||
|
||||
self.log.info("looking for loader images %s", ", ".join(l))
|
||||
|
||||
# run from a file in a mounted filesystem
|
||||
parts = [p for p in self.pm.mounts if p.device == bootDevice]
|
||||
if parts:
|
||||
loaderDir = parts[0].dir
|
||||
self.log.debug("found loader device mounted at %s", loaderDir)
|
||||
for e in l:
|
||||
p = os.path.join(loaderDir, e)
|
||||
if os.path.exists(p): return self._runFitShell(p)
|
||||
self.log.error("cannot find an ITB")
|
||||
return 1
|
||||
|
||||
# run from a file in an umounted filesystem
|
||||
with MountContext(bootDevice, log=self.log) as ctx:
|
||||
self.log.info("found (%s) loader device %s", fmt, bootDevice)
|
||||
for e in l:
|
||||
p = os.path.join(ctx.dir, e)
|
||||
if os.path.exists(p): return self._runFitShell(p)
|
||||
self.log.error("cannot find an ITB")
|
||||
return 1
|
||||
|
||||
def run(self):
|
||||
|
||||
self.platform = onl.platform.current.OnlPlatform()
|
||||
self.pc = self.platform.platform_config
|
||||
|
||||
self.pm = ProcMountsParser()
|
||||
self.blkid = BlkidParser(log=self.log.getChild("blkid"))
|
||||
|
||||
if 'grub' in self.pc:
|
||||
return self.runGrub()
|
||||
|
||||
if 'flat_image_tree' in self.pc:
|
||||
return self.runUboot()
|
||||
|
||||
self.log.error("invalid platform-config")
|
||||
return 1
|
||||
|
||||
main = Onie.main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,4 @@
|
||||
"""__init__.py
|
||||
|
||||
Module setup for switchlight.install
|
||||
"""
|
||||
@@ -10,11 +10,13 @@
|
||||
############################################################
|
||||
|
||||
import pprint
|
||||
import yaml
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
|
||||
import yaml
|
||||
import onl.YamlUtils
|
||||
|
||||
class OnlInfoObject(object):
|
||||
DEFAULT_INDENT=" "
|
||||
|
||||
@@ -99,18 +101,41 @@ class OnlPlatformBase(object):
|
||||
CONFIG_DIR='/lib/platform-config'
|
||||
CURRENT_DIR=os.path.join(CONFIG_DIR, 'current')
|
||||
|
||||
CONFIG_DEFAULT_GRUB = "/lib/vendor-config/onl/platform-config-defaults-x86-64.yml"
|
||||
CONFIG_DEFAULT_UBOOT = "/lib/vendor-config/onl/platform-config-defaults-uboot.yml"
|
||||
|
||||
def __init__(self):
|
||||
self.add_info_json("onie_info", "%s/onie-info.json" % self.basedir_onl(), OnieInfo,
|
||||
required=False)
|
||||
self.add_info_json("platform_info", "%s/platform-info.json" % self.basedir_onl(),
|
||||
required=False)
|
||||
|
||||
# Load the platform config yaml file
|
||||
y = os.path.join(self.basedir_onl(), "%s.yml" % self.platform())
|
||||
if os.path.exists(y):
|
||||
self.platform_config = yaml.load(open(y))
|
||||
# Find the base platform config
|
||||
if self.platform().startswith('x86-64'):
|
||||
y1 = self.CONFIG_DEFAULT_GRUB
|
||||
elif self.platform().startswith('powerpc'):
|
||||
y1 = self.CONFIG_DEFAULT_UBOOT
|
||||
elif self.platform().startswith('arm'):
|
||||
y1 = self.CONFIG_DEFAULT_UBOOT
|
||||
else:
|
||||
y1 = None
|
||||
|
||||
# Find and load the platform config yaml file
|
||||
y2 = os.path.join(self.basedir_onl(), "%s.yml" % self.platform())
|
||||
if os.path.exists(y1) and os.path.exists(y2):
|
||||
self.platform_config = onl.YamlUtils.merge(y1, y2)
|
||||
if self.platform() in self.platform_config:
|
||||
self.platform_config = self.platform_config[self.platform()]
|
||||
elif os.path.exists(y2):
|
||||
with open(y2) as fd:
|
||||
self.platform_config = yaml.load(fd)
|
||||
if self.platform() in self.platform_config:
|
||||
self.platform_config = self.platform_config[self.platform()]
|
||||
elif os.path.exists(y1):
|
||||
with open(y1) as fd:
|
||||
self.platform_config = yaml.load(fd)
|
||||
if 'default' in self.platform_config:
|
||||
self.platform_config = self.platform_config['default']
|
||||
else:
|
||||
self.platform_config = {}
|
||||
|
||||
|
||||
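Consumers elsewhere in this commit (Loader.runGrub() and runUboot() in ShellApp.py) index platform_config with keys such as 'grub', 'flat_image_tree', 'loader' and 'installer'. A hypothetical sketch of the resulting dictionary shape for a GRUB platform; every concrete value is made up, and real configurations come from the platform-config packages:

    platform_config = {
        'grub': {
            'initrd': 'onl-loader-initrd-amd64.cpio.gz',
        },
        'loader': {
            'device': '/dev/sda',
        },
        'installer': [
            # list of single-key dicts, one per partition, in disk order
            {'ONL-BOOT': {'format': 'ext4'}},
            {'ONL-DATA': {'format': 'ext4'}},
        ],
    }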
@@ -14,12 +14,23 @@
|
||||
# platform-config packages.
|
||||
#
|
||||
############################################################
|
||||
import os
|
||||
import importlib
|
||||
|
||||
def import_subsystem_platform_class(subsystem='onl', klass='OnlPlatform'):
|
||||
# Determine the current platform name.
|
||||
with open("/etc/onl/platform", 'r') as f:
|
||||
platform=f.read().strip()
|
||||
platform = None
|
||||
if os.path.exists("/etc/onl/platform"):
|
||||
with open("/etc/onl/platform", 'r') as f:
|
||||
platform=f.read().strip()
|
||||
elif os.path.exists("/etc/machine.conf"):
|
||||
with open("/etc/machine.conf", 'r') as f:
|
||||
lines = f.readlines()
|
||||
lines = [x for x in lines if x.startswith('onie_platform=')]
|
||||
if lines:
|
||||
platform = lines[0].partition('=')[2].strip()
|
||||
if platform is None:
|
||||
raise RuntimeError("cannot find a platform declaration")
|
||||
|
||||
platform_module = platform.replace('-', '_')
|
||||
|
||||
|
||||
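The new fallback reads the ONIE-provided /etc/machine.conf when /etc/onl/platform is absent; a sketch of the line it looks for and the resulting module name, with a made-up platform string:

    # Illustrative only; the platform value is made up.
    # /etc/machine.conf (written by ONIE) contains, among other keys:
    #     onie_platform=x86_64-example_vendor_box-r0
    line = "onie_platform=x86_64-example_vendor_box-r0\n"
    platform = line.partition('=')[2].strip()      # 'x86_64-example_vendor_box-r0'
    platform_module = platform.replace('-', '_')   # 'x86_64_example_vendor_box_r0'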
@@ -1,6 +0,0 @@
|
||||
config BR2_PACKAGE_PYTHON_PYBLKID
|
||||
bool "python-pyblkid"
|
||||
depends on BR2_PACKAGE_PYTHON
|
||||
depends on BR2_PACKAGE_UTIL_LINUX
|
||||
help
|
||||
Include the 'pyblkid' Python library
|
||||
@@ -1,41 +0,0 @@
|
||||
######################################################################
|
||||
##
|
||||
## python-pyblkid.mk
|
||||
##
|
||||
######################################################################
|
||||
|
||||
PYTHON_PYBLKID_VERSION = 0.0.1
|
||||
PYTHON_PYBLKID_SOURCE = pyblkid-$(PYTHON_PYBLKID_VERSION).tar.bz2
|
||||
PYTHON_PYBLKID_INSTALL_STAGING = NO
|
||||
PYTHON_PYBLKID_INSTALL_TARGET = YES
|
||||
PYTHON_PYBLKID_LICENSE = GPL
|
||||
PYTHON_PYBLKID_LICENSE_FILES = COPYING
|
||||
|
||||
PYTHON_PYBLKID_DEPENDENCIES = python util-linux
|
||||
|
||||
PYTHON_PYBLKID_INCLUDES = \
|
||||
--include-dirs $(STAGING_DIR)/usr/include:$(STAGING_DIR)/usr/include/python$(PYTHON_VERSION_MAJOR) \
|
||||
# THIS LINE INTENTIONALLY LEFT BLANK
|
||||
|
||||
PYTHON_PYBLKID_LIBDIRS = \
|
||||
--library-dirs $(STAGING_DIR)/usr/lib \
|
||||
# THIS LINE INTENTIONALLY LEFT BLANK
|
||||
|
||||
# see python-mad.mk
|
||||
PYTHON_PYBLKID_ENVIRONMENT = \
|
||||
CC="$(TARGET_CC)" \
|
||||
CFLAGS="$(TARGET_CFLAGS)" \
|
||||
LDSHARED="$(TARGET_CC) -shared" \
|
||||
LDFLAGS="$(TARGET_LDFLAGS)" \
|
||||
# THIS LINE INTENTIONALLY LEFT BLANK
|
||||
|
||||
define PYTHON_PYBLKID_BUILD_CMDS
|
||||
(cd $(@D); $(HOST_DIR)/usr/bin/python setup.py build_py)
|
||||
(cd $(@D); $(PYTHON_PYBLKID_ENVIRONMENT) $(HOST_DIR)/usr/bin/python setup.py build_ext $(PYTHON_PYBLKID_INCLUDES) $(PYTHON_PYBLKID_LIBDIRS))
|
||||
endef
|
||||
|
||||
define PYTHON_PYBLKID_INSTALL_TARGET_CMDS
|
||||
(cd $(@D); $(HOST_DIR)/usr/bin/python setup.py install --prefix=$(TARGET_DIR)/usr --install-scripts=$(TARGET_DIR)/usr/bin)
|
||||
endef
|
||||
|
||||
$(eval $(generic-package))
|
||||
@@ -13,6 +13,7 @@ PLATFORMS := $(shell onlpm --list-platforms --arch $(ARCH))
|
||||
endif
|
||||
|
||||
PLATFORM_PACKAGES := $(foreach p,$(PLATFORMS),onl-platform-config-$(p):$(ARCH))
|
||||
VENDOR_PACKAGES := $(foreach p,$(PLATFORMS),$(shell python $(ONL)/tools/onlplatform.py $(p) $(ARCH) vendor))
|
||||
|
||||
ROOT := root
|
||||
TARGET := onl-loader-initrd-$(ARCH).cpio.gz
|
||||
@@ -24,13 +25,22 @@ $(TARGET):
|
||||
$(ONLPM) --sudo --force --extract-dir onl-loader-initrd-files:all $(ROOT)
|
||||
$(ONLPM) --sudo --force --extract-dir onl-vendor-config-onl-loader:all $(ROOT)
|
||||
$(ONLPM) --sudo $(foreach p,$(PLATFORM_PACKAGES),--extract-dir $(p) $(ROOT))
|
||||
$(MAKE) __vendor_config_data
|
||||
$(ONLPM) --sudo --force --extract-dir onl-vendor-config-onl:all $(ROOT)
|
||||
$(ONL)/tools/sjson.py --kj version $(ONL)/make/version-onl.json --kl platforms $(PLATFORMS) --kv arch $(ARCH) --out manifest.json
|
||||
sudo mkdir -p $(ROOT)/etc/onl/loader && sudo cp manifest.json $(ROOT)/etc/onl/loader
|
||||
sudo $(ONL)/tools/makedevs -d $(ROOT)/etc/rootperms $(abspath $(ROOT))
|
||||
sudo $(ONL)/tools/cpiomod.py --cpio onl-buildroot-initrd-$(ARCH).cpio.gz --add-directory $(ROOT) --out $@
|
||||
sudo rm -rf $(ROOT) onl-buildroot-initrd-$(ARCH).cpio.gz
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
__vendor_config_data:
|
||||
set -e ;\
|
||||
vpkgs= ;\
|
||||
l="$(PLATFORMS)"; for p in $$l; do \
|
||||
vpkg=$$(python $(ONL)/tools/onlplatform.py $$p $(ARCH) vendor) ;\
|
||||
case " $$vpkgs " in *" $$vpkg "*) continue ;; esac ;\
|
||||
vpkgs=$$vpkgs$${vpkgs:+" "}$$vpkg ;\
|
||||
echo "Adding vendor package $$vpkg" ;\
|
||||
$(ONLPM) --sudo --force --extract-dir $$vpkg $(ROOT) ;\
|
||||
done ;\
|
||||
:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#
|
||||
# Automatically generated file; DO NOT EDIT.
|
||||
# Linux/x86_64 3.18.25 Kernel Configuration
|
||||
# Linux/x86 3.18.25 Kernel Configuration
|
||||
#
|
||||
CONFIG_64BIT=y
|
||||
CONFIG_X86_64=y
|
||||
@@ -772,10 +772,11 @@ CONFIG_BRIDGE_NETFILTER=y
|
||||
# Core Netfilter Configuration
|
||||
#
|
||||
CONFIG_NETFILTER_NETLINK=y
|
||||
# CONFIG_NETFILTER_NETLINK_ACCT is not set
|
||||
CONFIG_NETFILTER_NETLINK_ACCT=y
|
||||
CONFIG_NETFILTER_NETLINK_QUEUE=y
|
||||
CONFIG_NETFILTER_NETLINK_LOG=y
|
||||
CONFIG_NF_CONNTRACK=y
|
||||
CONFIG_NF_LOG_COMMON=y
|
||||
CONFIG_NF_CONNTRACK_MARK=y
|
||||
CONFIG_NF_CONNTRACK_SECMARK=y
|
||||
CONFIG_NF_CONNTRACK_ZONES=y
|
||||
@@ -783,6 +784,7 @@ CONFIG_NF_CONNTRACK_PROCFS=y
|
||||
CONFIG_NF_CONNTRACK_EVENTS=y
|
||||
# CONFIG_NF_CONNTRACK_TIMEOUT is not set
|
||||
CONFIG_NF_CONNTRACK_TIMESTAMP=y
|
||||
CONFIG_NF_CONNTRACK_LABELS=y
|
||||
CONFIG_NF_CT_PROTO_DCCP=y
|
||||
CONFIG_NF_CT_PROTO_GRE=y
|
||||
CONFIG_NF_CT_PROTO_SCTP=y
|
||||
@@ -799,8 +801,9 @@ CONFIG_NF_CONNTRACK_SANE=y
|
||||
CONFIG_NF_CONNTRACK_SIP=y
|
||||
CONFIG_NF_CONNTRACK_TFTP=y
|
||||
CONFIG_NF_CT_NETLINK=y
|
||||
# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
|
||||
# CONFIG_NETFILTER_NETLINK_QUEUE_CT is not set
|
||||
CONFIG_NF_CT_NETLINK_TIMEOUT=y
|
||||
CONFIG_NF_CT_NETLINK_HELPER=y
|
||||
CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
|
||||
CONFIG_NF_NAT=y
|
||||
CONFIG_NF_NAT_NEEDED=y
|
||||
CONFIG_NF_NAT_PROTO_DCCP=y
|
||||
@@ -832,17 +835,17 @@ CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_CT=y
|
||||
CONFIG_NETFILTER_XT_TARGET_DSCP=y
|
||||
CONFIG_NETFILTER_XT_TARGET_HL=y
|
||||
# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
|
||||
CONFIG_NETFILTER_XT_TARGET_HMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
|
||||
# CONFIG_NETFILTER_XT_TARGET_LOG is not set
|
||||
CONFIG_NETFILTER_XT_TARGET_LOG=y
|
||||
CONFIG_NETFILTER_XT_TARGET_MARK=y
|
||||
CONFIG_NETFILTER_XT_NAT=y
|
||||
# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set
|
||||
CONFIG_NETFILTER_XT_TARGET_NETMAP=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
|
||||
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_RATEEST=y
|
||||
# CONFIG_NETFILTER_XT_TARGET_REDIRECT is not set
|
||||
CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TEE=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TRACE=y
|
||||
@@ -854,12 +857,12 @@ CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y
|
||||
# Xtables matches
|
||||
#
|
||||
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_BPF is not set
|
||||
# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_BPF=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CGROUP=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
|
||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
|
||||
@@ -872,20 +875,20 @@ CONFIG_NETFILTER_XT_MATCH_ESP=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HELPER=y
|
||||
CONFIG_NETFILTER_XT_MATCH_HL=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_IPCOMP=y
|
||||
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_IPVS=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_L2TP=y
|
||||
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
|
||||
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MAC=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_NFACCT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_OSF=y
|
||||
CONFIG_NETFILTER_XT_MATCH_OWNER=y
|
||||
CONFIG_NETFILTER_XT_MATCH_POLICY=y
|
||||
# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
|
||||
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
|
||||
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
|
||||
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
|
||||
CONFIG_NETFILTER_XT_MATCH_RATEEST=y
|
||||
@@ -965,7 +968,7 @@ CONFIG_NF_DEFRAG_IPV4=y
|
||||
CONFIG_NF_CONNTRACK_IPV4=y
|
||||
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
|
||||
# CONFIG_NF_LOG_ARP is not set
|
||||
# CONFIG_NF_LOG_IPV4 is not set
|
||||
CONFIG_NF_LOG_IPV4=y
|
||||
CONFIG_NF_REJECT_IPV4=y
|
||||
CONFIG_NF_NAT_IPV4=y
|
||||
CONFIG_NF_NAT_MASQUERADE_IPV4=y
|
||||
@@ -1000,7 +1003,7 @@ CONFIG_IP_NF_ARP_MANGLE=y
|
||||
CONFIG_NF_DEFRAG_IPV6=y
|
||||
CONFIG_NF_CONNTRACK_IPV6=y
|
||||
CONFIG_NF_REJECT_IPV6=y
|
||||
# CONFIG_NF_LOG_IPV6 is not set
|
||||
CONFIG_NF_LOG_IPV6=y
|
||||
# CONFIG_NF_NAT_IPV6 is not set
|
||||
CONFIG_IP6_NF_IPTABLES=y
|
||||
CONFIG_IP6_NF_MATCH_AH=y
|
||||
@@ -1019,7 +1022,27 @@ CONFIG_IP6_NF_TARGET_REJECT=y
|
||||
CONFIG_IP6_NF_MANGLE=y
|
||||
CONFIG_IP6_NF_RAW=y
|
||||
# CONFIG_IP6_NF_NAT is not set
|
||||
# CONFIG_BRIDGE_NF_EBTABLES is not set
|
||||
CONFIG_BRIDGE_NF_EBTABLES=y
|
||||
CONFIG_BRIDGE_EBT_BROUTE=y
|
||||
CONFIG_BRIDGE_EBT_T_FILTER=y
|
||||
CONFIG_BRIDGE_EBT_T_NAT=y
|
||||
CONFIG_BRIDGE_EBT_802_3=y
|
||||
CONFIG_BRIDGE_EBT_AMONG=y
|
||||
CONFIG_BRIDGE_EBT_ARP=y
|
||||
CONFIG_BRIDGE_EBT_IP=y
|
||||
CONFIG_BRIDGE_EBT_IP6=y
|
||||
CONFIG_BRIDGE_EBT_LIMIT=y
|
||||
CONFIG_BRIDGE_EBT_MARK=y
|
||||
CONFIG_BRIDGE_EBT_PKTTYPE=y
|
||||
CONFIG_BRIDGE_EBT_STP=y
|
||||
CONFIG_BRIDGE_EBT_VLAN=y
|
||||
CONFIG_BRIDGE_EBT_ARPREPLY=y
|
||||
CONFIG_BRIDGE_EBT_DNAT=y
|
||||
CONFIG_BRIDGE_EBT_MARK_T=y
|
||||
CONFIG_BRIDGE_EBT_REDIRECT=y
|
||||
CONFIG_BRIDGE_EBT_SNAT=y
|
||||
CONFIG_BRIDGE_EBT_LOG=y
|
||||
CONFIG_BRIDGE_EBT_NFLOG=y
|
||||
# CONFIG_IP_DCCP is not set
|
||||
CONFIG_IP_SCTP=y
|
||||
# CONFIG_SCTP_DBG_OBJCNT is not set
|
||||
@@ -1035,14 +1058,14 @@ CONFIG_SCTP_COOKIE_HMAC_MD5=y
|
||||
CONFIG_STP=y
|
||||
CONFIG_BRIDGE=y
|
||||
CONFIG_BRIDGE_IGMP_SNOOPING=y
|
||||
# CONFIG_BRIDGE_VLAN_FILTERING is not set
|
||||
CONFIG_BRIDGE_VLAN_FILTERING=y
|
||||
CONFIG_HAVE_NET_DSA=y
|
||||
CONFIG_VLAN_8021Q=y
|
||||
# CONFIG_VLAN_8021Q_GVRP is not set
|
||||
# CONFIG_VLAN_8021Q_MVRP is not set
|
||||
# CONFIG_DECNET is not set
|
||||
CONFIG_LLC=y
|
||||
# CONFIG_LLC2 is not set
|
||||
CONFIG_LLC2=y
|
||||
# CONFIG_IPX is not set
|
||||
# CONFIG_ATALK is not set
|
||||
# CONFIG_X25 is not set
|
||||
@@ -1056,15 +1079,15 @@ CONFIG_DNS_RESOLVER=y
|
||||
# CONFIG_BATMAN_ADV is not set
|
||||
# CONFIG_OPENVSWITCH is not set
|
||||
# CONFIG_VSOCKETS is not set
|
||||
# CONFIG_NETLINK_MMAP is not set
|
||||
# CONFIG_NETLINK_DIAG is not set
|
||||
CONFIG_NETLINK_MMAP=y
|
||||
CONFIG_NETLINK_DIAG=y
|
||||
# CONFIG_NET_MPLS_GSO is not set
|
||||
# CONFIG_HSR is not set
|
||||
CONFIG_RPS=y
|
||||
CONFIG_RFS_ACCEL=y
|
||||
CONFIG_XPS=y
|
||||
# CONFIG_CGROUP_NET_PRIO is not set
|
||||
# CONFIG_CGROUP_NET_CLASSID is not set
|
||||
CONFIG_CGROUP_NET_CLASSID=y
|
||||
CONFIG_NET_RX_BUSY_POLL=y
|
||||
CONFIG_BQL=y
|
||||
# CONFIG_BPF_JIT is not set
|
||||
@@ -1173,7 +1196,7 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
|
||||
#
|
||||
# CONFIG_SENSORS_LIS3LV02D is not set
|
||||
# CONFIG_AD525X_DPOT is not set
|
||||
# CONFIG_DUMMY_IRQ is not set
|
||||
CONFIG_DUMMY_IRQ=y
|
||||
# CONFIG_IBM_ASM is not set
|
||||
# CONFIG_PHANTOM is not set
|
||||
# CONFIG_SGI_IOC4 is not set
|
||||
@@ -1486,7 +1509,7 @@ CONFIG_NETDEVICES=y
|
||||
CONFIG_MII=y
|
||||
CONFIG_NET_CORE=y
|
||||
# CONFIG_BONDING is not set
|
||||
# CONFIG_DUMMY is not set
|
||||
CONFIG_DUMMY=y
|
||||
# CONFIG_EQUALIZER is not set
|
||||
# CONFIG_NET_FC is not set
|
||||
# CONFIG_NET_TEAM is not set
|
||||
@@ -3007,6 +3030,21 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
|
||||
# CONFIG_UFS_FS is not set
|
||||
# CONFIG_EXOFS_FS is not set
|
||||
# CONFIG_F2FS_FS is not set
|
||||
CONFIG_AUFS_FS=y
|
||||
CONFIG_AUFS_BRANCH_MAX_127=y
|
||||
# CONFIG_AUFS_BRANCH_MAX_511 is not set
|
||||
# CONFIG_AUFS_BRANCH_MAX_1023 is not set
|
||||
# CONFIG_AUFS_BRANCH_MAX_32767 is not set
|
||||
CONFIG_AUFS_SBILIST=y
|
||||
# CONFIG_AUFS_HNOTIFY is not set
|
||||
# CONFIG_AUFS_EXPORT is not set
|
||||
# CONFIG_AUFS_XATTR is not set
|
||||
# CONFIG_AUFS_FHSM is not set
|
||||
# CONFIG_AUFS_RDU is not set
|
||||
# CONFIG_AUFS_SHWH is not set
|
||||
# CONFIG_AUFS_BR_RAMFS is not set
|
||||
CONFIG_AUFS_BDEV_LOOP=y
|
||||
# CONFIG_AUFS_DEBUG is not set
|
||||
CONFIG_ORE=y
|
||||
CONFIG_NETWORK_FILESYSTEMS=y
|
||||
CONFIG_NFS_FS=y
|
||||
|
||||
33877
packages/base/any/kernels/3.18.25/patches/aufs.patch
vendored
Normal file
File diff suppressed because it is too large
1
packages/base/any/kernels/3.18.25/patches/series
vendored
Normal file
@@ -0,0 +1 @@
aufs.patch
@@ -1,7 +1,23 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS4610
|
||||
#
|
||||
######################################################################
|
||||
|
||||
arm-accton-as4610-54-r0:
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.2-deb7-arm-iproc-all:armel, kernel-3.2-deb7-arm-iproc-all.bin.gz
|
||||
dtb: onl-kernel-3.2-deb7-arm-iproc-all:armel, accton_as4610_54.dtb
|
||||
kernel:
|
||||
<<: *arm-iproc-kernel
|
||||
dtb:
|
||||
=: accton_as4610_54.dtb
|
||||
<<: *arm-iproc-kernel-package
|
||||
itb:
|
||||
<<: *arm-itb
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
device: /dev/sda
|
||||
##partition: /dev/sda1
|
||||
loadaddr: 0x70000000
|
||||
nos_bootcmds: *usb2_bootcmds
|
||||
|
||||
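Note: the stanza above replaces the explicit "package, filename" strings with YAML merge keys (<<: *arm-iproc-kernel) plus an '=' override for the file name. The *arm-iproc-kernel anchor is not defined in this file; it is expected to come from the shared platform-config defaults that the tools elsewhere in this diff merge in (presumably by combining the documents before parsing, which is an assumption). A minimal Python sketch, assuming each anchor expands to a mapping with 'package' and '=' keys as the Image loader later in this diff expects; the local 'defs' block stands in for the real defaults file:

    import yaml  # PyYAML resolves '<<' merge keys in block mappings

    doc = yaml.safe_load("""
    defs:
      arm-iproc-kernel: &arm-iproc-kernel
        package: onl-kernel-3.2-deb7-arm-iproc-all:armel
        '=': kernel-3.2-deb7-arm-iproc-all.bin.gz

    arm-accton-as4610-54-r0:
      kernel:
        <<: *arm-iproc-kernel
      dtb:
        <<: *arm-iproc-kernel
        '=': accton_as4610_54.dtb   # an explicit key overrides the merged one
    """)

    dtb = doc['arm-accton-as4610-54-r0']['dtb']
    print(dtb['package'])   # onl-kernel-3.2-deb7-arm-iproc-all:armel
    print(dtb['='])         # accton_as4610_54.dtb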
@@ -1,7 +1,34 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS4600
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-accton-as4600-54t-r0:
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.9.6-powerpc-e500v:powerpc, kernel-3.9.6-powerpc-e500v.bin.gz
|
||||
dtb: onl-kernel-3.9.6-powerpc-e500v:powerpc, powerpc-as4600-54t.dtb
|
||||
kernel:
|
||||
<<: *e500v-kernel
|
||||
dtb:
|
||||
=: powerpc-as4600-54t.dtb
|
||||
<<: *e500v-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
device: /dev/sda
|
||||
##partition: /dev/sda1
|
||||
nos_bootcmds: *usb_bootcmds
|
||||
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 32MiB
|
||||
format: ext2
|
||||
- ONL-CONFIG:
|
||||
=: 32MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 448MiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
|
||||
@@ -1,7 +1,35 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS5610
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-accton-as5610-52x-r0:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.9.6-powerpc-e500v:powerpc, kernel-3.9.6-powerpc-e500v.bin.gz
|
||||
dtb: onl-kernel-3.9.6-powerpc-e500v:powerpc, powerpc-as5610-52x.dtb
|
||||
kernel:
|
||||
<<: *e500v-kernel
|
||||
dtb:
|
||||
=: powerpc-as5610-52x.dtb
|
||||
<<: *e500v-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
device: /dev/sda
|
||||
##partition: /dev/sda1
|
||||
nos_bootcmds: *usb_bootcmds
|
||||
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 128MiB
|
||||
format: ext2
|
||||
- ONL-CONFIG:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 768MiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
|
||||
@@ -1,9 +1,35 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS5710
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-accton-as5710-54x-r0:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.8.13-powerpc-e500mc:powerpc, kernel-3.8.13-powerpc-e500mc.bin.gz
|
||||
dtb: onl-kernel-3.8.13-powerpc-e500mc:powerpc, powerpc-accton-as5710-54x-r0.dtb
|
||||
kernel:
|
||||
<<: *e500mc-kernel
|
||||
dtb:
|
||||
=: powerpc-accton-as5710-54x-r0.dtb
|
||||
<<: *e500mc-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
raw: True
|
||||
device: /dev/sda
|
||||
nos_bootcmds: *usb_bootcmds
|
||||
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 128MiB
|
||||
format: ext2
|
||||
##format: raw
|
||||
- ONL-CONFIG:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 1GiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
|
||||
@@ -1,9 +1,21 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for as5710
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-accton-as5710-54x-r0b:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.8.13-powerpc-e500mc:powerpc, kernel-3.8.13-powerpc-e500mc.bin.gz
|
||||
dtb: onl-kernel-3.8.13-powerpc-e500mc:powerpc, powerpc-accton-as5710-54x-r0b.dtb
|
||||
kernel:
|
||||
<<: *e500mc-kernel
|
||||
dtb:
|
||||
=: powerpc-accton-as5710-54x-r0b.dtb
|
||||
<<: *e500mc-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
raw: True
|
||||
device: /dev/sda
|
||||
nos_bootcmds: *usb_bootcmds
|
||||
|
||||
|
||||
@@ -1,8 +1,36 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS6700
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-accton-as6700-32x-r0:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.8.13-powerpc-e500mc:powerpc, kernel-3.8.13-powerpc-e500mc.bin.gz
|
||||
dtb: onl-kernel-3.8.13-powerpc-e500mc:powerpc, powerpc-accton-as6700-32x-r0.dtb
|
||||
kernel:
|
||||
<<: *e500mc-kernel
|
||||
dtb:
|
||||
=: powerpc-accton-as6700-32x-r0.dtb
|
||||
<<: *e500mc-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
device: /dev/sda
|
||||
##partition: /dev/sda1
|
||||
nos_bootcmds: *usb_bootcmds
|
||||
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 128MiB
|
||||
format: ext2
|
||||
- ONL-CONFIG:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 768MiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
|
||||
|
||||
@@ -1,8 +1,36 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS6700
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-accton-as6700-32x-r1:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.8.13-powerpc-e500mc:powerpc, kernel-3.8.13-powerpc-e500mc.bin.gz
|
||||
dtb: onl-kernel-3.8.13-powerpc-e500mc:powerpc, powerpc-accton-as6700-32x-r1.dtb
|
||||
kernel:
|
||||
<<: *e500mc-kernel
|
||||
dtb:
|
||||
=: powerpc-accton-as6700-32x-r1.dtb
|
||||
<<: *e500mc-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
device: /dev/sda
|
||||
##partition: /dev/sda1
|
||||
nos_bootcmds: *usb_bootcmds
|
||||
|
||||
installer:
|
||||
- ONL-BOOT:
|
||||
=: 128MiB
|
||||
format: ext2
|
||||
- ONL-CONFIG:
|
||||
=: 128MiB
|
||||
format: ext4
|
||||
- ONL-IMAGES:
|
||||
=: 768MiB
|
||||
format: ext4
|
||||
- ONL-DATA:
|
||||
=: 100%
|
||||
format: ext4
|
||||
|
||||
|
||||
@@ -0,0 +1,27 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS5712
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as5712-54x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
|
||||
|
||||
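Note: the 'serial' and 'args' entries in these x86 platform configs use YAML folded block scalars ('>-'), so the options are written one per line but are consumed as a single space-joined string. A quick illustration; any YAML loader behaves the same way:

    import yaml

    doc = yaml.safe_load("""
    serial: >-
      --port=0x2f8
      --speed=115200
      --word=8
      --parity=no
      --stop=1
    args: >-
      nopat
      console=ttyS1,115200n8
    """)

    print(doc['serial'])  # --port=0x2f8 --speed=115200 --word=8 --parity=no --stop=1
    print(doc['args'])    # nopat console=ttyS1,115200n8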
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS5812
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as5812-54t-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS5812
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as5812-54x-r0:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS6712
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as6712-32x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS6812
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as6812-32x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS7512
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as5712-32x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
@@ -11,5 +11,5 @@
|
||||
|
||||
platform_installer() {
    # Standard installation to an available GPT partition
    installer_standard_gpt_install /dev/sdb
    installer_standard_gpt_install
}
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS7712
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as7712-32x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,115200n8
|
||||
@@ -11,5 +11,5 @@
|
||||
|
||||
platform_installer() {
    # Standard installation to an available GPT partition
    installer_standard_gpt_install /dev/sdb
    installer_standard_gpt_install
}
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for AS7716
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-as7716-32x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x3f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS0,115200n8
|
||||
@@ -0,0 +1,28 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for WEDGE
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-accton-wedge-16x-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--unit=0
|
||||
--speed=57600
|
||||
--word=8
|
||||
--parity=0
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS1,57600n8
|
||||
rd_NO_MD
|
||||
rd_NO_LUKS
|
||||
intel_iommu=off
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for REDSTONE
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-cel-redstone-xp-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x3f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-2
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS0,115200n8
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for KVM
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-kvm-x86-64-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x3f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-9-6
|
||||
|
||||
args: >-
|
||||
nopat
|
||||
console=ttyS0,115200n8
|
||||
|
||||
@@ -1,4 +1,18 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for ARM/QEMU
|
||||
#
|
||||
######################################################################
|
||||
|
||||
arm-qemu-armv7a-r0:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.2-deb7-arm-iproc-all:armel, kernel-3.2-deb7-arm-iproc-all.bin.gz
|
||||
dtb: onl-kernel-3.2-deb7-arm-iproc-all:armel, accton_as4610_54.dtb
|
||||
kernel:
|
||||
<<: *arm-iproc-kernel
|
||||
dtb:
|
||||
=: accton_as4610_54.dtb
|
||||
<<: *arm-iproc-kernel-package
|
||||
itb:
|
||||
<<: *arm-itb
|
||||
|
||||
@@ -1,8 +1,21 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform definition for LB9
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-quanta-lb9-r0:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.9.6-powerpc-e500v:powerpc, kernel-3.9.6-powerpc-e500v.bin.gz
|
||||
dtb: onl-kernel-3.9.6-powerpc-e500v:powerpc, powerpc-quanta-lb9-r0.dtb
|
||||
|
||||
kernel:
|
||||
<<: *e500v-kernel
|
||||
dtb:
|
||||
=: powerpc-quanta-lb9-r0.dtb
|
||||
<<: *e500v-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/sda1
|
||||
raw: True
|
||||
device: /dev/sda
|
||||
nos_bootcmds: *ide_bootcmds
|
||||
|
||||
@@ -1,7 +1,21 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for LY2
|
||||
#
|
||||
######################################################################
|
||||
|
||||
powerpc-quanta-ly2-r0:
|
||||
|
||||
flat_image_tree:
|
||||
kernel: onl-kernel-3.9.6-powerpc-e500v:powerpc, kernel-3.9.6-powerpc-e500v.bin.gz
|
||||
dtb: onl-kernel-3.9.6-powerpc-e500v:powerpc, powerpc-quanta-ly2-r0.dtb
|
||||
kernel:
|
||||
<<: *e500v-kernel
|
||||
dtb:
|
||||
=: powerpc-quanta-ly2-r0.dtb
|
||||
<<: *e500v-kernel-package
|
||||
|
||||
loader:
|
||||
partition: /dev/mmcblk0p1
|
||||
device: /dev/mmcblk0
|
||||
##partition: /dev/mmcblk0p1
|
||||
nos_bootcmds: *mmc_bootcmds
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for LY6
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-quanta-ly6-rangeley-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-9-6
|
||||
|
||||
args: >-
|
||||
console=ttyS1,115200n8
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
---
|
||||
|
||||
######################################################################
|
||||
#
|
||||
# platform-config for LY8
|
||||
#
|
||||
######################################################################
|
||||
|
||||
x86-64-quanta-ly8-rangeley-r0:
|
||||
|
||||
grub:
|
||||
|
||||
serial: >-
|
||||
--port=0x2f8
|
||||
--speed=115200
|
||||
--word=8
|
||||
--parity=no
|
||||
--stop=1
|
||||
|
||||
kernel:
|
||||
<<: *kernel-3-9-6
|
||||
|
||||
args: >-
|
||||
console=ttyS1,115200n8
|
||||
|
||||
@@ -7,7 +7,16 @@
|
||||
import subprocess
|
||||
import yaml
|
||||
import tempfile
|
||||
import json
|
||||
|
||||
import os, sys
|
||||
toolsdir = os.path.dirname(os.path.abspath(__file__))
|
||||
onldir = os.path.dirname(toolsdir)
|
||||
pydir = os.path.join(onldir, "packages/base/all/vendor-config-onl/src/python")
|
||||
sys.path.append(pydir)
|
||||
import onl.YamlUtils
|
||||
|
||||
from onlpm import *
|
||||
pm = defaultPm()
|
||||
|
||||
class Image(object):
|
||||
"""Base ITS Image Class"""
|
||||
@@ -20,19 +29,31 @@ class Image(object):
|
||||
self.entry = None
|
||||
self.os = None
|
||||
|
||||
if ',' in data:
|
||||
# Shorthand for tuple specifier
|
||||
data = tuple([ x.strip() for x in data.split(',') ])
|
||||
if type(data) == str:
|
||||
if ',' in data:
|
||||
pkg, fname = [x.strip() for x in data.split(',')]
|
||||
else:
|
||||
pkg, fname = None, data
|
||||
elif type(data) == list:
|
||||
pkg, fname = data
|
||||
elif type(data) == dict:
|
||||
fname = data['=']
|
||||
pkg = data.get('package', None)
|
||||
else:
|
||||
raise ValueError("invalid image specifier: %s" % repr(data))
|
||||
|
||||
if(isinstance(data, tuple)):
|
||||
#
|
||||
# The data specifies an ONLPM (package,file) pair.
|
||||
#
|
||||
self.data = subprocess.check_output("onlpm --quiet --find-file %s %s" % data, shell=True).strip()
|
||||
if pkg is not None:
|
||||
pm.require(pkg, force=False, build_missing=False)
|
||||
self.data = pm.opr.get_file(pkg, fname)
|
||||
else:
|
||||
self.data = data
|
||||
|
||||
self.name = os.path.basename(self.data)
|
||||
try:
|
||||
self.name = os.path.basename(fname)
|
||||
except:
|
||||
import pdb
|
||||
pdb.set_trace()
|
||||
raise
|
||||
self.description = self.name
|
||||
|
||||
|
||||
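Note: the rewritten Image constructor above accepts three specifier forms for an image: a string ("package, filename" or a bare path), a two-element list, or a mapping with '=' (the file name) and an optional 'package' key. A standalone sketch of just that normalization step; the package lookup itself (pm.require / pm.opr.get_file) is left out since it needs a real package repo:

    def parse_image_specifier(data):
        """Normalize the three accepted forms to a (pkg, fname) pair."""
        if isinstance(data, str):
            if ',' in data:
                pkg, fname = [x.strip() for x in data.split(',')]
            else:
                pkg, fname = None, data        # bare path, no package lookup
        elif isinstance(data, list):
            pkg, fname = data
        elif isinstance(data, dict):
            fname = data['=']
            pkg = data.get('package', None)
        else:
            raise ValueError("invalid image specifier: %r" % (data,))
        return pkg, fname

    # all three forms resolve to the same pair:
    print(parse_image_specifier("onl-kernel-3.2-deb7-arm-iproc-all:armel, accton_as4610_54.dtb"))
    print(parse_image_specifier(["onl-kernel-3.2-deb7-arm-iproc-all:armel", "accton_as4610_54.dtb"]))
    print(parse_image_specifier({'=': 'accton_as4610_54.dtb',
                                 'package': 'onl-kernel-3.2-deb7-arm-iproc-all:armel'}))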
@@ -66,8 +87,8 @@ class Image(object):
|
||||
class KernelImage(Image):
|
||||
"""Kernel image entry"""
|
||||
|
||||
def __init__(self, fname, arch):
|
||||
Image.__init__(self, "kernel", fname, compression='gzip')
|
||||
def __init__(self, fdata, arch):
|
||||
Image.__init__(self, "kernel", fdata, compression='gzip')
|
||||
self.os = '"linux"'
|
||||
|
||||
# Fixme -- these should be parameterized
|
||||
@@ -87,8 +108,8 @@ class KernelImage(Image):
|
||||
class InitrdImage(Image):
|
||||
"""Initrd image entry"""
|
||||
|
||||
def __init__(self, fname, arch):
|
||||
Image.__init__(self, "ramdisk", fname, compression='gzip')
|
||||
def __init__(self, fdata, arch):
|
||||
Image.__init__(self, "ramdisk", fdata, compression='gzip')
|
||||
|
||||
# Fixme -- these should be parameterized
|
||||
if arch == 'powerpc':
|
||||
@@ -108,8 +129,8 @@ class InitrdImage(Image):
|
||||
class DtbImage(Image):
|
||||
"""DTB Image Entry"""
|
||||
|
||||
def __init__(self, fname):
|
||||
Image.__init__(self, "flat_dt", fname, compression="none")
|
||||
def __init__(self, fdata):
|
||||
Image.__init__(self, "flat_dt", fdata, compression="none")
|
||||
|
||||
def write(self, f):
|
||||
self.start_image(f)
|
||||
@@ -169,18 +190,31 @@ class FlatImageTree(object):
|
||||
|
||||
initrd = d.get('initrd', None)
|
||||
|
||||
sys.stderr.write("*** platform %s kernel %s\n"
|
||||
% (name, kernel,))
|
||||
self.add_config(name, kernel, dtb, initrd)
|
||||
|
||||
|
||||
def add_yaml(self, name, fname):
|
||||
d = yaml.load(open(fname))
|
||||
def add_yaml(self, name, fname, defaults=None):
|
||||
if defaults is not None:
|
||||
d = onl.YamlUtils.merge(defaults, fname)
|
||||
else:
|
||||
with open(fname) as fd:
|
||||
d = yaml.load(fd)
|
||||
self.add_dict(name, d)
|
||||
|
||||
def add_platform_package(self, package):
|
||||
print package
|
||||
platform = package.replace(":%s" % ops.arch, "").replace("onl-platform-config-", "")
|
||||
y = subprocess.check_output("onlpm --quiet --find-file %s %s.yml" % (package, platform), shell=True).strip()
|
||||
self.add_yaml(platform, y)
|
||||
|
||||
vpkg = "onl-vendor-config-onl:all"
|
||||
pm.require(vpkg, force=False, build_missing=False)
|
||||
y1 = pm.opr.get_file(vpkg, "platform-config-defaults-uboot.yml")
|
||||
|
||||
pm.require(package, force=False, build_missing=False)
|
||||
y2 = pm.opr.get_file(package, platform + '.yml')
|
||||
|
||||
self.add_yaml(platform, y2, defaults=y1)
|
||||
|
||||
def add_platform(self, platform):
|
||||
if (":%s" % ops.arch) in platform:
|
||||
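Note: add_yaml() now layers a platform's YAML over a shared defaults file via onl.YamlUtils.merge() instead of loading the platform file alone, and add_platform_package() fetches platform-config-defaults-uboot.yml from onl-vendor-config-onl:all for exactly that purpose. The merge semantics are not shown in this diff; a recursive dict overlay along these lines would give the observed behaviour (an assumption, not the actual onl.YamlUtils code):

    import yaml

    def merge_dicts(base, override):
        """Recursively overlay 'override' onto 'base' (assumed semantics)."""
        out = dict(base)
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(out.get(key), dict):
                out[key] = merge_dicts(out[key], value)
            else:
                out[key] = value
        return out

    def load_with_defaults(defaults_path, platform_path):
        with open(defaults_path) as fd:
            defaults = yaml.load(fd)
        with open(platform_path) as fd:
            platform = yaml.load(fd)
        return merge_dicts(defaults, platform)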
@@ -195,17 +229,19 @@ class FlatImageTree(object):
|
||||
def writef(self, f):
|
||||
|
||||
kdict = {}
|
||||
for k in set(self.kernels):
|
||||
kdict[k] = KernelImage(k, ops.arch)
|
||||
for k in self.kernels:
|
||||
ki = KernelImage(k, ops.arch)
|
||||
kdict[ki.name] = ki
|
||||
|
||||
ddict = {}
|
||||
for d in set(self.dtbs):
|
||||
ddict[d] = DtbImage(d)
|
||||
for d in self.dtbs:
|
||||
di = DtbImage(d)
|
||||
ddict[di.name] = di
|
||||
|
||||
idict = {}
|
||||
for i in set(self.initrds):
|
||||
idict[i] = InitrdImage(i, ops.arch)
|
||||
|
||||
for i in self.initrds:
|
||||
ii = InitrdImage(i, ops.arch)
|
||||
idict[ii.name] = ii
|
||||
|
||||
|
||||
f.write("""/* \n""")
|
||||
@@ -220,27 +256,27 @@ class FlatImageTree(object):
|
||||
f.write(""" images {\n\n""")
|
||||
|
||||
f.write(""" /* Kernel Images */\n""")
|
||||
for k in set(self.kernels):
|
||||
KernelImage(k, ops.arch).write(f)
|
||||
for k in kdict.values():
|
||||
k.write(f)
|
||||
|
||||
f.write("""\n""")
|
||||
f.write(""" /* DTB Images */\n""")
|
||||
for d in set(self.dtbs):
|
||||
DtbImage(d).write(f)
|
||||
for d in ddict.values():
|
||||
d.write(f)
|
||||
|
||||
f.write("""\n""")
|
||||
f.write(""" /* Initrd Images */\n""")
|
||||
for i in set(self.initrds):
|
||||
InitrdImage(i, ops.arch).write(f)
|
||||
for i in idict.values():
|
||||
i.write(f)
|
||||
|
||||
f.write(""" };\n""")
|
||||
f.write(""" configurations {\n""")
|
||||
for (name, (kernel, dtb, initrd)) in self.configurations.iteritems():
|
||||
f.write(""" %s {\n""" % name)
|
||||
f.write(""" description = "%s";\n""" % name)
|
||||
f.write(""" kernel = "%s";\n""" % (kdict[kernel].name))
|
||||
f.write(""" ramdisk = "%s";\n""" % (idict[initrd].name))
|
||||
f.write(""" fdt = "%s";\n""" % (ddict[dtb].name))
|
||||
f.write(""" kernel = "%s";\n""" % (KernelImage(kernel, ops.arch).name))
|
||||
f.write(""" ramdisk = "%s";\n""" % (InitrdImage(initrd, ops.arch).name))
|
||||
f.write(""" fdt = "%s";\n""" % (DtbImage(dtb).name))
|
||||
f.write(""" };\n\n""")
|
||||
f.write(""" };\n""")
|
||||
f.write("""};\n""")
|
||||
@@ -291,12 +327,14 @@ if __name__ == '__main__':
|
||||
fit.add_yaml(y)
|
||||
|
||||
if ops.add_platform == [['all']]:
|
||||
ops.add_platform = [ subprocess.check_output("onlpm --list-platforms --arch %s" % (ops.arch), shell=True).split() ]
|
||||
ops.add_platform = [ pm.list_platforms(ops.arch) ]
|
||||
|
||||
if ops.add_platform == [['initrd']]:
|
||||
# Add support for the platforms listed in the initrd's platform manifest
|
||||
(package,f) = initrd.split(':')
|
||||
mfile = subprocess.check_output("onlpm --find-file %s:%s manifest.json" % (package, ops.arch), shell=True).strip()
|
||||
pkg = package + ':' + ops.arch
|
||||
pm.require(pkg, force=False, build_missing=False)
|
||||
mfile = pm.opr.get_file(pkg, "manifest.json")
|
||||
manifest = json.load(open(mfile))
|
||||
ops.add_platform = [[ "%s" % p for p in manifest['platforms'] ]]
|
||||
|
||||
|
||||
23
tools/mkshar
@@ -36,6 +36,9 @@ parser.add_option('--unzip-loop',
parser.add_option('--unzip-pad',
                  action='store_true',
                  help="Special pad options for deficient unzip")
parser.add_option('--inplace',
                  action='store_true',
                  help="Perform fixups in-place")
parser.add_option('--fixup-perms',
                  type=str,
                  help="Post-unpack shell script to fix permissions")
@@ -142,6 +145,10 @@ def _splice(tag, val):
    line = line + ('#' * llen)
    buf = buf[:p] + line + buf[q:]

def _spliceMaybe(tag, val):
    val = "${%s-\"%s\"}" % (tag, val,)
    _splice(tag, val)

logger.info("prepping SFX")

_splice('SFX_BYTES', len(buf))
|
||||
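Note: _spliceMaybe() wraps the spliced value in shell default-substitution syntax, so a tag like SFX_UNZIP ends up in the SFX header as SFX_UNZIP=${SFX_UNZIP-"1"} and can be overridden from the caller's environment instead of being hard-coded. Roughly:

    def splice_maybe_value(tag, val):
        # ${TAG-"default"} keeps an existing environment value, else uses the default
        return '${%s-"%s"}' % (tag, val)

    print('SFX_UNZIP=%s' % splice_maybe_value('SFX_UNZIP', '1'))
    # SFX_UNZIP=${SFX_UNZIP-"1"}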
@@ -153,23 +160,27 @@ if opts.lazy:
|
||||
else:
|
||||
_splice('SFX_LAZY', '')
|
||||
if opts.unzip_sfx:
|
||||
_splice('SFX_UNZIP', '1')
|
||||
_spliceMaybe('SFX_UNZIP', '1')
|
||||
else:
|
||||
_splice('SFX_UNZIP', '')
|
||||
_spliceMaybe('SFX_UNZIP', '')
|
||||
if opts.unzip_pipe:
|
||||
_splice('SFX_PIPE', '1')
|
||||
_spliceMaybe('SFX_PIPE', '1')
|
||||
else:
|
||||
_splice('SFX_PIPE', '')
|
||||
_spliceMaybe('SFX_PIPE', '')
|
||||
if opts.unzip_loop:
|
||||
_splice('SFX_LOOP', '1')
|
||||
_spliceMaybe('SFX_LOOP', '1')
|
||||
else:
|
||||
_splice('SFX_LOOP', '')
|
||||
_spliceMaybe('SFX_LOOP', '')
|
||||
if opts.unzip_pad:
|
||||
_splice('SFX_PAD', 'pad.bin')
|
||||
else:
|
||||
_splice('SFX_PAD', '')
|
||||
if opts.fixup_perms:
|
||||
_splice('SFX_PERMS', opts.fixup_perms)
|
||||
if opts.inplace:
|
||||
_spliceMaybe('SFX_INPLACE', '1')
|
||||
else:
|
||||
_spliceMaybe('SFX_INPLACE', '')
|
||||
|
||||
# remember the checksum offset
|
||||
ckStart = buf.find("SFX_CHECKSUM=")
|
||||
|
||||
82
tools/onlplatform.py
Normal file
@@ -0,0 +1,82 @@
#!/usr/bin/python

"""onlplatform.py

Extract install file requirements from the platform YAML file and/or
the platform package metadata.
"""

import sys, os
import itertools

toolsdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(toolsdir)

onldir = os.path.dirname(toolsdir)
onlpydir = os.path.join(onldir, "packages/base/all/vendor-config-onl/src/python")
sys.path.append(onlpydir)

import onl.YamlUtils

from onlpm import *
# glob import is required here so pickle loads properly

pm = defaultPm()

platform = sys.argv[1]
arch = sys.argv[2]
key = sys.argv[3]

def extractKey(platform, arch, key):

    pkg = "onl-platform-config-%s:%s" % (platform, arch,)
    basename = "%s.yml" % platform
    pm.require(pkg, force=False, build_missing=False)
    platformConfigPath = pm.opr.get_file(pkg, basename)

    if arch in ('amd64',):
        pkg = "onl-vendor-config-onl:all"
        basename = "platform-config-defaults-x86-64.yml"
        subkey = 'grub'
    else:
        pkg = "onl-vendor-config-onl:all"
        basename = "platform-config-defaults-uboot.yml"
        subkey = 'flat_image_tree'
    pm.require(pkg, force=False, build_missing=False)
    defaultConfigPath = pm.opr.get_file(pkg, basename)

    platformConf = onl.YamlUtils.merge(defaultConfigPath, platformConfigPath)
    resource = platformConf[platform][subkey][key]
    if type(resource) == dict:
        pkg = resource['package']
        basename = resource['=']
    else:
        pkg, sep, basename = resource.partition(',')
        if not sep:
            raise ValueError("resource missing package declaration: %s" % resource)
        pkg = pkg.strip()
        basename = basename.strip()
    pm.require(pkg, force=False, build_missing=False)
    resourcePath = pm.opr.get_file(pkg, basename)
    return resourcePath

def extractVendor(platform, arch):
    pkg = "onl-platform-config-%s:%s" % (platform, arch,)
    l = pm.opr.lookup_all(pkg)
    if not l:
        raise SystemExit("cannot find package %s:%s"
                         % (platform, arch,))
    l = [x for x in pm.package_groups if pkg in x]
    l = list(itertools.chain(*[x.prerequisite_packages() for x in l]))
    l = [x for x in l if x.startswith('onl-vendor-config-')]
    return "\n".join(l)

if key in ('kernel', 'initrd', 'dtb', 'itb',):
    print extractKey(platform, arch, key)
    sys.exit(0)

if key == 'vendor':
    print extractVendor(platform, arch)
    sys.exit(0)

raise SystemExit("invalid key %s" % key)
|
||||
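Note: tools/onlplatform.py is invoked as "onlplatform.py <platform> <arch> <key>", where key is one of kernel, initrd, dtb, itb or vendor, and it prints the resolved file path (or the vendor-config package list). A hedged example of driving it from another script, assuming an ONL build shell where the ONLPM_OPTION_* environment variables are already set; the platform and arch are taken from the KVM entry earlier in this diff:

    import subprocess

    kernel_path = subprocess.check_output(
        ["python", "tools/onlplatform.py", "x86-64-kvm-x86-64-r0", "amd64", "kernel"]).strip()
    print(kernel_path)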
@@ -366,12 +366,12 @@ class OnlPackage(object):
|
||||
if 'init' in self.pkg:
|
||||
if not os.path.exists(self.pkg['init']):
|
||||
raise OnlPackageError("Init script '%s' does not exist." % self.pkg['init'])
|
||||
command = command + "--deb-init %s" % self.pkg['init']
|
||||
command = command + "--deb-init %s " % self.pkg['init']
|
||||
|
||||
if 'post-install' in self.pkg:
|
||||
if not os.path.exists(self.pkg['post-install']):
|
||||
raise OnlPackageError("Post-install script '%s' does not exist." % self.pkg['post-install'])
|
||||
command = command + "--after-install %s" % self.pkg['post-install']
|
||||
command = command + "--after-install %s " % self.pkg['post-install']
|
||||
|
||||
if logger.level < logging.INFO:
|
||||
command = command + "--verbose "
|
||||
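Note: the two hunks above only add a trailing space after the --deb-init and --after-install arguments; without it the next option gets glued onto the file name when the command string is built up by concatenation. A tiny illustration (the "pkgtool" prefix is a placeholder, not the real command):

    command = "pkgtool "
    command = command + "--deb-init %s" % "init.sh"        # old form, no trailing space
    command = command + "--after-install %s " % "post.sh"  # fixed form
    print(command)
    # pkgtool --deb-init init.sh--after-install post.sh    <- options run together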
@@ -561,7 +561,6 @@ class OnlPackageGroup(object):
|
||||
with onlu.Lock(os.path.join(self._pkgs['__directory'], '.lock')):
|
||||
self.gmake_locked("clean", 'Clean')
|
||||
|
||||
|
||||
class OnlPackageRepo(object):
|
||||
"""Package Repository and Interchange Class
|
||||
|
||||
@@ -765,8 +764,8 @@ class OnlPackageManager(object):
|
||||
self.package_groups = []
|
||||
self.opr = None
|
||||
|
||||
def set_repo(self, repodir):
|
||||
self.opr = OnlPackageRepo(repodir, ops.repo_package_dir)
|
||||
def set_repo(self, repodir, packagedir='packages'):
|
||||
self.opr = OnlPackageRepo(repodir, packagedir=packagedir)
|
||||
|
||||
|
||||
def filter(self, subdir=None, arches=None, substr=None):
|
||||
@@ -999,6 +998,43 @@ class OnlPackageManager(object):
|
||||
def pkg_info(self):
|
||||
return "\n".join([ pg.pkg_info() for pg in self.package_groups if not pg.filtered ])
|
||||
|
||||
def list_platforms(self, arch):
|
||||
platforms = []
|
||||
for pg in self.package_groups:
|
||||
for p in pg.packages:
|
||||
(name, pkgArch) = OnlPackage.idparse(p.id())
|
||||
m = re.match(r'onl-platform-config-(?P<platform>.*)', name)
|
||||
if m:
|
||||
if arch in [ pkgArch, "all", None ]:
|
||||
platforms.append(m.groups('platform')[0])
|
||||
return platforms
|
||||
|
||||
def defaultPm():
|
||||
repo = os.environ.get('ONLPM_OPTION_REPO', None)
|
||||
envJson = os.environ.get('ONLPM_OPTION_INCLUDE_ENV_JSON', None)
|
||||
packagedirs = os.environ['ONLPM_OPTION_PACKAGEDIRS'].split(':')
|
||||
repoPackageDir = os.environ.get('ONLPM_OPTION_REPO_PACKAGE_DIR', 'packages')
|
||||
subdir = os.getcwd()
|
||||
arches = ['amd64', 'powerpc', 'armel', 'all',]
|
||||
|
||||
if envJson:
|
||||
for j in envJson.split(':'):
|
||||
data = json.load(open(j))
|
||||
for (k, v) in data.iteritems():
|
||||
try:
|
||||
v = v.encode('ascii')
|
||||
except UnicodeEncodeError:
|
||||
pass
|
||||
os.environ[k] = v
|
||||
|
||||
pm = OnlPackageManager()
|
||||
pm.set_repo(repo, packagedir=repoPackageDir)
|
||||
for pdir in packagedirs:
|
||||
pm.load(pdir, usecache=True, rebuildcache=False)
|
||||
pm.filter(subdir = subdir, arches=arches)
|
||||
|
||||
return pm
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
ap = argparse.ArgumentParser("onlpm")
|
||||
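Note: list_platforms() centralizes the platform discovery that --list-platforms used to do inline: every package id of the form onl-platform-config-<platform>:<arch> contributes its <platform> suffix when the arch matches. The extraction is just a regex over the package name, for example:

    import re

    package_ids = [
        "onl-platform-config-x86-64-kvm-x86-64-r0:amd64",
        "onl-platform-config-powerpc-accton-as5710-54x-r0:powerpc",
        "onl-vendor-config-onl:all",
    ]

    platforms = []
    for pkg_id in package_ids:
        name, _, arch = pkg_id.partition(':')
        m = re.match(r'onl-platform-config-(?P<platform>.*)', name)
        if m and arch == 'amd64':
            # m.group('platform') is the usual spelling; the tree uses
            # m.groups('platform')[0], which returns the same string here.
            platforms.append(m.group('platform'))

    print(platforms)   # ['x86-64-kvm-x86-64-r0']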
@@ -1090,7 +1126,7 @@ if __name__ == '__main__':
|
||||
pm = OnlPackageManager()
|
||||
if ops.repo:
|
||||
logger.debug("Setting repo as '%s'..." % ops.repo)
|
||||
pm.set_repo(ops.repo)
|
||||
pm.set_repo(ops.repo, packagedir=ops.repo_package_dir)
|
||||
|
||||
if ops.in_repo:
|
||||
for p in ops.in_repo:
|
||||
@@ -1113,15 +1149,10 @@ if __name__ == '__main__':
|
||||
print
|
||||
|
||||
if ops.list_platforms:
|
||||
platforms = []
|
||||
for pg in pm.package_groups:
|
||||
for p in pg.packages:
|
||||
(name, arch) = OnlPackage.idparse(p.id())
|
||||
m = re.match(r'onl-platform-config-(?P<platform>.*)', name)
|
||||
if m:
|
||||
if ops.arch in [ arch, "all", None ]:
|
||||
platforms.append(m.groups('platform')[0])
|
||||
|
||||
if not ops.arch:
|
||||
logger.error("missing --arch with --list-platforms")
|
||||
sys.exit(1)
|
||||
platforms = pm.list_platforms(ops.arch)
|
||||
if ops.csv:
|
||||
print ','.join(platforms)
|
||||
else:
|
||||
|
||||
@@ -4,7 +4,7 @@ set -e
|
||||
|
||||
CMD=${0##*/}
|
||||
|
||||
UNZIP=/usr/bin/unzip
|
||||
UNZIP=${UNZIP-"/usr/bin/unzip"}
|
||||
|
||||
UNZIPOPTS=
|
||||
UNZIPARGS=
|
||||
@@ -29,9 +29,10 @@ SFX_INSTALL=install ## internal script in the payload to run #################
|
||||
SFX_PERMS= ## internal script to correct file permissions ####################
|
||||
SFX_PAD= ## pad file (this payload) to skip during unpack ####################
|
||||
SFX_LAZY= ## set to '1' to defer extraction to SFX_INSTALL ##################
|
||||
SFX_UNZIP=1 ## set to '' if this unzip cannot parse SFX headers #############
|
||||
SFX_LOOP=1 ## set to '' if this unzip cannot read from a loopback/block ####
|
||||
SFX_PIPE=1 ## set to '' if this unzip cannot read from a pipe ##############
|
||||
SFX_UNZIP=1 ## set to '' if this unzip cannot parse SFX headers ##############
|
||||
SFX_LOOP=1 ## set to '' if this unzip cannot read from a loopback/block ######
|
||||
SFX_PIPE=1 ## set to '' if this unzip cannot read from a pipe ################
|
||||
SFX_INPLACE= ## set to '1' if this zip file can be modified in place##########
|
||||
|
||||
if test "$SFX_PAD"; then
|
||||
UNZIPARGS=$UNZIPARGS${UNZIPARGS:+" "}"-x $SFX_PAD"
|
||||
@@ -100,6 +101,23 @@ do_cleanup()
|
||||
}
|
||||
trap "do_cleanup" 0 1
|
||||
|
||||
echo "$CMD: computing checksum of original archive"
|
||||
{
|
||||
dd if="$SHARABS" bs=$SFX_BLOCKSIZE count=$SFX_BLOCKS 2>/dev/null | sed -e "/^SFX_CHECKSUM=/d";
|
||||
dd if="$SHARABS" bs=$SFX_BLOCKSIZE skip=$SFX_BLOCKS 2>/dev/null
|
||||
} | md5sum > "$workdir/ck"
|
||||
|
||||
set dummy `cat "$workdir/ck"`
|
||||
newck=$2
|
||||
rm -f "$workdir/ck"
|
||||
|
||||
if test "$SFX_CHECKSUM" = "$newck"; then
|
||||
echo "$CMD: checksum is OK"
|
||||
else
|
||||
echo "$CMD: *** checksum mismatch" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
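Note: the block added above lets the SFX verify itself before doing any work: it re-hashes the whole archive with the SFX_CHECKSUM= line stripped out of the header blocks (that line obviously cannot contain its own digest) and compares the result against the stored value. The same computation as a Python sketch; blocksize and blocks mirror the SFX_BLOCKSIZE and SFX_BLOCKS shell variables:

    import hashlib
    import re

    def sfx_checksum(path, blocksize, blocks):
        """md5 of the archive with the SFX_CHECKSUM= line removed from the header."""
        md5 = hashlib.md5()
        with open(path, "rb") as f:
            header = f.read(blocksize * blocks)
            md5.update(re.sub(br"(?m)^SFX_CHECKSUM=.*\n", b"", header))
            while True:
                chunk = f.read(1 << 20)
                if not chunk:
                    break
                md5.update(chunk)
        return md5.hexdigest()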
_t()
|
||||
{
|
||||
local c z
|
||||
@@ -144,11 +162,19 @@ case "$SFX_PAD:$SFX_UNZIP:$SFX_LOOP:$SFX_PIPE" in
|
||||
esac
|
||||
|
||||
if test "$SFX_PAD"; then
|
||||
echo "$CMD: copying file and resetting pad"
|
||||
cp "$SHARABS" $workdir/onie-installer.zip
|
||||
dd if="$SHARABS" of=$workdir/onie-installer.zip bs=512 skip=$(($SFX_BLOCKS-1)) count=1 conv=notrunc
|
||||
_CAT=":"
|
||||
_ZIP="$workdir/onie-installer.zip"
|
||||
echo "$CMD: extracting pad"
|
||||
dd if="$SHARABS" of=$workdir/zip.bin bs=512 skip=$(($SFX_BLOCKS-1)) count=1
|
||||
if test "$SFX_INPLACE"; then
|
||||
_CAT=":"
|
||||
_ZIP="$SHARABS"
|
||||
else
|
||||
echo "$CMD: copying file before resetting pad"
|
||||
cp "$SHARABS" $workdir/onie-installer.zip
|
||||
_CAT=":"
|
||||
_ZIP="$workdir/onie-installer.zip"
|
||||
fi
|
||||
echo "$CMD: resetting pad"
|
||||
dd if="$workdir/zip.bin" of="$_ZIP" bs=512 count=1 conv=notrunc
|
||||
elif test "$SFX_UNZIP"; then
|
||||
echo "$CMD: processing SFX with unzip"
|
||||
_CAT=":"
|
||||
@@ -209,24 +235,6 @@ case "$banner" in
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "$CMD: computing checksum"
|
||||
{
|
||||
dd if="$SHARABS" bs=$SFX_BLOCKSIZE count=$SFX_BLOCKS 2>/dev/null | sed -e "/^SFX_CHECKSUM=/d";
|
||||
dd if="$SHARABS" bs=$SFX_BLOCKSIZE skip=$SFX_BLOCKS 2>/dev/null
|
||||
} | md5sum > "$UNZIPDIR/ck"
|
||||
|
||||
set dummy `cat "$UNZIPDIR/ck"`
|
||||
newck=$2
|
||||
|
||||
rm -f "$UNZIPDIR/ck"
|
||||
|
||||
if test "$SFX_CHECKSUM" = "$newck"; then
|
||||
echo "$CMD: checksum is OK"
|
||||
else
|
||||
echo "$CMD: *** checksum mismatch" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
shardir=`dirname $0`
|
||||
shardir=`cd $shardir && pwd`
|
||||
|
||||
|
||||