Mirror of https://github.com/Telecominfraproject/OpenNetworkLinux.git
Merge branch 'master' of github.com:carlroth/OpenNetworkLinux
@@ -23,9 +23,11 @@ from onl.sysconfig import sysconfig

try:
    PartedException = parted._ped.PartedException
    DiskException = parted._ped.DiskException
except AttributeError:
    import _ped
    PartedException = _ped.PartedException
    DiskException = _ped.DiskException

class Base:

@@ -689,14 +691,17 @@ class UbootInstaller(SubprocessMixin, Base):
                return 0
            self.log.warn("disk %s has wrong label %s",
                          self.device, self.partedDisk.type)
        except PartedException as ex:
        except (DiskException, PartedException) as ex:
            self.log.error("cannot get partition table from %s: %s",
                           self.device, str(ex))
        except Exception:
            self.log.exception("cannot get partition table from %s",
                               self.device)

        self.log.info("creating msdos label on %s")
        self.log.info("clobbering disk label on %s", self.device)
        self.partedDevice.clobber()

        self.log.info("creating msdos label on %s", self.device)
        self.partedDisk = parted.freshDisk(self.partedDevice, 'msdos')

        return 0
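For readers unfamiliar with pyparted, here is a minimal standalone sketch of the pattern this hunk relies on: the exception classes live in different modules depending on the pyparted version, and recreating a label means clobbering the old signatures first. The device path and function name below are illustrative, not part of the commit.

    # Minimal sketch, assuming pyparted is installed; paths/names are illustrative.
    import parted

    try:
        PartedException = parted._ped.PartedException
        DiskException = parted._ped.DiskException
    except AttributeError:
        # Older pyparted builds expose the exception classes from _ped directly.
        import _ped
        PartedException = _ped.PartedException
        DiskException = _ped.DiskException

    def ensure_msdos_label(path):
        dev = parted.getDevice(path)
        try:
            disk = parted.newDisk(dev)
            if disk.type == 'msdos':
                return disk
        except (DiskException, PartedException):
            pass  # unreadable or missing label; fall through and recreate it
        dev.clobber()                          # wipe stale partition-table signatures
        return parted.freshDisk(dev, 'msdos')  # write a fresh msdos label

    if __name__ == "__main__":
        ensure_msdos_label("/dev/sdb")         # hypothetical target disk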
@@ -0,0 +1,75 @@
diff -urpN a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h	2016-11-02 14:46:33.278862661 -0700
+++ b/include/linux/mm.h	2016-11-02 14:47:01.338863270 -0700
@@ -1526,6 +1526,7 @@ struct page *follow_page(struct vm_area_
 #define FOLL_MLOCK 0x40 /* mark page as mlocked */
 #define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
 #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
+#define FOLL_COW 0x4000 /* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                         void *data);
diff -urpN a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c	2016-11-02 14:46:33.938862676 -0700
+++ b/mm/memory.c	2016-11-02 14:50:52.086868277 -0700
@@ -1427,6 +1427,23 @@ int zap_vma_ptes(struct vm_area_struct *
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+static inline bool can_follow_write_pte(pte_t pte, struct page *page,
+                                        unsigned int flags)
+{
+        if (pte_write(pte))
+                return true;
+
+        /*
+         * Make sure that we are really following CoWed page. We do not really
+         * have to care about exclusiveness of the page because we only want
+         * to ensure that once COWed page hasn't disappeared in the meantime
+         * or it hasn't been merged to a KSM page.
+         */
+        if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
+                return page && PageAnon(page) && !PageKsm(page);
+
+        return false;
+}
 /**
  * follow_page - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1509,10 +1526,12 @@ split_fallthrough:
         pte = *ptep;
         if (!pte_present(pte))
                 goto no_page;
-        if ((flags & FOLL_WRITE) && !pte_write(pte))
-                goto unlock;
 
         page = vm_normal_page(vma, address, pte);
+        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) {
+                pte_unmap_unlock(ptep, ptl);
+                return NULL;
+        }
         if (unlikely(!page)) {
                 if ((flags & FOLL_DUMP) ||
                     !is_zero_pfn(pte_pfn(pte)))
@@ -1789,17 +1808,13 @@ int __get_user_pages(struct task_struct
                          * The VM_FAULT_WRITE bit tells us that
                          * do_wp_page has broken COW when necessary,
                          * even if maybe_mkwrite decided not to set
-                         * pte_write. We can thus safely do subsequent
-                         * page lookups as if they were reads. But only
-                         * do so when looping for pte_write is futile:
-                         * in some cases userspace may also be wanting
-                         * to write to the gotten user page, which a
-                         * read fault here might prevent (a readonly
-                         * page might get reCOWed by userspace write).
+                         * pte_write. We cannot simply drop FOLL_WRITE
+                         * here because the COWed page might be gone by
+                         * the time we do the subsequent page lookups.
                          */
                         if ((ret & VM_FAULT_WRITE) &&
                             !(vma->vm_flags & VM_WRITE))
-                                foll_flags &= ~FOLL_WRITE;
+                                foll_flags |= FOLL_COW;
 
                         cond_resched();
                 }
@@ -253,3 +253,4 @@ mgmt-port-init-config.patch
arch-intel-reboot-cf9-cold.patch
drivers-hwmon-adm1021-detect.patch
drivers-i2c-busses-i2c-isch-timeout.patch
CVE-2016-5195.patch
Submodule packages/base/any/kernels/legacy/linux-3.8.13 updated: 7cdec99d7a...6c803ff857
Submodule packages/base/any/kernels/legacy/linux-3.9.6 updated: 0106373d79...34603c6ec2
@@ -66,6 +66,11 @@
 */
#define ONLP_I2C_F_USE_BLOCK_READ 0x20

/**
 * Use SMBUS block reads if possible.
 */
#define ONLP_I2C_F_USE_SMBUS_BLOCK_READ 0x40

/**
 * @brief Open and prepare for reading or writing.
 * @param bus The i2c bus number.

@@ -105,11 +105,17 @@ onlp_i2c_block_read(int bus, uint8_t addr, uint8_t offset, int size,
    int count = size;
    uint8_t* p = rdata;
    while(count > 0) {
        int rv;
        int rsize = (count >= ONLPLIB_CONFIG_I2C_BLOCK_SIZE) ? ONLPLIB_CONFIG_I2C_BLOCK_SIZE : count;
        int rv = i2c_smbus_read_i2c_block_data(fd,
                                               p - rdata,
        if(flags & ONLP_I2C_F_USE_SMBUS_BLOCK_READ) {
            rv = i2c_smbus_read_block_data(fd, offset, p);
        } else {
            rv = i2c_smbus_read_i2c_block_data(fd,
                                               offset,
                                               rsize,
                                               p);
            offset += rsize;
        }

        if(rv != rsize) {
            AIM_LOG_ERROR("i2c-%d: reading address 0x%x, offset %d, size=%d failed: %{errno}",
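The C change reads the buffer in ONLPLIB_CONFIG_I2C_BLOCK_SIZE chunks and, when the new ONLP_I2C_F_USE_SMBUS_BLOCK_READ flag is set, switches to an SMBus block transfer. Purely as an illustration of the chunked loop (not ONLP's API), the same walk written in Python with the smbus2 package; the 32-byte cap and helper name are assumptions.

    # Illustration only: chunked I2C block reads via the smbus2 package.
    from smbus2 import SMBus

    BLOCK_SIZE = 32  # assumed cap per transfer, mirroring ONLPLIB_CONFIG_I2C_BLOCK_SIZE

    def block_read(bus, addr, offset, size):
        data = []
        with SMBus(bus) as smb:
            while size > 0:
                rsize = min(size, BLOCK_SIZE)
                chunk = smb.read_i2c_block_data(addr, offset, rsize)
                if len(chunk) != rsize:
                    raise IOError("short read at offset %d" % offset)
                data.extend(chunk)
                offset += rsize   # advance the register offset, as the C loop now does
                size -= rsize
        return bytes(data)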
Submodule packages/platforms-closed updated: 3963bb7d39...a7f832c14d
Submodule sm/infra updated: 3bfc913ed8...b0e02165a7
tools/onlpm.py
@@ -56,6 +56,42 @@ class OnlPackageMissingDirError(OnlPackageError):
    def __init__(self, p, d):
        self.value = "Package %s does not contain the directory %s." % (p, d)

class OnlPackageServiceScript(object):
    SCRIPT=None
    def __init__(self, service, dir=None):
        if self.SCRIPT is None:
            raise AttributeError("The SCRIPT attribute must be provided by the deriving class.")

        with tempfile.NamedTemporaryFile(dir=dir, delete=False) as f:
            f.write(self.SCRIPT % dict(service=os.path.basename(service.replace(".init", ""))))
            self.name = f.name


class OnlPackageAfterInstallScript(OnlPackageServiceScript):
    SCRIPT = """#!/bin/sh
set -e
if [ -x "/etc/init.d/%(service)s" ]; then
    update-rc.d %(service)s defaults >/dev/null
    invoke-rc.d %(service)s start || exit $?
fi
"""

class OnlPackageBeforeRemoveScript(OnlPackageServiceScript):
    SCRIPT = """#!/bin/sh
set -e
if [ -x "/etc/init.d/%(service)s" ]; then
    invoke-rc.d %(service)s stop || exit $?
fi
"""

class OnlPackageAfterRemoveScript(OnlPackageServiceScript):
    SCRIPT = """#!/bin/sh
set -e
if [ "$1" = "purge" ] ; then
    update-rc.d %(service)s remove >/dev/null
fi
"""


class OnlPackage(object):
    """Individual Debian Package Builder Class
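A brief usage sketch of the helper classes above, assuming a hypothetical init script name; the temporary file path in .name is what gets handed to fpm later in this file.

    # Illustrative only; "example-service.init" and the workdir are made up.
    workdir = "/tmp/onlpm-work"
    post_inst = OnlPackageAfterInstallScript("example-service.init", dir=workdir)
    # post_inst.name is a temp file containing the rendered shell snippet with
    # %(service)s expanded to "example-service"; fpm consumes it via --after-install.
    command = "fpm ... --after-install %s " % post_inst.name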
@@ -236,6 +272,36 @@ class OnlPackage(object):

        return True

    @staticmethod
    def copyf(src, dst, root):
        if dst.startswith('/'):
            dst = dst[1:]

        if os.path.isdir(src):
            #
            # Copy entire src directory to target directory
            #
            dstpath = os.path.join(root, dst)
            logger.debug("Copytree %s -> %s" % (src, dstpath))
            shutil.copytree(src, dstpath)
        else:
            #
            # If the destination ends in a '/' it means copy the filename
            # as-is to that directory.
            #
            # If not, its a full rename to the destination.
            #
            if dst.endswith('/'):
                dstpath = os.path.join(root, dst)
                if not os.path.exists(dstpath):
                    os.makedirs(dstpath)
                shutil.copy(src, dstpath)
            else:
                dstpath = os.path.join(root, os.path.dirname(dst))
                if not os.path.exists(dstpath):
                    os.makedirs(dstpath)
                shutil.copyfile(src, os.path.join(root, dst))
                shutil.copymode(src, os.path.join(root, dst))


    def build(self, dir_=None):
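The two destination forms copyf distinguishes can be summarized with a small hedged example; the paths are invented for illustration.

    root = "/tmp/pkgroot"   # staging root for the package payload

    # Destination ends in '/': the source keeps its own filename.
    OnlPackage.copyf("builds/onlpdump", "/usr/bin/", root)
    #   -> /tmp/pkgroot/usr/bin/onlpdump

    # Otherwise the destination is the complete target path (a rename on copy).
    OnlPackage.copyf("builds/onlpdump", "/usr/bin/onlp-dump", root)
    #   -> /tmp/pkgroot/usr/bin/onlp-dump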
@@ -276,36 +342,7 @@ class OnlPackage(object):
        self.pkg['__workdir'] = workdir

        for (src,dst) in self.pkg.get('files', {}):

            if dst.startswith('/'):
                dst = dst[1:]

            if os.path.isdir(src):
                #
                # Copy entire src directory to target directory
                #
                dstpath = os.path.join(root, dst)
                logger.debug("Copytree %s -> %s" % (src, dstpath))
                shutil.copytree(src, dstpath)
            else:
                #
                # If the destination ends in a '/' it means copy the filename
                # as-is to that directory.
                #
                # If not, its a full rename to the destination.
                #
                if dst.endswith('/'):
                    dstpath = os.path.join(root, dst)
                    if not os.path.exists(dstpath):
                        os.makedirs(dstpath)
                    shutil.copy(src, dstpath)
                else:
                    dstpath = os.path.join(root, os.path.dirname(dst))
                    if not os.path.exists(dstpath):
                        os.makedirs(dstpath)
                    shutil.copyfile(src, os.path.join(root, dst))
                    shutil.copymode(src, os.path.join(root, dst))

            OnlPackage.copyf(src, dst, root)

        for (link,src) in self.pkg.get('links', {}).iteritems():
            logger.info("Linking %s -> %s..." % (link, src))
@@ -316,14 +353,14 @@ class OnlPackage(object):
        # FPM doesn't seem to have a doc option so we copy documentation
        # files directly into place.
        #
        docpath = os.path.join(root, "usr/share/doc/%(name)s" % self.pkg)
        if not os.path.exists(docpath):
            os.makedirs(docpath)

        for src in self.pkg.get('docs', []):
            if not os.path.exists(src):
                raise OnlPackageError("Documentation source file '%s' does not exist." % src)

            dstpath = os.path.join(root, "usr/share/doc/%(name)s" % self.pkg)
            if not os.path.exists(dstpath):
                os.makedirs(dstpath)
            shutil.copy(src, dstpath)
            shutil.copy(src, docpath)

        changelog = os.path.join(workdir, 'changelog')
        copyright_ = os.path.join(workdir, 'copyright')
@@ -363,15 +400,38 @@ class OnlPackage(object):
        for provides in onlu.sflatten(self.pkg.get('provides', [])):
            command = command + "--provides %s " % provides

        for conflicts in onlu.sflatten(self.pkg.get('conflicts', [])):
            command = command + "--conflicts %s " % conflicts

        for replaces in onlu.sflatten(self.pkg.get('replaces', [])):
            command = command + "--replaces %s " % replaces

        if 'virtual' in self.pkg:
            command = command + "--provides %(v)s --conflicts %(v)s --replaces %(v)s " % dict(v=self.pkg['virtual'])

        if 'priority' in self.pkg:
            command = command + "--deb-priority %s " % self.pkg['priority']

        if 'init' in self.pkg:
            if not os.path.exists(self.pkg['init']):
                raise OnlPackageError("Init script '%s' does not exist." % self.pkg['init'])
            command = command + "--deb-init %s " % self.pkg['init']
            if self.pkg.get('init-after-install', True):
                command = command + "--after-install %s " % OnlPackageAfterInstallScript(self.pkg['init'], dir=workdir).name
            if self.pkg.get('init-before-remove', True):
                command = command + "--before-remove %s " % OnlPackageBeforeRemoveScript(self.pkg['init'], dir=workdir).name
            if self.pkg.get('init-after-remove', True):
                command = command + "--after-remove %s " % OnlPackageAfterRemoveScript(self.pkg['init'], dir=workdir).name

        if 'post-install' in self.pkg:
            if not os.path.exists(self.pkg['post-install']):
                raise OnlPackageError("Post-install script '%s' does not exist." % self.pkg['post-install'])
            command = command + "--after-install %s " % self.pkg['post-install']
        if self.pkg.get('asr', True):
            # Generate the ASR documentation for this package.
            sys.path.append("%s/sm/infra/tools" % os.getenv('ONL'))
            import asr
            asro = asr.AimSyslogReference()
            asro.extract(workdir)
            asro.format(os.path.join(docpath, asr.AimSyslogReference.ASR_NAME), 'json')

        ############################################################

        if logger.level < logging.INFO:
            command = command + "--verbose "
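To make the new knobs easier to scan, here is a hedged sketch of the package-dictionary keys this block reacts to and the fpm flags they translate into; the name and path values are invented for illustration.

    pkg = {
        'name': 'example-service',
        'init': 'example-service.init',  # --deb-init, plus the generated maintainer scripts below
        'init-after-install': True,      # --after-install  (update-rc.d defaults + start)
        'init-before-remove': True,      # --before-remove  (stop)
        'init-after-remove': True,       # --after-remove   (update-rc.d remove on purge)
        'asr': True,                     # extract the AIM syslog reference into docpath
    }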
@@ -379,7 +439,6 @@ class OnlPackage(object):
        onlu.execute(command)

        # Grab the package from the workdir. There can be only one.
        sys.stdout.write(workdir)
        files = glob.glob(os.path.join(workdir, '*.deb'))
        if len(files) == 0:
            raise OnlPackageError("No debian package.")
@@ -543,18 +602,14 @@ class OnlPackageGroup(object):


        if 'release' in self._pkgs:
            release_list = onlu.validate_src_dst_file_tuples(self._pkgs['__directory'],
                                                             self._pkgs['release'],
                                                             dict(),
                                                             OnlPackageError)
            for f in release_list:
                release_dir = os.environ.get('ONLPM_OPTION_RELEASE_DIR',
                                             os.path.join(os.environ.get('ONL', 'RELEASE')))
                dst = os.path.join(release_dir, g_dist_codename, f[1])
                if not os.path.exists(dst):
                    os.makedirs(dst)
                logger.info("Releasing %s -> %s" % (os.path.basename(f[0]), dst))
                shutil.copy(f[0], dst)
            for (src, dst) in onlu.validate_src_dst_file_tuples(self._pkgs['__directory'],
                                                                self._pkgs['release'],
                                                                dict(),
                                                                OnlPackageError):
                root = os.path.join(os.environ.get('ONLPM_OPTION_RELEASE_DIR',
                                                   os.path.join(os.environ.get('ONL', 'RELEASE'))),
                                    g_dist_codename)
                OnlPackage.copyf(src, dst, root)

        return products

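The release path now funnels through the same copyf helper used during the build; a short sketch of how the destination root is composed, with illustrative values only.

    import os

    # Values are illustrative; g_dist_codename comes from the surrounding module.
    release_dir = os.environ.get('ONLPM_OPTION_RELEASE_DIR',
                                 os.path.join(os.environ.get('ONL', 'RELEASE')))
    root = os.path.join(release_dir, g_dist_codename)
    OnlPackage.copyf('example_1.0_amd64.deb', 'packages/', root)  # lands under <root>/packages/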
@@ -410,7 +410,7 @@ rm -f /usr/sbin/policy-rc.d
        logger.info("Cleaning Filesystem...")
        onlu.execute('sudo chroot %s /usr/bin/apt-get clean' % dir_)
        onlu.execute('sudo chroot %s /usr/sbin/localepurge' % dir_ )
        onlu.execute('sudo chroot %s find /usr/share/doc -type f -delete' % dir_)
        onlu.execute('sudo chroot %s find /usr/share/doc -type f -not -name asr.json -delete' % dir_)
        onlu.execute('sudo chroot %s find /usr/share/man -type f -delete' % dir_)

        if 'PermitRootLogin' in options:
@@ -458,6 +458,14 @@ rm -f /usr/sbin/policy-rc.d
            ua.chmod('go-w', f)
            ua.chmod('go-w', os.path.dirname(f))

        if options.get('asr', None):
            asropts = options.get('asr')
            logger.info("Gathering ASR documentation...")
            sys.path.append("%s/sm/infra/tools" % os.getenv('ONL'))
            import asr
            asro = asr.AimSyslogReference()
            asro.merge(dir_)
            asro.format(os.path.join(dir_, asropts['file']), fmt=asropts['format'])

        for (mf, fields) in Configure.get('manifests', {}).iteritems():
            logger.info("Configuring manifest %s..." % mf)
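A hedged sketch of the option shape the new ASR block expects; the file name and format values are illustrative, and the path is interpreted relative to the rootfs directory dir_.

    options = {
        'asr': {
            'file': 'usr/share/doc/asr.json',  # written inside the rootfs at dir_/<file>
            'format': 'json',                  # passed through to AimSyslogReference.format()
        },
    }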
@@ -520,6 +528,8 @@ rm -f /usr/sbin/policy-rc.d
                f.write("%s" % issue)
            onlu.execute("sudo chmod a-w %s" % fn)



    finally:
        onlu.execute("sudo umount -l %s %s" % (os.path.join(dir_, "dev"), os.path.join(dir_, "proc")))
