Diffstat (limited to 'classes')
-rw-r--r--  classes/base.bbclass                                            |  17
-rw-r--r--  classes/binconfig.bbclass                                       |  13
-rw-r--r--  classes/e.bbclass                                               |  30
-rw-r--r--  classes/efl.bbclass                                             |   6
-rw-r--r--  classes/image.bbclass (renamed from classes/image_ipk.bbclass)  |  72
-rw-r--r--  classes/package.bbclass                                         |   2
-rw-r--r--  classes/package_deb.bbclass                                     | 238
-rw-r--r--  classes/rootfs_deb.bbclass                                      | 136
-rw-r--r--  classes/rootfs_ipk.bbclass                                      | 112
-rw-r--r--  classes/siteinfo.bbclass                                        |   3
-rw-r--r--  classes/wrt-image.bbclass                                       |   2
11 files changed, 502 insertions, 129 deletions
diff --git a/classes/base.bbclass b/classes/base.bbclass
index 249a25a218..504707e37b 100644
--- a/classes/base.bbclass
+++ b/classes/base.bbclass
@@ -41,12 +41,17 @@ def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
else:
return falsevalue
-def base_contains(variable, checkvalue, truevalue, falsevalue, d):
- import bb
- if bb.data.getVar(variable,d,1).find(checkvalue) != -1:
- return truevalue
- else:
- return falsevalue
+def base_contains(variable, checkvalues, truevalue, falsevalue, d):
+ import bb
+ matches = 0
+ if type(checkvalues).__name__ == "str":
+ checkvalues = [checkvalues]
+ for value in checkvalues:
+ if bb.data.getVar(variable,d,1).find(value) != -1:
+ matches = matches + 1
+ if matches == len(checkvalues):
+ return truevalue
+ return falsevalue
def base_both_contain(variable1, variable2, checkvalue, d):
import bb
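
The new base_contains() accepts either a single value or a list and only returns truevalue when every value is found in the variable. A minimal standalone sketch of the same logic, with a plain dict standing in for the BitBake datastore (the dict lookup is a stand-in, not BitBake API); note that, as in the class, matching is by substring, so partial tokens also match:

    def contains(variable, checkvalues, truevalue, falsevalue, data):
        # data is a plain dict standing in for the BitBake datastore
        val = data.get(variable, "")
        if isinstance(checkvalues, str):
            checkvalues = [checkvalues]
        # succeed only when every requested value occurs in the variable
        if all(val.find(v) != -1 for v in checkvalues):
            return truevalue
        return falsevalue

    # contains("DISTRO_FEATURES", ["pam", "ipv6"], "yes", "no",
    #          {"DISTRO_FEATURES": "pam ipv6 largefile"})  ->  "yes"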
diff --git a/classes/binconfig.bbclass b/classes/binconfig.bbclass
index 317e080fc5..dadf2dddfc 100644
--- a/classes/binconfig.bbclass
+++ b/classes/binconfig.bbclass
@@ -20,11 +20,6 @@ def get_binconfig_mangle(d):
s += " -e 's:OEEXECPREFIX:${STAGING_LIBDIR}/..:'"
return s
-# Native package configurations go in ${BINDIR}/<name>-config-native to prevent a collision with cross packages
-def is_native(d):
- import bb.data
- return ["","-native"][bb.data.inherits_class('native', d)]
-
BINCONFIG_GLOB ?= "*-config"
do_install_append() {
@@ -52,9 +47,9 @@ do_install_append() {
do_stage_append() {
for config in `find ${S} -name '${BINCONFIG_GLOB}'`; do
- configname=`basename $config`${@is_native(d)}
- install -d ${STAGING_BINDIR}
- cat $config | sed ${@get_binconfig_mangle(d)} > ${STAGING_BINDIR}/$configname
- chmod u+x ${STAGING_BINDIR}/$configname
+ configname=`basename $config`
+ install -d ${STAGING_BINDIR_CROSS}
+ cat $config | sed ${@get_binconfig_mangle(d)} > ${STAGING_BINDIR_CROSS}/$configname
+ chmod u+x ${STAGING_BINDIR_CROSS}/$configname
done
}
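
The do_stage_append hunk above pipes each *-config script through the sed expressions built by get_binconfig_mangle() before installing it into STAGING_BINDIR_CROSS, so hard-coded prefixes point back into the staging area. A rough sketch of that rewriting step; only OEEXECPREFIX appears in the hunk above, and the rewrite list and helper name here are illustrative assumptions:

    def mangle_config(text, rewrites):
        # rewrites: (placeholder, replacement) pairs, applied in order, mirroring
        # the chain of sed -e 's:FROM:TO:' expressions built by get_binconfig_mangle()
        for placeholder, replacement in rewrites:
            text = text.replace(placeholder, replacement)
        return text

    # e.g. mangle_config(open("curl-config").read(),
    #                    [("OEEXECPREFIX", staging_libdir + "/..")])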
diff --git a/classes/e.bbclass b/classes/e.bbclass
index ae8fea0d5d..f20c1f8b60 100644
--- a/classes/e.bbclass
+++ b/classes/e.bbclass
@@ -8,23 +8,19 @@ do_prepsources () {
}
addtask prepsources after do_fetch before do_unpack
-def binconfig_suffix(d):
- import bb
- return ["","-native"][bb.data.inherits_class('native', d)]
-
-export CURL_CONFIG = "${STAGING_BINDIR}/curl-config${@binconfig_suffix(d)}"
-export EDB_CONFIG = "${STAGING_BINDIR}/edb-config${@binconfig_suffix(d)}"
-export EET_CONFIG = "${STAGING_BINDIR}/eet-config${@binconfig_suffix(d)}"
-export EVAS_CONFIG = "${STAGING_BINDIR}/evas-config${@binconfig_suffix(d)}"
-export ECORE_CONFIG = "${STAGING_BINDIR}/ecore-config${@binconfig_suffix(d)}"
-export EMBRYO_CONFIG = "${STAGING_BINDIR}/embryo-config${@binconfig_suffix(d)}"
-export ENGRAVE_CONFIG = "${STAGING_BINDIR}/engrave-config${@binconfig_suffix(d)}"
-export ENLIGHTENMENT_CONFIG = "${STAGING_BINDIR}/enlightenment-config${@binconfig_suffix(d)}"
-export EPSILON_CONFIG = "${STAGING_BINDIR}/epsilon-config${@binconfig_suffix(d)}"
-export EPEG_CONFIG = "${STAGING_BINDIR}/epeg-config${@binconfig_suffix(d)}"
-export ESMART_CONFIG = "${STAGING_BINDIR}/esmart-config${@binconfig_suffix(d)}"
-export FREETYPE_CONFIG = "${STAGING_BINDIR}/freetype-config${@binconfig_suffix(d)}"
-export IMLIB2_CONFIG = "${STAGING_BINDIR}/imlib2-config${@binconfig_suffix(d)}"
+export CURL_CONFIG = "${STAGING_BINDIR_CROSS}/curl-config"
+export EDB_CONFIG = "${STAGING_BINDIR_CROSS}/edb-config"
+export EET_CONFIG = "${STAGING_BINDIR_CROSS}/eet-config"
+export EVAS_CONFIG = "${STAGING_BINDIR_CROSS}/evas-config"
+export ECORE_CONFIG = "${STAGING_BINDIR_CROSS}/ecore-config"
+export EMBRYO_CONFIG = "${STAGING_BINDIR_CROSS}/embryo-config"
+export ENGRAVE_CONFIG = "${STAGING_BINDIR_CROSS}/engrave-config"
+export ENLIGHTENMENT_CONFIG = "${STAGING_BINDIR_CROSS}/enlightenment-config"
+export EPSILON_CONFIG = "${STAGING_BINDIR_CROSS}/epsilon-config"
+export EPEG_CONFIG = "${STAGING_BINDIR_CROSS}/epeg-config"
+export ESMART_CONFIG = "${STAGING_BINDIR_CROSS}/esmart-config"
+export FREETYPE_CONFIG = "${STAGING_BINDIR_CROSS}/freetype-config"
+export IMLIB2_CONFIG = "${STAGING_BINDIR_CROSS}/imlib2-config"
do_compile_prepend() {
find ${S} -name Makefile | xargs sed -i 's:/usr/include:${STAGING_INCDIR}:'
diff --git a/classes/efl.bbclass b/classes/efl.bbclass
index be5ef9dcb0..9b0345a5b8 100644
--- a/classes/efl.bbclass
+++ b/classes/efl.bbclass
@@ -13,6 +13,10 @@ libdirectory = "src/lib"
libraries = "lib${SRCNAME}"
headers = "${@bb.data.getVar('SRCNAME',d,1).capitalize()}.h"
+def efl_is_native(d):
+ import bb
+ return ["","-native"][bb.data.inherits_class('native', d)]
+
do_stage_append () {
for i in ${libraries}
do
@@ -24,7 +28,7 @@ do_stage_append () {
done
# Install binaries automatically for native builds
- if [ "${@binconfig_suffix(d)}" = "-native" ]
+ if [ "${@efl_is_native(d)}" = "-native" ]
then
# Most EFL binaries start with the package name
diff --git a/classes/image_ipk.bbclass b/classes/image.bbclass
index 2beb137aef..bcf9bef13c 100644
--- a/classes/image_ipk.bbclass
+++ b/classes/image.bbclass
@@ -1,4 +1,6 @@
-inherit rootfs_ipk
+inherit rootfs_${IMAGE_PKGTYPE}
+
+PACKAGES = ""
# We need to recursively follow RDEPENDS and RRECOMMENDS for images
BUILD_ALL_DEPS = "1"
@@ -9,6 +11,8 @@ EXCLUDE_FROM_WORLD = "1"
USE_DEVFS ?= "0"
+PID = "${@os.getpid()}"
+
DEPENDS += "makedevs-native"
PACKAGE_ARCH = "${MACHINE_ARCH}"
@@ -46,6 +50,17 @@ def get_devtable_list(d):
IMAGE_POSTPROCESS_COMMAND ?= ""
+# some default locales
+IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
+
+LINGUAS_INSTALL = "${@" ".join(map(lambda s: "locale-base-%s" % s, bb.data.getVar('IMAGE_LINGUAS', d, 1).split()))}"
+
+ROOTFS_POSTPROCESS_COMMAND ?= ""
+
+do_rootfs[nostamp] = "1"
+do_rootfs[dirs] = "${TOPDIR}"
+do_build[nostamp] = "1"
+
# Must call real_do_rootfs() from inside here, rather than as a separate
# task, so that we have a single fakeroot context for the whole process.
fakeroot do_rootfs () {
@@ -59,7 +74,7 @@ fakeroot do_rootfs () {
done
fi
- real_do_rootfs
+ rootfs_${IMAGE_PKGTYPE}_do_rootfs
insert_feed_uris
@@ -68,6 +83,7 @@ fakeroot do_rootfs () {
${IMAGE_PREPROCESS_COMMAND}
export TOPDIR=${TOPDIR}
+ export MACHINE=${MACHINE}
for type in ${IMAGE_FSTYPES}; do
if test -z "$FAKEROOTKEY"; then
@@ -97,3 +113,55 @@ insert_feed_uris () {
echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/ipkg/${feed_name}-feed.conf
done
}
+
+log_check() {
+ set +x
+ for target in $*
+ do
+ lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
+
+ echo "log_check: Using $lf_path as logfile"
+
+ if test -e "$lf_path"
+ then
+ rootfs_${IMAGE_PKGTYPE}_log_check $target $lf_path
+ else
+ echo "Cannot find logfile [$lf_path]"
+ fi
+ echo "Logfile is clean"
+ done
+
+ set -x
+}
+
+# set '*' as the root password so the images
+# can decide if they want it or not
+
+zap_root_password () {
+ sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
+ mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
+}
+
+create_etc_timestamp() {
+ date +%2m%2d%2H%2M%Y >${IMAGE_ROOTFS}/etc/timestamp
+}
+
+# Turn any symbolic /sbin/init link into a file
+remove_init_link () {
+ if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
+ LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
+ rm ${IMAGE_ROOTFS}/sbin/init
+ cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
+ fi
+}
+
+make_zimage_symlink_relative () {
+ if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
+ (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
+ fi
+}
+
+# export the zap_root_password, create_etc_timestamp and remove_init_link
+EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link do_rootfs make_zimage_symlink_relative
+
+addtask rootfs before do_build after do_install
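
The IMAGE_LINGUAS/LINGUAS_INSTALL pair added above turns a space-separated locale list into locale-base-* package names through an inline Python expression. Outside the BitBake metadata, the same expansion is simply:

    def linguas_install(image_linguas):
        # "de-de fr-fr en-gb" -> "locale-base-de-de locale-base-fr-fr locale-base-en-gb"
        return " ".join("locale-base-%s" % s for s in image_linguas.split())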
diff --git a/classes/package.bbclass b/classes/package.bbclass
index 132fdcb37a..a327aa46bd 100644
--- a/classes/package.bbclass
+++ b/classes/package.bbclass
@@ -116,7 +116,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
bb.data.setVar('PACKAGES', ' '.join(packages), d)
-PACKAGE_DEPENDS ?= "file-native"
+PACKAGE_DEPENDS ?= "file-native fakeroot-native"
DEPENDS_prepend =+ "${PACKAGE_DEPENDS} "
# file(1) output to match to consider a file an unstripped executable
FILE_UNSTRIPPED_MATCH ?= "not stripped"
diff --git a/classes/package_deb.bbclass b/classes/package_deb.bbclass
new file mode 100644
index 0000000000..9697426d5d
--- /dev/null
+++ b/classes/package_deb.bbclass
@@ -0,0 +1,238 @@
+inherit package
+DEPENDS_prepend="${@["dpkg-native ", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}"
+BOOTSTRAP_EXTRA_RDEPENDS += "dpkg"
+DISTRO_EXTRA_RDEPENDS += "dpkg"
+PACKAGE_WRITE_FUNCS += "do_package_deb"
+IMAGE_PKGTYPE ?= "deb"
+
+python package_deb_fn () {
+ from bb import data
+ bb.data.setVar('PKGFN', bb.data.getVar('PKG',d), d)
+}
+
+addtask package_deb_install
+python do_package_deb_install () {
+ import os, sys, commands
+ pkg = bb.data.getVar('PKG', d, 1)
+ pkgfn = bb.data.getVar('PKGFN', d, 1)
+ rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
+ debdir = bb.data.getVar('DEPLOY_DIR_DEB', d, 1)
+ stagingdir = bb.data.getVar('STAGING_DIR', d, 1)
+ stagingbindir = bb.data.getVar('STAGING_BINDIR_NATIVE', d, 1)
+ tmpdir = bb.data.getVar('TMPDIR', d, 1)
+
+ if None in (pkg,pkgfn,rootfs):
+ raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGE_ROOTFS)")
+ try:
+ if not os.path.exists(rootfs):
+ os.makedirs(rootfs)
+ os.chdir(rootfs)
+ except OSError:
+ raise bb.build.FuncFailed(str(sys.exc_value))
+
+ # update packages file
+ (exitstatus, output) = commands.getstatusoutput('dpkg-scanpackages %s > %s/Packages' % (debdir, debdir))
+ if (exitstatus != 0 ):
+ raise bb.build.FuncFailed(output)
+
+ f = open(os.path.join(tmpdir, "stamps", "do_packages"), "w")
+ f.close()
+
+ # NOTE: this env stuff is racy at best, we need something more capable
+ # than 'commands' for command execution, which includes manipulating the
+ # env of the fork+execve'd process
+
+ # Set up environment
+ apt_config = os.getenv('APT_CONFIG')
+ os.putenv('APT_CONFIG', os.path.join(stagingdir, 'etc', 'apt', 'apt.conf'))
+ path = os.getenv('PATH')
+ os.putenv('PATH', '%s:%s' % (stagingbindir, os.getenv('PATH')))
+
+ # install package
+ commands.getstatusoutput('apt-get update')
+ commands.getstatusoutput('apt-get install -y %s' % pkgfn)
+
+ # revert environment
+ os.putenv('APT_CONFIG', apt_config)
+ os.putenv('PATH', path)
+}
+
+python do_package_deb () {
+ import copy # to back up env data
+ import sys
+ import re
+
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ import os # path manipulations
+ outdir = bb.data.getVar('DEPLOY_DIR_DEB', d, 1)
+ if not outdir:
+ bb.error("DEPLOY_DIR_DEB not defined, unable to package")
+ return
+
+ dvar = bb.data.getVar('D', d, 1)
+ if not dvar:
+ bb.error("D not defined, unable to package")
+ return
+ bb.mkdirhier(dvar)
+
+ packages = bb.data.getVar('PACKAGES', d, 1)
+ if not packages:
+ bb.debug(1, "PACKAGES not defined, nothing to package")
+ return
+
+ tmpdir = bb.data.getVar('TMPDIR', d, 1)
+ # Invalidate the packages file
+ if os.access(os.path.join(tmpdir, "stamps", "do_packages"),os.R_OK):
+ os.unlink(os.path.join(tmpdir, "stamps", "do_packages"))
+
+ if packages == []:
+ bb.debug(1, "No packages; nothing to do")
+ return
+
+ for pkg in packages.split():
+ localdata = bb.data.createCopy(d)
+ root = "%s/install/%s" % (workdir, pkg)
+
+ bb.data.setVar('ROOT', '', localdata)
+ bb.data.setVar('ROOT_%s' % pkg, root, localdata)
+ pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+ if not pkgname:
+ pkgname = pkg
+ bb.data.setVar('PKG', pkgname, localdata)
+
+ overrides = bb.data.getVar('OVERRIDES', localdata)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+ overrides = bb.data.expand(overrides, localdata)
+ bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+
+ bb.data.update_data(localdata)
+ basedir = os.path.join(os.path.dirname(root))
+
+ pkgoutdir = os.path.join(outdir, bb.data.getVar('PACKAGE_ARCH', localdata, 1))
+ bb.mkdirhier(pkgoutdir)
+
+ os.chdir(root)
+ from glob import glob
+ g = glob('*')
+ try:
+ del g[g.index('DEBIAN')]
+ del g[g.index('./DEBIAN')]
+ except ValueError:
+ pass
+ if not g and not bb.data.getVar('ALLOW_EMPTY', localdata):
+ from bb import note
+ note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+ continue
+ controldir = os.path.join(root, 'DEBIAN')
+ bb.mkdirhier(controldir)
+ try:
+ ctrlfile = file(os.path.join(controldir, 'control'), 'wb')
+ # import codecs
+ # ctrlfile = codecs.open("someFile", "w", "utf-8")
+ except OSError:
+ raise bb.build.FuncFailed("unable to open control file for writing.")
+
+ fields = []
+ fields.append(["Version: %s-%s\n", ['PV', 'PR']])
+ fields.append(["Description: %s\n", ['DESCRIPTION']])
+ fields.append(["Section: %s\n", ['SECTION']])
+ fields.append(["Priority: %s\n", ['PRIORITY']])
+ fields.append(["Maintainer: %s\n", ['MAINTAINER']])
+ fields.append(["Architecture: %s\n", ['TARGET_ARCH']])
+ fields.append(["OE: %s\n", ['P']])
+ fields.append(["Homepage: %s\n", ['HOMEPAGE']])
+
+# Package, Version, Maintainer, Description - mandatory
+# Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
+
+
+ def pullData(l, d):
+ l2 = []
+ for i in l:
+ data = bb.data.getVar(i, d, 1)
+ if data is None:
+ raise KeyError(i)
+ if i == 'TARGET_ARCH' and bb.data.getVar('PACKAGE_ARCH', d, 1) == 'all':
+ data = 'all'
+ l2.append(data)
+ return l2
+
+ ctrlfile.write("Package: %s\n" % pkgname)
+ # check for required fields
+ try:
+ for (c, fs) in fields:
+ ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
+ except KeyError:
+ (type, value, traceback) = sys.exc_info()
+ ctrlfile.close()
+ raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
+ # more fields
+
+ bb.build.exec_func("mapping_rename_hook", localdata)
+
+ rdepends = explode_deps(unicode(bb.data.getVar("RDEPENDS", localdata, 1) or ""))
+ rdepends = [dep for dep in rdepends if not '*' in dep]
+ rrecommends = explode_deps(unicode(bb.data.getVar("RRECOMMENDS", localdata, 1) or ""))
+ rrecommends = [rec for rec in rrecommends if not '*' in rec]
+ rsuggests = (unicode(bb.data.getVar("RSUGGESTS", localdata, 1) or "")).split()
+ rprovides = (unicode(bb.data.getVar("RPROVIDES", localdata, 1) or "")).split()
+ rreplaces = (unicode(bb.data.getVar("RREPLACES", localdata, 1) or "")).split()
+ rconflicts = (unicode(bb.data.getVar("RCONFLICTS", localdata, 1) or "")).split()
+ if rdepends:
+ ctrlfile.write(u"Depends: %s\n" % ", ".join(rdepends))
+ if rsuggests:
+ ctrlfile.write(u"Suggests: %s\n" % ", ".join(rsuggests))
+ if rrecommends:
+ ctrlfile.write(u"Recommends: %s\n" % ", ".join(rrecommends))
+ if rprovides:
+ ctrlfile.write(u"Provides: %s\n" % ", ".join(rprovides))
+ if rreplaces:
+ ctrlfile.write(u"Replaces: %s\n" % ", ".join(rreplaces))
+ if rconflicts:
+ ctrlfile.write(u"Conflicts: %s\n" % ", ".join(rconflicts))
+ ctrlfile.close()
+
+ for script in ["preinst", "postinst", "prerm", "postrm"]:
+ scriptvar = bb.data.getVar('pkg_%s' % script, localdata, 1)
+ if not scriptvar:
+ continue
+ try:
+ scriptfile = file(os.path.join(controldir, script), 'w')
+ except OSError:
+ raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+ scriptfile.write(scriptvar)
+ scriptfile.close()
+ os.chmod(os.path.join(controldir, script), 0755)
+
+ conffiles_str = bb.data.getVar("CONFFILES", localdata, 1)
+ if conffiles_str:
+ try:
+ conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
+ except OSError:
+ raise bb.build.FuncFailed("unable to open conffiles for writing.")
+ for f in conffiles_str.split():
+ conffiles.write('%s\n' % f)
+ conffiles.close()
+
+ os.chdir(basedir)
+ ret = os.system("PATH=\"%s\" fakeroot dpkg-deb -b %s %s" % (bb.data.getVar("PATH", localdata, 1), root, pkgoutdir))
+ if ret != 0:
+ raise bb.build.FuncFailed("dpkg-deb execution failed")
+
+ for script in ["preinst", "postinst", "prerm", "postrm", "control" ]:
+ scriptfile = os.path.join(controldir, script)
+ try:
+ os.remove(scriptfile)
+ except OSError:
+ pass
+ try:
+ os.rmdir(controldir)
+ except OSError:
+ pass
+ del localdata
+}
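
The dependency handling in do_package_deb boils down to: explode the RDEPENDS/RRECOMMENDS strings, drop wildcard entries, and emit comma-separated control fields. A minimal sketch of that step; the whitespace split below is a simplified stand-in for explode_deps():

    def control_dep_field(field_name, deps_string):
        # crude stand-in for explode_deps(): whitespace split, wildcards dropped
        deps = [dep for dep in deps_string.split() if "*" not in dep]
        if not deps:
            return ""
        return "%s: %s\n" % (field_name, ", ".join(deps))

    # control_dep_field("Depends", "libc6 update-rc.d busybox-*")
    #   -> "Depends: libc6, update-rc.d\n"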
diff --git a/classes/rootfs_deb.bbclass b/classes/rootfs_deb.bbclass
new file mode 100644
index 0000000000..59909d6852
--- /dev/null
+++ b/classes/rootfs_deb.bbclass
@@ -0,0 +1,136 @@
+DEPENDS_prepend = "dpkg-native apt-native fakeroot-native "
+DEPENDS_append = " ${EXTRA_IMAGEDEPENDS}"
+
+fakeroot rootfs_deb_do_rootfs () {
+ set +e
+ mkdir -p ${IMAGE_ROOTFS}/var/dpkg/{info,updates}
+
+ rm -f ${STAGING_DIR}/etc/apt/sources.list.rev
+ rm -f ${STAGING_DIR}/etc/apt/preferences
+ > ${IMAGE_ROOTFS}/var/dpkg/status
+ > ${IMAGE_ROOTFS}/var/dpkg/available
+ # > ${STAGING_DIR}/var/dpkg/status
+
+ priority=1
+ for arch in ${PACKAGE_ARCHS}; do
+ if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
+ continue;
+ fi
+ cd ${DEPLOY_DIR_DEB}/$arch
+ # if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
+ rm -f Packages.gz Packages Packages.bz2
+ # fi
+ apt-ftparchive packages . | bzip2 > Packages.bz2
+ echo "Label: $arch" > Release
+
+ echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_DIR}/etc/apt/sources.list.rev
+ (echo "Package: *"
+ echo "Pin: release l=$arch"
+ echo "Pin-Priority: $((800 + $priority))"
+ echo) >> ${STAGING_DIR}/etc/apt/preferences
+ priority=$(expr $priority + 5)
+ done
+
+ tac ${STAGING_DIR}/etc/apt/sources.list.rev > ${STAGING_DIR}/etc/apt/sources.list
+
+ cat "${STAGING_DIR}/etc/apt/apt.conf.sample" \
+ | sed -e 's#Architecture ".*";#Architecture "${TARGET_ARCH}";#' \
+ > "${STAGING_DIR}/etc/apt/apt-rootfs.conf"
+
+ export APT_CONFIG="${STAGING_DIR}/etc/apt/apt-rootfs.conf"
+ export D=${IMAGE_ROOTFS}
+ export OFFLINE_ROOT=${IMAGE_ROOTFS}
+ export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
+
+ apt-get update
+
+ _flag () {
+ sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/dpkg/status
+ }
+ _getflag () {
+ cat ${IMAGE_ROOTFS}/var/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
+ }
+
+ if [ ! -z "${LINGUAS_INSTALL}" ]; then
+ apt-get install glibc-localedata-i18n
+ if [ $? -eq 1 ]; then
+ exit 1
+ fi
+ for i in ${LINGUAS_INSTALL}; do
+ apt-get install $i
+ if [ $? -eq 1 ]; then
+ exit 1
+ fi
+ done
+ fi
+
+ if [ ! -z "${PACKAGE_INSTALL}" ]; then
+ for i in ${PACKAGE_INSTALL}; do
+ apt-get install $i
+ if [ $? -eq 1 ]; then
+ exit 1
+ fi
+ find ${IMAGE_ROOTFS} -name \*.dpkg-new | for i in `cat`; do
+ mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
+ done
+ done
+ fi
+
+ install -d ${IMAGE_ROOTFS}/${sysconfdir}
+ echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
+
+ # Mark all packages installed
+ sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${IMAGE_ROOTFS}/var/dpkg/status
+
+ # Attempt to run preinsts
+ # Mark packages with preinst failures as unpacked
+ for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.preinst; do
+ if [ -f $i ] && ! sh $i; then
+ _flag unpacked `basename $i .preinst`
+ fi
+ done
+
+ # Attempt to run postinsts
+ # Mark packages with postinst failures as unpacked
+ for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.postinst; do
+ if [ -f $i ] && ! sh $i configure; then
+ _flag unpacked `basename $i .postinst`
+ fi
+ done
+
+ set -e
+
+ # Hacks to make dpkg/ipkg coexist for now
+ mv ${IMAGE_ROOTFS}/var/dpkg ${IMAGE_ROOTFS}/usr/
+ if [ -e ${IMAGE_ROOTFS}/usr/dpkg/alternatives ]; then
+ rmdir ${IMAGE_ROOTFS}/usr/dpkg/alternatives
+ fi
+ ln -s /usr/lib/ipkg/alternatives ${IMAGE_ROOTFS}/usr/dpkg/alternatives
+ ln -s /usr/dpkg/info ${IMAGE_ROOTFS}/usr/lib/ipkg/info
+ ln -s /usr/dpkg/status ${IMAGE_ROOTFS}/usr/lib/ipkg/status
+
+ ${ROOTFS_POSTPROCESS_COMMAND}
+
+ log_check rootfs
+}
+
+rootfs_deb_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "E:"
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
+
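
The preferences file written by rootfs_deb_do_rootfs pins each architecture feed at 800 plus a counter that starts at 1 and grows by 5 per feed, so feeds later in PACKAGE_ARCHS (usually the more machine-specific ones) win when a package exists in several of them. The same numbering as a plain Python sketch:

    def pin_priorities(package_archs):
        # mirrors: priority=1; emit 800+priority per arch; priority=priority+5
        priorities = {}
        priority = 1
        for arch in package_archs.split():
            priorities[arch] = 800 + priority
            priority += 5
        return priorities

    # pin_priorities("all arm armv5te foo-machine")
    #   -> {'all': 801, 'arm': 806, 'armv5te': 811, 'foo-machine': 816}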
diff --git a/classes/rootfs_ipk.bbclass b/classes/rootfs_ipk.bbclass
index 8020fd0ca3..03ba5f86b7 100644
--- a/classes/rootfs_ipk.bbclass
+++ b/classes/rootfs_ipk.bbclass
@@ -2,40 +2,22 @@
# Creates a root filesystem out of IPKs
#
# This rootfs can be mounted via root-nfs or it can be put into an cramfs/jffs etc.
-# See image_ipk.oeclass for a usage of this.
+# See image.bbclass for a usage of this.
#
DEPENDS_prepend="ipkg-native ipkg-utils-native fakeroot-native "
DEPENDS_append=" ${EXTRA_IMAGEDEPENDS}"
RDEPENDS += "ipkg ipkg-collateral"
-PACKAGES = ""
-
-do_rootfs[nostamp] = "1"
-do_rootfs[dirs] = "${TOPDIR}"
-do_build[nostamp] = "1"
-
IPKG_ARGS = "-f ${T}/ipkg.conf -o ${IMAGE_ROOTFS}"
-IPKG_INSTALL += "ipkg ipkg-collateral"
-
-ROOTFS_POSTPROCESS_COMMAND ?= ""
-
-PID = "${@os.getpid()}"
+PACKAGE_INSTALL += "ipkg ipkg-collateral"
-# some default locales
-IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
-
-LINGUAS_INSTALL = "${@" ".join(map(lambda s: "locale-base-%s" % s, bb.data.getVar('IMAGE_LINGUAS', d, 1).split()))}"
-
-real_do_rootfs () {
+fakeroot rootfs_ipk_do_rootfs () {
set -x
mkdir -p ${IMAGE_ROOTFS}/dev
- #work around a build in ipkg-make-index
- touch ${DEPLOY_DIR_IPK}/Packages
-
if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
touch ${DEPLOY_DIR_IPK}/Packages
ipkg-make-index -r ${DEPLOY_DIR_IPK}/Packages -p ${DEPLOY_DIR_IPK}/Packages -l ${DEPLOY_DIR_IPK}/Packages.filelist -m ${DEPLOY_DIR_IPK}
@@ -55,11 +37,12 @@ real_do_rootfs () {
ipkg-cl ${IPKG_ARGS} install $i
done
fi
- if [ ! -z "${IPKG_INSTALL}" ]; then
- ipkg-cl ${IPKG_ARGS} install ${IPKG_INSTALL}
+ if [ ! -z "${PACKAGE_INSTALL}" ]; then
+ ipkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL}
fi
export D=${IMAGE_ROOTFS}
+ export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
mkdir -p ${IMAGE_ROOTFS}/etc/ipkg/
grep "^arch" ${T}/ipkg.conf >${IMAGE_ROOTFS}/etc/ipkg/arch.conf
@@ -83,75 +66,22 @@ real_do_rootfs () {
log_check rootfs
}
-log_check() {
- set +x
- for target in $*
- do
- lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
-
- echo "log_check: Using $lf_path as logfile"
-
- if test -e "$lf_path"
+rootfs_ipk_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "Cannot find package" "exit 1" ERR Fail
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
then
- lf_txt="`cat $lf_path`"
-
- for keyword_die in "Cannot find package" "exit 1" ERR Fail
- do
-
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -i "$keyword_die" -C1
- echo ""
- do_exit=1
- fi
- done
- test "$do_exit" = 1 && exit 1
- else
- echo "Cannot find logfile [$lf_path]"
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -i "$keyword_die" -C1
+ echo ""
+ do_exit=1
fi
- echo "Logfile is clean"
done
-
- set -x
-
+ test "$do_exit" = 1 && exit 1
+ true
}
-
-fakeroot do_rootfs () {
- rm -rf ${IMAGE_ROOTFS}
- real_do_rootfs
-}
-
-# set '*' as the rootpassword so the images
-# can decide if they want it or not
-
-zap_root_password () {
- sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
- mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
-}
-
-create_etc_timestamp() {
- date +%2m%2d%2H%2M%Y >${IMAGE_ROOTFS}/etc/timestamp
-}
-
-# Turn any symbolic /sbin/init link into a file
-remove_init_link () {
- if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
- LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
- rm ${IMAGE_ROOTFS}/sbin/init
- cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
- fi
-}
-
-make_zimage_symlink_relative () {
- if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
- (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
- fi
-}
-
-# export the zap_root_password, create_etc_timestamp and remote_init_link
-EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link make_zimage_symlink_relative
-
-
-addtask rootfs before do_build after do_install
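
Both rootfs_*_log_check implementations follow the same pattern: read the task log, ignore the log_check chatter itself, and fail if any die keyword appears. A Python equivalent of that scan, with the keywords taken from the ipk variant above:

    def log_has_errors(log_text,
                       keywords=("Cannot find package", "exit 1", "ERR", "Fail")):
        # skip lines produced by log_check itself, then look for fatal keywords
        lines = [line for line in log_text.splitlines() if "log_check" not in line]
        return any(keyword in line for line in lines for keyword in keywords)

    # if log_has_errors(open(lf_path).read()): raise SystemExit(1)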
diff --git a/classes/siteinfo.bbclass b/classes/siteinfo.bbclass
index 5a37768b52..94d25bf56f 100644
--- a/classes/siteinfo.bbclass
+++ b/classes/siteinfo.bbclass
@@ -49,6 +49,7 @@ def get_siteinfo_list(d):
if target in targetinfo:
info = targetinfo[target].split()
info.append(target)
+ info.append("common")
return info
else:
bb.error("Information not available for target '%s'" % target)
@@ -90,7 +91,7 @@ def siteinfo_get_files(d):
if os.path.exists(fname):
sitefiles += fname + " "
- bb.note("SITE files " + sitefiles);
+ bb.debug(1, "SITE files " + sitefiles);
return sitefiles
#
diff --git a/classes/wrt-image.bbclass b/classes/wrt-image.bbclass
index ba1163a719..45d9ac923f 100644
--- a/classes/wrt-image.bbclass
+++ b/classes/wrt-image.bbclass
@@ -3,7 +3,7 @@ ROOTFS_POSTPROCESS_COMMAND += "rm -f ${IMAGE_ROOTFS}/boot/zImage*"
def wrt_get_kernel_version(d):
import bb
- if bb.data.inherits_class('image_ipk', d):
+ if bb.data.inherits_class('image', d):
skd = bb.data.getVar('STAGING_KERNEL_DIR', d, 1)
return base_read_file(skd+'/kernel-abiversion')
return "-no kernel version for available-"