author    Richard Purdie <richard@openedhand.com>    2006-11-29 22:52:37 +0000
committer Richard Purdie <richard@openedhand.com>    2006-11-29 22:52:37 +0000
commit    681d6c18ad59dac9e53f769a568835241d7fa9b7 (patch)
tree      243418a546b89650d28580f7721b8324586146e4
parent    adabf6c0931af1282a7c75321cd8b050e8d05c95 (diff)
download  openembedded-core-681d6c18ad59dac9e53f769a568835241d7fa9b7.tar.gz
          openembedded-core-681d6c18ad59dac9e53f769a568835241d7fa9b7.tar.bz2
          openembedded-core-681d6c18ad59dac9e53f769a568835241d7fa9b7.zip
bitbake: Sync with bitbake trunk for bugfixes and improved dot file generation code
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@987 311d38ba-8fff-0310-9ca6-ca027cbcb966
-rwxr-xr-x  bitbake/bin/bitbake                          223
-rw-r--r--  bitbake/lib/bb/data.py                         4
-rw-r--r--  bitbake/lib/bb/methodpool.py                   1
-rw-r--r--  bitbake/lib/bb/parse/parse_py/BBHandler.py    77
-rw-r--r--  bitbake/lib/bb/runqueue.py                    72
-rw-r--r--  bitbake/lib/bb/shell.py                        5
6 files changed, 155 insertions(+), 227 deletions(-)
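
The bulk of the change replaces the hand-rolled DEPENDS/RDEPENDS walker in bin/bitbake with graph generation driven by bb.taskdata and bb.runqueue. For orientation, here is a minimal sketch of the kind of Graphviz text the reworked generateDotGraph() writes out; every recipe name and version below is invented, solid edges stand for build-time dependencies, dashed edges for runtime ones, and dot -Tpng depends.dot -o depends.png renders the result:

    # Sketch only: the node/edge layout mirrors what the patched code prints,
    # but the recipe names and versions are invented for illustration.
    lines = [
        'digraph depends {',
        '"busybox" [label="busybox 1.2.1-r2\\nbusybox_1.2.1.bb"]',
        '"busybox" -> "virtual/libc"',
        '"busybox" -> "update-rc.d" [style=dashed]',
        '}',
    ]
    with open('depends.dot', 'w') as depends_file:
        depends_file.write('\n'.join(lines) + '\n')
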
diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake
index 85a0cbc398..36322d2a0e 100755
--- a/bitbake/bin/bitbake
+++ b/bitbake/bin/bitbake
@@ -186,171 +186,82 @@ class BBCooker:
def generateDotGraph( self, pkgs_to_build, ignore_deps ):
"""
- Generate two graphs one for the DEPENDS and RDEPENDS. The current
- implementation creates crappy graphs ;)
+ Generate a task dependency graph.
pkgs_to_build A list of packages that needs to be built
ignore_deps A list of names where processing of dependencies
should be stopped. e.g. dependencies that get
"""
- def myFilterProvider( providers, item):
- """
- Take a list of providers and filter according to environment
- variables. In contrast to filterProviders we do not discriminate
- and take PREFERRED_PROVIDER into account.
- """
- eligible = []
- preferred_versions = {}
-
- # Collate providers by PN
- pkg_pn = {}
- for p in providers:
- pn = self.status.pkg_fn[p]
- if pn not in pkg_pn:
- pkg_pn[pn] = []
- pkg_pn[pn].append(p)
-
- bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))
-
- for pn in pkg_pn.keys():
- preferred_versions[pn] = bb.providers.findBestProvider(pn, self.configuration.data, self.status, pkg_pn)[2:4]
- eligible.append(preferred_versions[pn][1])
-
- for p in eligible:
- if p in self.build_cache_fail:
- bb.msg.debug(1, bb.msg.domain.Provider, "rejecting already-failed %s" % p)
- eligible.remove(p)
-
- if len(eligible) == 0:
- bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
- return 0
-
- prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1)
-
- # try the preferred provider first
- if prefervar:
- for p in eligible:
- if prefervar == self.status.pkg_fn[p]:
- bb.msg.note(1, bb.msg.domain.Provider, "Selecting PREFERRED_PROVIDER %s" % prefervar)
- eligible.remove(p)
- eligible = [p] + eligible
-
- return eligible
-
-
- # try to avoid adding the same rdepends over an over again
- seen_depends = []
- seen_rdepends = []
-
-
- def add_depends(package_list):
- """
- Add all depends of all packages from this list
- """
- for package in package_list:
- if package in seen_depends or package in ignore_deps:
- continue
-
- seen_depends.append( package )
- if not package in self.status.providers:
- """
- We have not seen this name -> error in
- dependency handling
- """
- bb.msg.note(1, bb.msg.domain.Depends, "ERROR with provider: %(package)s" % vars() )
- print >> depends_file, '"%(package)s" -> ERROR' % vars()
- continue
-
- # get all providers for this package
- providers = self.status.providers[package]
-
- # now let us find the bestProvider for it
- fn = myFilterProvider(providers, package)[0]
-
- depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "")
- version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True)
- add_depends ( depends )
-
- # now create the node
- print >> depends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars()
-
- depends = filter( (lambda x: x not in ignore_deps), depends )
- for depend in depends:
- print >> depends_file, '"%(package)s" -> "%(depend)s"' % vars()
-
-
- def add_all_depends( the_depends, the_rdepends ):
- """
- Add both DEPENDS and RDEPENDS. RDEPENDS will get dashed
- lines
- """
- package_list = the_depends + the_rdepends
- for package in package_list:
- if package in seen_rdepends or package in ignore_deps:
- continue
-
- seen_rdepends.append( package )
-
- # Let us find out if the package is a DEPENDS or RDEPENDS
- # and we will set 'providers' with the avilable providers
- # for the package.
- if package in the_depends:
- if not package in self.status.providers:
- bb.msg.note(1, bb.msg.domain.Depends, "ERROR with provider: %(package)s" % vars() )
- print >> alldepends_file, '"%(package)s" -> ERROR' % vars()
- continue
-
- providers = self.status.providers[package]
- elif package in the_rdepends:
- if len(bb.providers.getRuntimeProviders(self.status, package)) == 0:
- bb.msg.note(1, bb.msg.domain.Depends, "ERROR with rprovider: %(package)s" % vars() )
- print >> alldepends_file, '"%(package)s" -> ERROR [style="dashed"]' % vars()
- continue
-
- providers = bb.providers.getRuntimeProviders(self.status, package)
- else:
- # something went wrong...
- print "Complete ERROR! %s" % package
- continue
-
- # now let us find the bestProvider for it
- fn = myFilterProvider(providers, package)[0]
-
- # Now we have a filename let us get the depends and RDEPENDS of it
- depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "")
- if fn in self.status.rundeps and package in self.status.rundeps[fn]:
- rdepends= self.status.rundeps[fn][package].keys()
- else:
- rdepends = []
- version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True)
+ for dep in ignore_deps:
+ self.status.ignored_dependencies.add(dep)
- # handle all the depends and rdepends of package
- add_all_depends ( depends, rdepends )
-
- # now create the node using package name
- print >> alldepends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars()
-
- # remove the stuff we want to ignore and add the edges
- depends = filter( (lambda x: x not in ignore_deps), depends )
- rdepends = filter( (lambda x: x not in ignore_deps), rdepends )
- for depend in depends:
- print >> alldepends_file, '"%(package)s" -> "%(depend)s"' % vars()
- for depend in rdepends:
- print >> alldepends_file, '"%(package)s" -> "%(depend)s" [style=dashed]' % vars()
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.update_data(localdata)
+ bb.data.expandKeys(localdata)
+ taskdata = bb.taskdata.TaskData(self.configuration.abort)
+ runlist = []
+ try:
+ for k in pkgs_to_build:
+ taskdata.add_provider(localdata, self.status, k)
+ runlist.append([k, "do_%s" % self.configuration.cmd])
+ taskdata.add_unresolved(localdata, self.status)
+ except bb.providers.NoProvider:
+ sys.exit(1)
+ rq = bb.runqueue.RunQueue()
+ rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist)
- # Add depends now
+ seen_fnids = []
depends_file = file('depends.dot', 'w' )
+ tdepends_file = file('task-depends.dot', 'w' )
print >> depends_file, "digraph depends {"
- add_depends( pkgs_to_build )
+ print >> tdepends_file, "digraph depends {"
+ rq.prio_map.reverse()
+ for task1 in range(len(rq.runq_fnid)):
+ task = rq.prio_map[task1]
+ taskname = rq.runq_task[task]
+ fnid = rq.runq_fnid[task]
+ fn = taskdata.fn_index[fnid]
+ pn = self.status.pkg_fn[fn]
+ version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True)
+ print >> tdepends_file, '"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn)
+ for dep in rq.runq_depends[task]:
+ depfn = taskdata.fn_index[rq.runq_fnid[dep]]
+ deppn = self.status.pkg_fn[depfn]
+ print >> tdepends_file, '"%s.%s" -> "%s.%s"' % (pn, rq.runq_task[task], deppn, rq.runq_task[dep])
+ if fnid not in seen_fnids:
+ seen_fnids.append(fnid)
+ packages = []
+ print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
+ for depend in self.status.deps[fn]:
+ print >> depends_file, '"%s" -> "%s"' % (pn, depend)
+ rdepends = self.status.rundeps[fn]
+ for package in rdepends:
+ for rdepend in rdepends[package]:
+ print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
+ packages.append(package)
+ rrecs = self.status.runrecs[fn]
+ for package in rrecs:
+ for rdepend in rrecs[package]:
+ print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
+ if not package in packages:
+ packages.append(package)
+ for package in packages:
+ if package != pn:
+ print >> depends_file, '"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn)
+ for depend in self.status.deps[fn]:
+ print >> depends_file, '"%s" -> "%s"' % (package, depend)
+ # Prints a flattened form of the above where subpackages of a package are merged into the main pn
+ #print >> depends_file, '"%s" [label="%s %s\\n%s\\n%s"]' % (pn, pn, taskname, version, fn)
+ #for rdep in taskdata.rdepids[fnid]:
+ # print >> depends_file, '"%s" -> "%s" [style=dashed]' % (pn, taskdata.run_names_index[rdep])
+ #for dep in taskdata.depids[fnid]:
+ # print >> depends_file, '"%s" -> "%s"' % (pn, taskdata.build_names_index[dep])
print >> depends_file, "}"
-
- # Add all depends now
- alldepends_file = file('alldepends.dot', 'w' )
- print >> alldepends_file, "digraph alldepends {"
- add_all_depends( pkgs_to_build, [] )
- print >> alldepends_file, "}"
+ print >> tdepends_file, "}"
+ bb.msg.note(1, bb.msg.domain.Collection, "Dependencies saved to 'depends.dot'")
+ bb.msg.note(1, bb.msg.domain.Collection, "Task dependencies saved to 'task-depends.dot'")
def buildDepgraph( self ):
all_depends = self.status.all_depends
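
The rewritten hunk above no longer recurses through providers itself; it walks the run queue's parallel lists after prepare_runqueue() has resolved everything. Below is a standalone model of that layout, with invented data, showing how a task-depends.dot is assembled: runq_fnid, runq_task and runq_depends are indexed by task number, and fn_index maps a file id back to its recipe.

    fn_index     = ["glibc_2.4.bb", "busybox_1.2.1.bb"]
    pkg_fn       = {"glibc_2.4.bb": "glibc", "busybox_1.2.1.bb": "busybox"}
    runq_fnid    = [0, 0, 1]                     # which recipe each task belongs to
    runq_task    = ["do_compile", "do_install", "do_compile"]
    runq_depends = [set(), set([0]), set([1])]   # dependencies, as task numbers

    with open('task-depends.dot', 'w') as tdepends_file:
        tdepends_file.write("digraph depends {\n")
        for task in range(len(runq_fnid)):
            pn = pkg_fn[fn_index[runq_fnid[task]]]
            tdepends_file.write('"%s.%s" [label="%s %s"]\n' % (pn, runq_task[task], pn, runq_task[task]))
            for dep in runq_depends[task]:
                deppn = pkg_fn[fn_index[runq_fnid[dep]]]
                tdepends_file.write('"%s.%s" -> "%s.%s"\n' % (pn, runq_task[task], deppn, runq_task[dep]))
        tdepends_file.write("}\n")
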
@@ -643,10 +554,10 @@ class BBCooker:
rq.prepare_runqueue(self.configuration.data, self.status, taskdata, runlist)
try:
failures = rq.execute_runqueue(self, self.configuration.data, self.status, taskdata, runlist)
- except runqueue.TaskFailure, (fnid, fn, taskname):
- bb.msg.error(bb.msg.domain.Build, "'%s, %s' failed" % (fn, taskname))
+ except runqueue.TaskFailure, fnids:
+ for fnid in fnids:
+ bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
sys.exit(1)
-
bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures))
sys.exit( self.stats.show() )
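
This hunk tracks the TaskFailure change made in runqueue.py further down: the exception now carries the whole list of failed file ids instead of a single (fnid, fn, taskname) tuple, so the caller loops over it. A self-contained sketch of that reporting pattern follows; the fn_index contents are invented, and an explicit attribute is used instead of the Python 2 tuple-unpacking except syntax so it runs on modern Python as well.

    class TaskFailure(Exception):
        """Raised once per run with the ids of every task that failed."""
        def __init__(self, fnids):
            Exception.__init__(self)
            self.fnids = fnids

    fn_index = {7: "glibc_2.4.bb", 12: "busybox_1.2.1.bb"}   # invented mapping

    try:
        raise TaskFailure([7, 12])
    except TaskFailure as exc:
        for fnid in exc.fnids:
            print("ERROR: '%s' failed" % fn_index[fnid])
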
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index 19066c9adc..9f7e4be4c8 100644
--- a/bitbake/lib/bb/data.py
+++ b/bitbake/lib/bb/data.py
@@ -542,8 +542,8 @@ def update_data(d):
def inherits_class(klass, d):
- val = getVar('__inherit_cache', d) or ""
- if os.path.join('classes', '%s.bbclass' % klass) in val.split():
+ val = getVar('__inherit_cache', d) or []
+ if os.path.join('classes', '%s.bbclass' % klass) in val:
return True
return False
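
__inherit_cache is now kept as a real list of class paths rather than a space-separated string, so inherits_class() can test membership directly without splitting. A small standalone illustration, with invented cache contents:

    import os

    def inherits_class(klass, inherit_cache):
        # mirrors the patched check; inherit_cache stands in for
        # getVar('__inherit_cache', d)
        return os.path.join('classes', '%s.bbclass' % klass) in inherit_cache

    cache = [os.path.join('classes', c) for c in ('base.bbclass', 'autotools.bbclass')]
    print(inherits_class('autotools', cache))   # True
    print(inherits_class('kernel26', cache))    # False
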
diff --git a/bitbake/lib/bb/methodpool.py b/bitbake/lib/bb/methodpool.py
index e14986bc19..f0565ce790 100644
--- a/bitbake/lib/bb/methodpool.py
+++ b/bitbake/lib/bb/methodpool.py
@@ -83,6 +83,7 @@ def check_insert_method(modulename, code, fn):
"""
if not modulename in _parsed_methods:
return insert_method(modulename, code, fn)
+ _parsed_methods[modulename] = 1
def parsed_module(modulename):
"""
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index 34f4d25996..42b0369428 100644
--- a/bitbake/lib/bb/parse/parse_py/BBHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py
@@ -40,7 +40,6 @@ __word__ = re.compile(r"\S+")
__infunc__ = ""
__inpython__ = False
__body__ = []
-__bbpath_found__ = 0
__classname__ = ""
classes = [ None, ]
@@ -58,25 +57,24 @@ def supports(fn, d):
return localfn[-3:] == ".bb" or localfn[-8:] == ".bbclass" or localfn[-4:] == ".inc"
def inherit(files, d):
- __inherit_cache = data.getVar('__inherit_cache', d) or ""
+ __inherit_cache = data.getVar('__inherit_cache', d) or []
fn = ""
lineno = 0
- for f in files:
- file = data.expand(f, d)
+ files = data.expand(files, d)
+ for file in files:
if file[0] != "/" and file[-8:] != ".bbclass":
file = os.path.join('classes', '%s.bbclass' % file)
- if not file in __inherit_cache.split():
+ if not file in __inherit_cache:
bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file))
- __inherit_cache += " %s" % file
+ __inherit_cache.append( file )
include(fn, file, d, "inherit")
data.setVar('__inherit_cache', __inherit_cache, d)
def handle(fn, d, include = 0):
- global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __bbpath_found__, __residue__
+ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__
__body__ = []
- __bbpath_found__ = 0
__infunc__ = ""
__classname__ = ""
__residue__ = []
@@ -104,7 +102,6 @@ def handle(fn, d, include = 0):
if not os.path.isabs(fn):
f = None
for p in bbpath:
- p = data.expand(p, d)
j = os.path.join(p, fn)
if os.access(j, os.R_OK):
abs_fn = j
@@ -147,39 +144,35 @@ def handle(fn, d, include = 0):
data.expandKeys(d)
data.update_data(d)
anonqueue = data.getVar("__anonqueue", d, 1) or []
- for anon in anonqueue:
- data.setVar("__anonfunc", anon["content"], d)
- data.setVarFlags("__anonfunc", anon["flags"], d)
- from bb import build
- try:
- t = data.getVar('T', d)
- data.setVar('T', '${TMPDIR}/', d)
- build.exec_func("__anonfunc", d)
- data.delVar('T', d)
- if t:
- data.setVar('T', t, d)
- except Exception, e:
- bb.msg.debug(1, bb.msg.domain.Parsing, "executing anonymous function: %s" % e)
- raise
+ body = [x['content'] for x in anonqueue]
+ flag = { 'python' : 1, 'func' : 1 }
+ data.setVar("__anonfunc", "\n".join(body), d)
+ data.setVarFlags("__anonfunc", flag, d)
+ from bb import build
+ try:
+ t = data.getVar('T', d)
+ data.setVar('T', '${TMPDIR}/', d)
+ build.exec_func("__anonfunc", d)
+ data.delVar('T', d)
+ if t:
+ data.setVar('T', t, d)
+ except Exception, e:
+ bb.msg.debug(1, bb.msg.domain.Parsing, "executing anonymous function: %s" % e)
+ raise
data.delVar("__anonqueue", d)
data.delVar("__anonfunc", d)
set_additional_vars(fn, d, include)
data.update_data(d)
all_handlers = {}
- for var in data.keys(d):
+ for var in data.getVar('__BBHANDLERS', d) or []:
# try to add the handler
# if we added it remember the choice
- if data.getVarFlag(var, 'handler', d):
- handler = data.getVar(var,d)
- if bb.event.register(var,handler) == bb.event.Registered:
- all_handlers[var] = handler
-
- continue
-
- if not data.getVarFlag(var, 'task', d):
- continue
+ handler = data.getVar(var,d)
+ if bb.event.register(var,handler) == bb.event.Registered:
+ all_handlers[var] = handler
+ for var in data.getVar('__BBTASKS', d) or []:
deps = data.getVarFlag(var, 'deps', d) or []
postdeps = data.getVarFlag(var, 'postdeps', d) or []
bb.build.add_task(var, deps, d)
@@ -204,7 +197,7 @@ def handle(fn, d, include = 0):
return d
def feeder(lineno, s, fn, root, d):
- global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, __bbpath_found__, classes, bb, __residue__
+ global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__
if __infunc__:
if s == '}':
__body__.append('')
@@ -336,6 +329,10 @@ def feeder(lineno, s, fn, root, d):
data.setVarFlag(var, "task", 1, d)
+ bbtasks = data.getVar('__BBTASKS', d) or []
+ bbtasks.append(var)
+ data.setVar('__BBTASKS', bbtasks, d)
+
if after is not None:
# set up deps for function
data.setVarFlag(var, "deps", after.split(), d)
@@ -348,8 +345,11 @@ def feeder(lineno, s, fn, root, d):
if m:
fns = m.group(1)
hs = __word__.findall(fns)
+ bbhands = data.getVar('__BBHANDLERS', d) or []
for h in hs:
+ bbhands.append(h)
data.setVarFlag(h, "handler", 1, d)
+ data.setVar('__BBHANDLERS', bbhands, d)
return
m = __inherit_regexp__.match(s)
@@ -386,16 +386,11 @@ def set_additional_vars(file, d, include):
bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s: set_additional_vars" % file)
- src_uri = data.getVar('SRC_URI', d)
+ src_uri = data.getVar('SRC_URI', d, 1)
if not src_uri:
return
- src_uri = data.expand(src_uri, d)
- a = data.getVar('A', d)
- if a:
- a = data.expand(a, d).split()
- else:
- a = []
+ a = (data.getVar('A', d, 1) or '').split()
from bb import fetch
try:
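
Instead of scanning every datastore variable for 'task' or 'handler' flags, the parser now appends names to __BBTASKS and __BBHANDLERS as it encounters addtask/addhandler lines, and handle() walks just those lists. A toy sketch of the pattern, with an invented in-memory store standing in for bb.data:

    store = {}                      # invented stand-in for the bb.data datastore

    def setVar(key, value):
        store[key] = value

    def getVar(key, default=None):
        return store.get(key, default)

    def record_task(var):
        # what the patched feeder() does for an 'addtask' line
        bbtasks = getVar('__BBTASKS') or []
        bbtasks.append(var)
        setVar('__BBTASKS', bbtasks)

    record_task('do_fetch')
    record_task('do_compile')

    # handle() can now iterate the explicit list instead of every variable
    for var in getVar('__BBTASKS') or []:
        print("adding task %s" % var)
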
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 3dde9a9ffb..07821e23de 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -1,4 +1,4 @@
- #!/usr/bin/env python
+#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
@@ -25,9 +25,8 @@ import bb, os, sys
class TaskFailure(Exception):
"""Exception raised when a task in a runqueue fails"""
-
- def __init__(self, fnid, fn, taskname):
- self.args = fnid, fn, taskname
+ def __init__(self, x):
+ self.args = x
class RunQueue:
"""
@@ -319,21 +318,23 @@ class RunQueue:
failures = 0
while 1:
- try:
- self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData)
+ failed_fnids = self.execute_runqueue_internal(cooker, cfgData, dataCache, taskData)
+ if len(failed_fnids) == 0:
return failures
- except bb.runqueue.TaskFailure, (fnid, taskData.fn_index[fnid], taskname):
- if taskData.abort:
- raise
+ if taskData.abort:
+ raise bb.runqueue.TaskFailure(failed_fnids)
+ for fnid in failed_fnids:
+ #print "Failure: %s %s %s" % (fnid, taskData.fn_index[fnid], self.runq_task[fnid])
taskData.fail_fnid(fnid)
- self.reset_runqueue()
- self.prepare_runqueue(cfgData, dataCache, taskData, runlist)
failures = failures + 1
+ self.reset_runqueue()
+ self.prepare_runqueue(cfgData, dataCache, taskData, runlist)
def execute_runqueue_internal(self, cooker, cfgData, dataCache, taskData):
"""
Run the tasks in a queue prepared by prepare_runqueue
"""
+ import signal
bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
@@ -342,11 +343,15 @@ class RunQueue:
runq_complete = []
active_builds = 0
build_pids = {}
+ failed_fnids = []
if len(self.runq_fnid) == 0:
# nothing to do
return
+ def sigint_handler(signum, frame):
+ raise KeyboardInterrupt
+
def get_next_task(data):
"""
Return the id of the highest priority task that is buildable
@@ -414,6 +419,11 @@ class RunQueue:
except OSError, e:
bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
if pid == 0:
+ # Bypass finally below
+ active_builds = 0
+ # Stop Ctrl+C being sent to children
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ sys.stdin = open('/dev/null', 'r')
cooker.configuration.cmd = taskname[3:]
try:
cooker.tryBuild(fn, False)
@@ -434,26 +444,36 @@ class RunQueue:
active_builds = active_builds - 1
task = build_pids[result[0]]
if result[1] != 0:
+ del build_pids[result[0]]
bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
- raise bb.runqueue.TaskFailure(self.runq_fnid[task], taskData.fn_index[self.runq_fnid[task]], self.runq_task[task])
+ failed_fnids.append(self.runq_fnid[task])
+ break
task_complete(self, task)
del build_pids[result[0]]
continue
break
- except SystemExit:
- raise
- except:
- bb.msg.error(bb.msg.domain.RunQueue, "Exception received")
- while active_builds > 0:
- bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds)
- tasknum = 1
+ finally:
+ try:
+ while active_builds > 0:
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % active_builds)
+ tasknum = 1
+ for k, v in build_pids.iteritems():
+ bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k))
+ tasknum = tasknum + 1
+ result = os.waitpid(-1, 0)
+ task = build_pids[result[0]]
+ if result[1] != 0:
+ bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed" % (task, self.get_user_idstring(task, taskData)))
+ failed_fnids.append(self.runq_fnid[task])
+ del build_pids[result[0]]
+ active_builds = active_builds - 1
+ if len(failed_fnids) > 0:
+ return failed_fnids
+ except:
+ bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGTERM to remaining %s tasks" % active_builds)
for k, v in build_pids.iteritems():
- bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v, taskData), k))
- tasknum = tasknum + 1
- result = os.waitpid(-1, 0)
- del build_pids[result[0]]
- active_builds = active_builds - 1
- raise
+ os.kill(k, signal.SIGTERM)
+ raise
# Sanity Checks
for task in range(len(self.runq_fnid)):
@@ -464,7 +484,7 @@ class RunQueue:
if runq_complete[task] == 0:
bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task)
- return 0
+ return failed_fnids
def dump_data(self, taskQueue):
"""
diff --git a/bitbake/lib/bb/shell.py b/bitbake/lib/bb/shell.py
index 760c371d90..711cd4335f 100644
--- a/bitbake/lib/bb/shell.py
+++ b/bitbake/lib/bb/shell.py
@@ -179,8 +179,9 @@ class BitBakeShellCommands:
global last_exception
last_exception = Providers.NoProvider
- except runqueue.TaskFailure, (fnid, fn, taskname):
- print "ERROR: '%s, %s' failed" % (fn, taskname)
+ except runqueue.TaskFailure, fnids:
+ for fnid in fnids:
+ print "ERROR: '%s' failed" % td.fn_index[fnid]
global last_exception
last_exception = runqueue.TaskFailure