Diffstat (limited to 'bitbake/lib')
 bitbake/lib/bb/providers.py |  8 +++++++-
 bitbake/lib/bb/runqueue.py  | 23 +++++++++++++++++++--
 2 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/bitbake/lib/bb/providers.py b/bitbake/lib/bb/providers.py
index 2f6b620b3d..cb0ca3ff24 100644
--- a/bitbake/lib/bb/providers.py
+++ b/bitbake/lib/bb/providers.py
@@ -283,13 +283,16 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
     # Should use dataCache.preferred here?
     preferred = []
+    preferred_vars = []
     for p in eligible:
         pn = dataCache.pkg_fn[p]
         provides = dataCache.pn_provides[pn]
         for provide in provides:
             prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
             if prefervar == pn:
-                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item))
+                var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
+                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to %s" % (pn, item, var))
+                preferred_vars.append(var)
                 eligible.remove(p)
                 eligible = [p] + eligible
                 preferred.append(p)
 
@@ -297,6 +300,9 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
     numberPreferred = len(preferred)
+    if numberPreferred > 1:
+        bb.msg.error(bb.msg.domain.Provider, "Conflicting PREFERRED_PROVIDER entries were found which resulted in an attempt to select multiple providers (%s) for runtime dependency %s\nThe entries resulting in this conflict were: %s" % (preferred, item, preferred_vars))
+
     bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))
 
     return eligible, numberPreferred
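
As a stand-alone illustration of the conflict check added above, here is a
minimal sketch in plain Python. It is not the BitBake API; pick_runtime_provider
and its eligible/provides/prefs arguments are hypothetical stand-ins for the
dataCache/cfgData plumbing in filterProvidersRunTime.

def pick_runtime_provider(item, eligible, provides, prefs):
    # eligible: candidate recipe names, best first
    # provides: recipe name -> list of things it provides
    # prefs: configuration variable name -> preferred recipe name
    preferred = []
    preferred_vars = []
    for pn in list(eligible):
        for provide in provides[pn]:
            var = "PREFERRED_PROVIDER_%s" % provide
            if prefs.get(var) == pn:
                preferred_vars.append("%s = %s" % (var, pn))
                # promote the preferred candidate to the front of the list
                eligible.remove(pn)
                eligible.insert(0, pn)
                preferred.append(pn)
    # the new error path: two different PREFERRED_PROVIDER_* entries each
    # promoting a different recipe for the same runtime item is a conflict
    if len(preferred) > 1:
        raise RuntimeError("conflicting entries for runtime %s (%s): %s"
                           % (item, preferred, preferred_vars))
    return eligible, len(preferred)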
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 7b3defd343..3560996b9d 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -150,6 +150,7 @@ class RunQueue:
         self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split()
         self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
         self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
+        self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or ""
 
     def reset_runqueue(self):
@@ -667,6 +668,18 @@ class RunQueue:
         #if error:
         #    bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")
+
+        # Create a whitelist usable by the stamp checks
+        stampfnwhitelist = []
+        for entry in self.stampwhitelist.split():
+            entryid = self.taskData.getbuild_id(entry)
+            if entryid not in self.taskData.build_targets:
+                continue
+            fnid = self.taskData.build_targets[entryid][0]
+            fn = self.taskData.fn_index[fnid]
+            stampfnwhitelist.append(fn)
+        self.stampfnwhitelist = stampfnwhitelist
+
         #self.dump_data(taskData)
 
     def check_stamps(self):
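
As a rough stand-alone sketch of what the new block computes (plain Python;
build_ids, build_targets and fn_index are hypothetical dicts standing in for
the TaskData attributes used above): each BB_STAMP_WHITELIST entry is resolved
to the recipe file of its first known provider, and unknown entries are skipped.

def build_stamp_fn_whitelist(stampwhitelist, build_ids, build_targets, fn_index):
    # stampwhitelist: space-separated target names from BB_STAMP_WHITELIST
    stampfnwhitelist = []
    for entry in stampwhitelist.split():
        entryid = build_ids.get(entry)
        if entryid not in build_targets:
            continue  # no known provider for this target
        # take the first (preferred) provider's recipe filename
        stampfnwhitelist.append(fn_index[build_targets[entryid][0]])
    return stampfnwhitelist

print(build_stamp_fn_whitelist("glibc no-such-target",
                               {"glibc": 0}, {0: [0]}, ["/recipes/glibc.bb"]))
# -> ['/recipes/glibc.bb']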
@@ -679,6 +692,9 @@ class RunQueue:
             fulldeptree = False
         else:
             fulldeptree = True
+            stampwhitelist = []
+            if self.stamppolicy == "whitelist":
+                stampwhitelist = self.stampfnwhitelist
 
         for task in range(len(self.runq_fnid)):
             unchecked[task] = ""
@@ -730,7 +746,7 @@ class RunQueue:
                     fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
                     taskname2 = self.runq_task[dep]
                     stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
-                    if fulldeptree or fn == fn2:
+                    if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                         if dep in notcurrent:
                             iscurrent = False
                         else:
@@ -766,6 +782,9 @@ class RunQueue:
             fulldeptree = False
         else:
             fulldeptree = True
+            stampwhitelist = []
+            if self.stamppolicy == "whitelist":
+                stampwhitelist = self.stampfnwhitelist
 
         fn = self.taskData.fn_index[self.runq_fnid[task]]
         taskname = self.runq_task[task]
@@ -785,7 +804,7 @@ class RunQueue:
             fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
             taskname2 = self.runq_task[dep]
             stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
-            if fulldeptree or fn == fn2:
+            if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                 try:
                     t2 = os.stat(stampfile2)[stat.ST_MTIME]
                     if t1 < t2:
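
The two hunks above change the same dependency predicate in both stamp checks.
With BB_STAMP_POLICY = "whitelist" set in a conf file (and, say,
BB_STAMP_WHITELIST = "glibc"; the value is only an example), fulldeptree is
true, but stamps of whitelisted recipes no longer invalidate their dependents.
A minimal sketch of the revised predicate:

def dep_needs_stamp_check(fn, fn2, fulldeptree, stampwhitelist):
    # old behaviour: fulldeptree or fn == fn2
    # new behaviour: cross-recipe deps are only checked when the dependency's
    # recipe file is not on the whitelist
    return fn == fn2 or (fulldeptree and fn2 not in stampwhitelist)

assert dep_needs_stamp_check("a.bb", "a.bb", False, [])
assert dep_needs_stamp_check("a.bb", "b.bb", True, [])
assert not dep_needs_stamp_check("a.bb", "b.bb", True, ["b.bb"])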