author     Richard Purdie <richard.purdie@linuxfoundation.org>  2015-06-01 22:16:17 +0100
committer  Richard Purdie <richard.purdie@linuxfoundation.org>  2015-06-01 22:18:14 +0100
commit     77c4865bbde4cd2a061cf333f9ad798afc6de0ef (patch)
tree       d08497c647e223c2a05b46576cfee3bb0cc0fa09 /meta
parent     44ae778fefca5112900b870be7a485360c50bc2e (diff)
sstate: Parallelise checkstatus calls for sstate mirror
Currently the URLs are checked serially, which is a performance bottleneck, particularly for http:// URLs. This adds code to check the URL status in parallel, mirroring the way we do this elsewhere. We need the datastore for the fetcher, so we use threads, not multiprocessing.

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
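In outline, the change builds the list of candidate sstate files first and then fans the checkstatus() probes out across a small pool of worker threads sized to the CPU count. The sketch below shows the same pattern standalone, outside BitBake: it uses Python's concurrent.futures rather than the oe.utils.ThreadedPool the patch relies on, and check_url(), check_all() and the example mirror URLs are illustrative placeholders, not BitBake or fetcher API.

# Minimal standalone sketch of the parallel status-check pattern.
# check_url(), check_all() and CANDIDATES are hypothetical, for illustration only.
import concurrent.futures
import multiprocessing
import urllib.request

CANDIDATES = [
    "http://sstate.example.com/ab/sstate-example-1.0-r0.tgz",
    "http://sstate.example.com/cd/sstate-other-2.0-r0.tgz",
]

def check_url(url):
    """Return (url, exists) using a HEAD request, without downloading the body."""
    req = urllib.request.Request(url, method="HEAD")
    try:
        with urllib.request.urlopen(req, timeout=30):
            return url, True
    except Exception:
        return url, False

def check_all(urls):
    """Probe all URLs in parallel on a thread pool sized to the CPU count."""
    if not urls:
        return {}
    nproc = min(multiprocessing.cpu_count(), len(urls))
    with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as pool:
        return dict(pool.map(check_url, urls))

if __name__ == "__main__":
    for url, found in check_all(CANDIDATES).items():
        print("found" if found else "missing", url)

Threads rather than processes are used for the same reason the commit message gives: the state shared with the workers (here just the URL list, in the patch the BitBake datastore) stays directly accessible without being serialised.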
Diffstat (limited to 'meta')
-rw-r--r--  meta/classes/sstate.bbclass | 28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index b48504429f..de3519a69e 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -715,20 +715,16 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
         if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
             localdata.delVar('BB_NO_NETWORK')
 
-        for task in range(len(sq_fn)):
-            if task in ret:
-                continue
-
-            spec, extrapath, tname = getpathcomponents(task, d)
-
-            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+        def checkstatus(arg):
+            (task, sstatefile) = arg
+            localdata2 = bb.data.createCopy(localdata)
             srcuri = "file://" + sstatefile
             localdata.setVar('SRC_URI', srcuri)
             bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
 
             try:
-                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata)
+                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2)
                 fetcher.checkstatus()
                 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                 ret.append(task)
@@ -739,6 +735,22 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
pass
+ tasklist = []
+ for task in range(len(sq_fn)):
+ if task in ret:
+ continue
+ spec, extrapath, tname = getpathcomponents(task, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+ tasklist.append((task, sstatefile))
+
+ if tasklist:
+ import multiprocessing
+ nproc = min(multiprocessing.cpu_count(), len(tasklist))
+ pool = oe.utils.ThreadedPool(nproc)
+ for t in tasklist:
+ pool.add_task(checkstatus, t)
+ pool.wait_completion()
+
inheritlist = d.getVar("INHERIT", True)
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
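The add_task()/wait_completion() interface used above comes from oe.utils.ThreadedPool, introduced alongside this change. As a rough, hypothetical stand-in (not the oe.utils implementation), a queue-plus-worker-threads helper with that shape could look like this:

# Illustrative stand-in for a ThreadedPool-style helper; not the oe.utils code.
import threading
from queue import Queue

class SimpleThreadedPool:
    """Run queued callables on a fixed number of daemon worker threads."""
    def __init__(self, num_workers):
        self.tasks = Queue()
        for _ in range(num_workers):
            threading.Thread(target=self._worker, daemon=True).start()

    def _worker(self):
        # Each worker pulls (func, arg) pairs off the queue until the process exits.
        while True:
            func, arg = self.tasks.get()
            try:
                func(arg)
            finally:
                self.tasks.task_done()

    def add_task(self, func, arg):
        self.tasks.put((func, arg))

    def wait_completion(self):
        # Block until every queued task has been processed.
        self.tasks.join()

Usage mirrors the patch: construct the pool with the chosen worker count, call add_task(checkstatus, t) for each (task, sstatefile) pair, then wait_completion() before reading the shared result lists.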