author    Markus Lehtonen <markus.lehtonen@linux.intel.com>    2017-01-27 16:54:53 +0200
committer Richard Purdie <richard.purdie@linuxfoundation.org>  2017-02-15 20:06:27 -0800
commit    e004664287ec03e7367a7bf553d9a3038444e82e (patch)
tree      85cb3040ba7cc312c63b336c34548eee93c9822b /meta/lib
parent    515f4c129ad27e9502621738d0bf7f5552627f19 (diff)
oeqa/buildperf: don't archive stdout/stderr of commands
Stop capturing output of the shell commands into <test>/commands.log.
Redirecting output into a file prevented the unittest framework from
capturing it, causing useless errors (with empty output) like:
oeqa.utils.CommandError: Command '['bitbake', 'core-image-sato']'
returned non-zero exit status 1 with output:
In general, the console output of commands is only interesting when
something fails. Also, dropping the commands.log file is a huge saving
in disk space, and thus in repository size, when results are archived in
Git.
Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com>
Signed-off-by: Ross Burton <ross.burton@intel.com>
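
The problem described above is not specific to the oeqa helpers: when a command's stdout is redirected straight into a file, the wrapper that raises the failure has no captured output left to attach to the exception. The sketch below illustrates the principle with the standard-library subprocess module rather than runCmd2; the failing command and log path are made up for the example.

```python
# Illustrative only: why redirecting stdout to a log file leaves a failure
# exception with empty output, while capturing it in memory lets the error
# message include the console output.
import subprocess

cmd = ["sh", "-c", "echo 'NOTE: build failed'; exit 1"]  # made-up failing command

# Variant 1: stdout redirected to a file (the old commands.log approach).
with open("/tmp/commands.log", "a") as log_file:
    try:
        subprocess.run(cmd, stdout=log_file, stderr=log_file, check=True)
    except subprocess.CalledProcessError as err:
        print("output attached to the error:", err.stdout)  # None

# Variant 2: output captured in memory, so the exception carries it.
try:
    subprocess.run(cmd, capture_output=True, check=True)
except subprocess.CalledProcessError as err:
    print("output attached to the error:", err.stdout)  # b"NOTE: build failed\n"
```

With the patch, runCmd2() is simply left to capture the output itself, so a failing bitbake invocation shows up in the test report with its console output instead of an empty message.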
Diffstat (limited to 'meta/lib')
-rw-r--r--   meta/lib/oeqa/buildperf/base.py        | 29
-rw-r--r--   meta/lib/oeqa/buildperf/test_basic.py  | 10
2 files changed, 14 insertions, 25 deletions
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 975524c6de..dd473a0bdc 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -163,8 +163,6 @@ class BuildPerfTestResult(unittest.TextTestResult):
                                    ('status', status),
                                    ('start_time', test.start_time),
                                    ('elapsed_time', test.elapsed_time),
-                                   ('cmd_log_file', os.path.relpath(test.cmd_log_file,
-                                                                    self.out_dir)),
                                    ('measurements', test.measurements)])
         if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
             test_result['message'] = str(test.err[1])
@@ -268,18 +266,13 @@ class BuildPerfTestCase(unittest.TestCase):
     def out_dir(self):
         return os.path.join(self.base_dir, self.name)
 
-    @property
-    def cmd_log_file(self):
-        return os.path.join(self.out_dir, 'commands.log')
-
     def shortDescription(self):
         return super(BuildPerfTestCase, self).shortDescription() or ""
 
     def setUp(self):
         """Set-up fixture for each test"""
         if self.build_target:
-            self.log_cmd_output(['bitbake', self.build_target,
-                                 '-c', 'fetchall'])
+            self.run_cmd(['bitbake', self.build_target, '-c', 'fetchall'])
 
     def run(self, *args, **kwargs):
         """Run test"""
@@ -287,13 +280,12 @@ class BuildPerfTestCase(unittest.TestCase):
         super(BuildPerfTestCase, self).run(*args, **kwargs)
         self.elapsed_time = datetime.now() - self.start_time
 
-    def log_cmd_output(self, cmd):
-        """Run a command and log it's output"""
+    def run_cmd(self, cmd):
+        """Convenience method for running a command"""
         cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
         log.info("Logging command: %s", cmd_str)
         try:
-            with open(self.cmd_log_file, 'a') as fobj:
-                runCmd2(cmd, stdout=fobj)
+            runCmd2(cmd)
         except CommandError as err:
             log.error("Command failed: %s", err.retcode)
             raise
@@ -338,17 +330,14 @@ class BuildPerfTestCase(unittest.TestCase):
         log.info("Timing command: %s", cmd_str)
         data_q = SimpleQueue()
         try:
-            with open(self.cmd_log_file, 'a') as fobj:
-                proc = Process(target=_worker, args=(data_q, cmd,),
-                               kwargs={'stdout': fobj})
-                proc.start()
-                data = data_q.get()
-                proc.join()
+            proc = Process(target=_worker, args=(data_q, cmd,))
+            proc.start()
+            data = data_q.get()
+            proc.join()
             if isinstance(data, Exception):
                 raise data
         except CommandError:
-            log.error("Command '%s' failed, see %s for more details", cmd_str,
-                      self.cmd_log_file)
+            log.error("Command '%s' failed", cmd_str)
             raise
         etime = data['elapsed_time']
diff --git a/meta/lib/oeqa/buildperf/test_basic.py b/meta/lib/oeqa/buildperf/test_basic.py
index 47118306b7..1333407a65 100644
--- a/meta/lib/oeqa/buildperf/test_basic.py
+++ b/meta/lib/oeqa/buildperf/test_basic.py
@@ -38,8 +38,8 @@ class Test1P2(BuildPerfTestCase):
     def test12(self):
         """Build virtual/kernel"""
         # Build and cleans state in order to get all dependencies pre-built
-        self.log_cmd_output(['bitbake', self.build_target])
-        self.log_cmd_output(['bitbake', self.build_target, '-c', 'cleansstate'])
+        self.run_cmd(['bitbake', self.build_target])
+        self.run_cmd(['bitbake', self.build_target, '-c', 'cleansstate'])
         self.sync()
         self.measure_cmd_resources(['bitbake', self.build_target], 'build',
@@ -74,7 +74,7 @@ class Test2(BuildPerfTestCase):
     def test2(self):
         """Run core-image-sato do_rootfs with sstate"""
         # Build once in order to populate sstate cache
-        self.log_cmd_output(['bitbake', self.build_target])
+        self.run_cmd(['bitbake', self.build_target])
         self.rm_tmp()
         self.rm_cache()
@@ -106,8 +106,8 @@ class Test4(BuildPerfTestCase):
     def test4(self):
         """eSDK metrics"""
-        self.log_cmd_output("bitbake {} -c do_populate_sdk_ext".format(
-            self.build_target))
+        self.run_cmd(['bitbake', '-c', 'do_populate_sdk_ext',
+                      self.build_target])
         self.bb_vars = get_bb_vars(None, self.build_target)
         tmp_dir = self.bb_vars['TMPDIR']
         installer = os.path.join(
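
For readers unfamiliar with the timing helper touched in the last base.py hunk, the following is a simplified, self-contained sketch of the Process/SimpleQueue pattern it keeps using: the child process runs the command and sends back either a result dictionary or the exception object, which the parent then re-raises. The _worker below is a stand-in written for this sketch, not the module's real implementation.

```python
# A simplified stand-in for the pattern in the timing code above: run the
# command in a child process and pass the result (or the failure) back
# through a SimpleQueue. Not the real oeqa _worker.
import subprocess
from datetime import datetime
from multiprocessing import Process, SimpleQueue

def _worker(data_q, cmd):
    """Run cmd and report elapsed time (or the failure) over data_q"""
    try:
        start = datetime.now()
        ret = subprocess.run(cmd)
        if ret.returncode != 0:
            raise RuntimeError("Command failed: %d" % ret.returncode)
        data_q.put({'elapsed_time': datetime.now() - start})
    except Exception as err:
        # Exceptions cannot propagate across the process boundary directly,
        # so ship the object back and let the parent decide to re-raise it.
        data_q.put(err)

if __name__ == '__main__':
    data_q = SimpleQueue()
    proc = Process(target=_worker, args=(data_q, ['sleep', '1']))
    proc.start()
    data = data_q.get()
    proc.join()
    if isinstance(data, Exception):
        raise data
    print("elapsed_time:", data['elapsed_time'])
```

The patch only removes the per-command log file from this flow; the worker process and the queue-based handoff of results and exceptions stay exactly as before.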