diff options
author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2018-07-11 11:46:01 +0000 |
---|---|---|
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2018-07-13 16:34:30 +0100 |
commit | 4374c296d8963e4f6a1aa7bef7983ad0a1c2fcff (patch) | |
tree | f3776121e458be589e8314ddf2752ac5223b3a94 /meta | |
parent | a910d90dc18f9bc63142ccae2eeadc1feefc756b (diff) | |
download | openembedded-core-4374c296d8963e4f6a1aa7bef7983ad0a1c2fcff.tar.gz openembedded-core-4374c296d8963e4f6a1aa7bef7983ad0a1c2fcff.tar.bz2 openembedded-core-4374c296d8963e4f6a1aa7bef7983ad0a1c2fcff.zip |
oeqa/runner: Ensure we don't print misleading results output
The current code assumes if something isn't a failure of some
kind, it was a pass. When test case IDs weren't matching, this led
to very confusing output where things would fail, then be listed as
passing.
This adds code to track successes, ensuring we don't end up in this
position again; unmatched entries are now listed as UNKNOWN instead of PASSED.
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta')
-rw-r--r-- | meta/lib/oeqa/core/runner.py | 14 |
1 file changed, 11 insertions, 3 deletions
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py index 16345fab2e..374d30cc38 100644 --- a/meta/lib/oeqa/core/runner.py +++ b/meta/lib/oeqa/core/runner.py @@ -42,6 +42,8 @@ class OETestResult(_TestResult): def __init__(self, tc, *args, **kwargs): super(OETestResult, self).__init__(*args, **kwargs) + self.successes = [] + self.tc = tc self._tc_map_results() @@ -58,6 +60,7 @@ class OETestResult(_TestResult): self.tc._results['errors'] = self.errors self.tc._results['skipped'] = self.skipped self.tc._results['expectedFailures'] = self.expectedFailures + self.tc._results['successes'] = self.successes def logSummary(self, component, context_msg=''): elapsed_time = self.tc._run_end_time - self.tc._run_start_time @@ -115,13 +118,18 @@ class OETestResult(_TestResult): return (found, None) + def addSuccess(self, test): + #Added so we can keep track of successes too + self.successes.append((test, None)) + super(OETestResult, self).addSuccess(test) + def logDetails(self): self.tc.logger.info("RESULTS:") for case_name in self.tc._registry['cases']: case = self.tc._registry['cases'][case_name] - result_types = ['failures', 'errors', 'skipped', 'expectedFailures'] - result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL'] + result_types = ['failures', 'errors', 'skipped', 'expectedFailures', 'successes'] + result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL', 'PASSED'] fail = False desc = None @@ -143,7 +151,7 @@ class OETestResult(_TestResult): oeid, desc)) else: self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(), - oeid, 'PASSED')) + oeid, 'UNKNOWN')) class OEListTestsResult(object): def wasSuccessful(self): |