summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Purdie <richard.purdie@linuxfoundation.org>2019-02-16 18:13:00 +0000
committerRichard Purdie <richard.purdie@linuxfoundation.org>2019-03-24 16:53:05 +0000
commitb4513e75f746a0989b09ee53cb85e489d41e5783 (patch)
tree707bf879c20f741e94de7f54bc43f45079b63595
parent07054cc2691fd2822028a3fd55185af457f79ebf (diff)
downloadopenembedded-core-b4513e75f746a0989b09ee53cb85e489d41e5783.tar.gz
openembedded-core-b4513e75f746a0989b09ee53cb85e489d41e5783.tar.bz2
openembedded-core-b4513e75f746a0989b09ee53cb85e489d41e5783.zip
resulttool: Improvements to allow integration to the autobuilder
This is a combined patch of the various tweaks and improvements I made to resulttool: * Avoid subprocess.run() as it's a python 3.6 feature and we have autobuilder workers with 3.5. * Avoid python keywords as variable names * Simplify dict accesses using .get() * Rename resultsutils -> resultutils to match the resultstool -> resulttool rename * Formalised the handling of "file_name" to "TESTSERIES" which the code will now add into the json configuration data if it's not present, based on the directory name. * When we don't have failed test cases, print something saying so instead of an empty table * Tweak the table headers in the report to be more readable (reference "Test Series" instead of file_id and ID instead of results_id) * Improve/simplify the max string length handling * Merge the counts and percentage data into one table in the report since printing two reports of the same data confuses the user * Removed the confusing header in the regression report * Show matches, then regressions, then unmatched runs in the regression report, also remove chatty unneeded output * Try harder to "pair" up matching configurations to reduce noise in the regressions report * Abstracted the "mapping" table concept used for pairing in the regression code to general code in resultutils * Created multiple mappings for results analysis, results storage and 'flattening' results data in a merge * Simplify the merge command to take a source and a destination, letting the destination be a directory or a file, removing the need for an output directory parameter * Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression mappings * Have the store command place the testresults files in a layout from the mapping, making commits into the git repo for results storage more useful for simple comparison purposes * Set the oe-git-archive tag format appropriately for oeqa results storage (and simplify the commit messages closer to their defaults) * Fix oe-git-archive to use the 
commit/branch data from the results file * Cleaned up the command option help to match other changes * Follow the model of git branch/tag processing used by oe-build-perf-report and use that to read the data using git show to avoid branch change * Add ptest summary to the report command * Update the tests to match the above changes Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
-rw-r--r--meta/lib/oeqa/selftest/cases/resulttooltests.py106
-rwxr-xr-xscripts/lib/resulttool/manualexecution.py8
-rw-r--r--scripts/lib/resulttool/merge.py69
-rw-r--r--scripts/lib/resulttool/regression.py328
-rw-r--r--scripts/lib/resulttool/report.py157
-rw-r--r--scripts/lib/resulttool/resultsutils.py67
-rw-r--r--scripts/lib/resulttool/resultutils.py127
-rw-r--r--scripts/lib/resulttool/store.py136
-rw-r--r--scripts/lib/resulttool/template/test_report_full_text.txt33
-rwxr-xr-xscripts/resulttool7
10 files changed, 530 insertions, 508 deletions
diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py
index 7bf1ec60c1..0a089c0b7f 100644
--- a/meta/lib/oeqa/selftest/cases/resulttooltests.py
+++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -4,13 +4,46 @@ basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../../../')
lib_path = basepath + '/scripts/lib'
sys.path = sys.path + [lib_path]
from resulttool.report import ResultsTextReport
-from resulttool.regression import ResultsRegressionSelector, ResultsRegression
-from resulttool.merge import ResultsMerge
-from resulttool.store import ResultsGitStore
-from resulttool.resultsutils import checkout_git_dir
+from resulttool import regression as regression
+from resulttool import resultutils as resultutils
from oeqa.selftest.case import OESelftestTestCase
class ResultToolTests(OESelftestTestCase):
+ base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'base_result2': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86-64"},
+ 'result': {}}}
+ target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'target_result2': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86"},
+ 'result': {}},
+ 'target_result3': {'configuration': {"TEST_TYPE": "runtime",
+ "TESTSERIES": "series1",
+ "IMAGE_BASENAME": "image",
+ "IMAGE_PKGTYPE": "ipk",
+ "DISTRO": "mydistro",
+ "MACHINE": "qemux86-64"},
+ 'result': {}}}
def test_report_can_aggregate_test_result(self):
result_data = {'result': {'test1': {'status': 'PASSED'},
@@ -25,23 +58,12 @@ class ResultToolTests(OESelftestTestCase):
self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped'])
def test_regression_can_get_regression_base_target_pair(self):
- base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7"}},
- 'base_result2': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7",
- "MACHINE": "qemux86-64"}}}
- target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7"}},
- 'target_result2': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7",
- "MACHINE": "qemux86"}},
- 'target_result3': {'configuration': {"TEST_TYPE": "oeselftest",
- "HOST": "centos-7",
- "MACHINE": "qemux86-64"}}}
- regression = ResultsRegressionSelector()
- pair = regression.get_regression_base_target_pair(self.logger, base_results_data, target_results_data)
- self.assertTrue('target_result1' in pair['base_result1'], msg="Pair not correct:%s" % pair['base_result1'])
- self.assertTrue('target_result3' in pair['base_result2'], msg="Pair not correct:%s" % pair['base_result2'])
+
+ results = {}
+ resultutils.append_resultsdata(results, ResultToolTests.base_results_data)
+ resultutils.append_resultsdata(results, ResultToolTests.target_results_data)
+ self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
+ self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)
def test_regrresion_can_get_regression_result(self):
base_result_data = {'result': {'test1': {'status': 'PASSED'},
@@ -54,8 +76,7 @@ class ResultToolTests(OESelftestTestCase):
'test3': {'status': 'PASSED'},
'test4': {'status': 'ERROR'},
'test5': {'status': 'SKIPPED'}}}
- regression = ResultsRegression()
- result = regression.get_regression_result(self.logger, base_result_data, target_result_data)
+ result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data)
self.assertTrue(result['test2']['base'] == 'PASSED',
msg="regression not correct:%s" % result['test2']['base'])
self.assertTrue(result['test2']['target'] == 'FAILED',
@@ -66,39 +87,8 @@ class ResultToolTests(OESelftestTestCase):
msg="regression not correct:%s" % result['test3']['target'])
def test_merge_can_merged_results(self):
- base_results_data = {'base_result1': {},
- 'base_result2': {}}
- target_results_data = {'target_result1': {},
- 'target_result2': {},
- 'target_result3': {}}
-
- merge = ResultsMerge()
- results = merge.merge_results(base_results_data, target_results_data)
- self.assertTrue(len(results.keys()) == 5, msg="merge not correct:%s" % len(results.keys()))
-
- def test_store_can_store_to_new_git_repository(self):
- basepath = os.path.abspath(os.path.dirname(__file__) + '/../../')
- source_dir = basepath + '/files/testresults'
- git_branch = 'qa-cycle-2.7'
- store = ResultsGitStore()
- output_dir = store.store_to_new(self.logger, source_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to new git repository failed:%s" %
- output_dir)
- store._remove_temporary_workspace_dir(output_dir)
+ results = {}
+ resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map)
+ self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results))
- def test_store_can_store_to_existing(self):
- basepath = os.path.abspath(os.path.dirname(__file__) + '/../../')
- source_dir = basepath + '/files/testresults'
- git_branch = 'qa-cycle-2.6'
- store = ResultsGitStore()
- output_dir = store.store_to_new(self.logger, source_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to new git repository failed:%s" %
- output_dir)
- git_branch = 'qa-cycle-2.7'
- output_dir = store.store_to_existing_with_new_branch(self.logger, source_dir, output_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to existing git repository failed:%s" %
- output_dir)
- output_dir = store.store_to_existing(self.logger, source_dir, output_dir, git_branch)
- self.assertTrue(checkout_git_dir(output_dir, git_branch), msg="store to existing git repository failed:%s" %
- output_dir)
- store._remove_temporary_workspace_dir(output_dir)
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index 64ec581a9f..ecdc4e7a7a 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -18,7 +18,11 @@ import sys
import datetime
import re
from oeqa.core.runner import OETestResultJSONHelper
-from resulttool.resultsutils import load_json_file
+
+def load_json_file(file):
+ with open(file, "r") as f:
+ return json.load(f)
+
class ManualTestRunner(object):
def __init__(self):
@@ -134,4 +138,4 @@ def register_commands(subparsers):
description='helper script for results populating during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/',
group='manualexecution')
parser_build.set_defaults(func=manualexecution)
- parser_build.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.') \ No newline at end of file
+ parser_build.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.')
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
index 1d9cfafd41..3e4b7a38ad 100644
--- a/scripts/lib/resulttool/merge.py
+++ b/scripts/lib/resulttool/merge.py
@@ -1,6 +1,7 @@
-# test result tool - merge multiple testresults.json files
+# resulttool - merge multiple testresults.json files into a file or directory
#
# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -11,61 +12,31 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-from resulttool.resultsutils import load_json_file, get_dict_value, dump_json_data
import os
import json
-
-class ResultsMerge(object):
-
- def get_test_results(self, logger, file, result_id):
- results = load_json_file(file)
- if result_id:
- result = get_dict_value(logger, results, result_id)
- if result:
- return {result_id: result}
- return result
- return results
-
- def merge_results(self, base_results, target_results):
- for k in target_results:
- base_results[k] = target_results[k]
- return base_results
-
- def _get_write_dir(self):
- basepath = os.environ['BUILDDIR']
- return basepath + '/tmp/'
-
- def dump_merged_results(self, results, output_dir):
- file_output_dir = output_dir if output_dir else self._get_write_dir()
- dump_json_data(file_output_dir, 'testresults.json', results)
- print('Successfully merged results to: %s' % os.path.join(file_output_dir, 'testresults.json'))
-
- def run(self, logger, base_result_file, target_result_file, target_result_id, output_dir):
- base_results = self.get_test_results(logger, base_result_file, '')
- target_results = self.get_test_results(logger, target_result_file, target_result_id)
- if base_results and target_results:
- merged_results = self.merge_results(base_results, target_results)
- self.dump_merged_results(merged_results, output_dir)
+import resulttool.resultutils as resultutils
def merge(args, logger):
- merge = ResultsMerge()
- merge.run(logger, args.base_result_file, args.target_result_file, args.target_result_id, args.output_dir)
+ if os.path.isdir(args.target_results):
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ resultutils.save_resultsdata(results, args.target_results)
+ else:
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ if os.path.exists(args.target_results):
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
return 0
def register_commands(subparsers):
"""Register subcommands from this plugin"""
- parser_build = subparsers.add_parser('merge', help='merge test results',
- description='merge results from multiple files',
+ parser_build = subparsers.add_parser('merge', help='merge test result files/directories',
+ description='merge the results from multiple files/directories into the target file or directory',
group='setup')
parser_build.set_defaults(func=merge)
- parser_build.add_argument('base_result_file',
- help='base result file provide the base result set')
- parser_build.add_argument('target_result_file',
- help='target result file provide the target result set for merging into the '
- 'base result set')
- parser_build.add_argument('-t', '--target-result-id', default='',
- help='(optional) default merge all result sets available from target to base '
- 'unless specific target result id was provided')
- parser_build.add_argument('-o', '--output-dir', default='',
- help='(optional) default write merged results to <poky>/build/tmp/ unless specific '
- 'output directory was provided')
+ parser_build.add_argument('base_results',
+ help='the results file/directory to import')
+ parser_build.add_argument('target_results',
+ help='the target file or directory to merge the base_results with')
+
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index bee3fb011a..ff77332fa9 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -1,6 +1,7 @@
-# test result tool - regression analysis
+# resulttool - regression analysis
#
# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -11,171 +12,170 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-from resulttool.resultsutils import load_json_file, get_dict_value, pop_dict_element
+import resulttool.resultutils as resultutils
import json
-class ResultsRegressionSelector(object):
-
- def get_results_unique_configurations(self, logger, results):
- unique_configurations_map = {"oeselftest": ['TEST_TYPE', 'HOST_DISTRO', 'MACHINE'],
- "runtime": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE'],
- "sdk": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
- "sdkext": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']}
- results_unique_configs = {}
- for k in results:
- result = results[k]
- result_configs = get_dict_value(logger, result, 'configuration')
- result_test_type = get_dict_value(logger, result_configs, 'TEST_TYPE')
- unique_configuration_keys = get_dict_value(logger, unique_configurations_map, result_test_type)
- result_unique_config = {}
- for ck in unique_configuration_keys:
- config_value = get_dict_value(logger, result_configs, ck)
- if config_value:
- result_unique_config[ck] = config_value
- results_unique_configs[k] = result_unique_config
- return results_unique_configs
-
- def get_regression_base_target_pair(self, logger, base_results, target_results):
- base_configs = self.get_results_unique_configurations(logger, base_results)
- logger.debug('Retrieved base configuration: config=%s' % base_configs)
- target_configs = self.get_results_unique_configurations(logger, target_results)
- logger.debug('Retrieved target configuration: config=%s' % target_configs)
- regression_pair = {}
- for bk in base_configs:
- base_config = base_configs[bk]
- for tk in target_configs:
- target_config = target_configs[tk]
- if base_config == target_config:
- if bk in regression_pair:
- regression_pair[bk].append(tk)
- else:
- regression_pair[bk] = [tk]
- return regression_pair
-
- def run_regression_with_regression_pairing(self, logger, regression_pair, base_results, target_results):
- regression = ResultsRegression()
- for base in regression_pair:
- for target in regression_pair[base]:
- print('Getting regression for base=%s target=%s' % (base, target))
- regression.run(logger, base_results[base], target_results[target])
-
-class ResultsRegression(object):
-
- def print_regression_result(self, result):
- if result:
- print('============================Start Regression============================')
- print('Only print regression if base status not equal target')
- print('<test case> : <base status> -> <target status>')
- print('========================================================================')
- for k in result:
- print(k, ':', result[k]['base'], '->', result[k]['target'])
- print('==============================End Regression==============================')
-
- def get_regression_result(self, logger, base_result, target_result):
- base_result = get_dict_value(logger, base_result, 'result')
- target_result = get_dict_value(logger, target_result, 'result')
- result = {}
- if base_result and target_result:
- logger.debug('Getting regression result')
- for k in base_result:
- base_testcase = base_result[k]
- base_status = get_dict_value(logger, base_testcase, 'status')
- if base_status:
- target_testcase = get_dict_value(logger, target_result, k)
- target_status = get_dict_value(logger, target_testcase, 'status')
- if base_status != target_status:
- result[k] = {'base': base_status, 'target': target_status}
- else:
- logger.error('Failed to retrieved base test case status: %s' % k)
- return result
-
- def run(self, logger, base_result, target_result):
- if base_result and target_result:
- result = self.get_regression_result(logger, base_result, target_result)
- logger.debug('Retrieved regression result =%s' % result)
- self.print_regression_result(result)
- else:
- logger.error('Input data objects must not be empty (base_result=%s, target_result=%s)' %
- (base_result, target_result))
-
-def get_results_from_directory(logger, source_dir):
- from resulttool.merge import ResultsMerge
- from resulttool.resultsutils import get_directory_files
- result_files = get_directory_files(source_dir, ['.git'], 'testresults.json')
- base_results = {}
- for file in result_files:
- merge = ResultsMerge()
- results = merge.get_test_results(logger, file, '')
- base_results = merge.merge_results(base_results, results)
- return base_results
-
-def remove_testcases_to_optimize_regression_runtime(logger, results):
- test_case_removal = ['ptestresult.rawlogs', 'ptestresult.sections']
- for r in test_case_removal:
- for k in results:
- result = get_dict_value(logger, results[k], 'result')
- pop_dict_element(logger, result, r)
-
-def regression_file(args, logger):
- base_results = load_json_file(args.base_result_file)
- print('Successfully loaded base test results from: %s' % args.base_result_file)
- target_results = load_json_file(args.target_result_file)
- print('Successfully loaded target test results from: %s' % args.target_result_file)
- remove_testcases_to_optimize_regression_runtime(logger, base_results)
- remove_testcases_to_optimize_regression_runtime(logger, target_results)
- if args.base_result_id and args.target_result_id:
- base_result = get_dict_value(logger, base_results, base_result_id)
- print('Getting base test result with result_id=%s' % base_result_id)
- target_result = get_dict_value(logger, target_results, target_result_id)
- print('Getting target test result with result_id=%s' % target_result_id)
- regression = ResultsRegression()
- regression.run(logger, base_result, target_result)
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+def compare_result(logger, base_name, target_name, base_result, target_result):
+ base_result = base_result.get('result')
+ target_result = target_result.get('result')
+ result = {}
+ if base_result and target_result:
+ for k in base_result:
+ base_testcase = base_result[k]
+ base_status = base_testcase.get('status')
+ if base_status:
+ target_testcase = target_result.get(k, {})
+ target_status = target_testcase.get('status')
+ if base_status != target_status:
+ result[k] = {'base': base_status, 'target': target_status}
+ else:
+ logger.error('Failed to retrieved base test case status: %s' % k)
+ if result:
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in result:
+ resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
else:
- regression = ResultsRegressionSelector()
- regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results)
- logger.debug('Retrieved regression pair=%s' % regression_pair)
- regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results)
- return 0
+ resultstring = "Match: %s\n %s" % (base_name, target_name)
+ return result, resultstring
+
+def get_results(logger, source):
+ return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+ base_results = get_results(logger, args.base_result)
+ target_results = get_results(logger, args.target_result)
+
+ regression_common(args, logger, base_results, target_results)
+
+def regression_common(args, logger, base_results, target_results):
+ if args.base_result_id:
+ base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+ if args.target_result_id:
+ target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+ matches = []
+ regressions = []
+ notfound = []
+
+ for a in base_results:
+ if a in target_results:
+ base = list(base_results[a].keys())
+ target = list(target_results[a].keys())
+ # We may have multiple base/targets which are for different configurations. Start by
+ # removing any pairs which match
+ for c in base.copy():
+ for b in target.copy():
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not res:
+ matches.append(resstr)
+ base.remove(c)
+ target.remove(b)
+ break
+ # Should only now see regressions, we may not be able to match multiple pairs directly
+ for c in base:
+ for b in target:
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if res:
+ regressions.append(resstr)
+ else:
+ notfound.append("%s not found in target" % a)
+ print("\n".join(matches))
+ print("\n".join(regressions))
+ print("\n".join(notfound))
-def regression_directory(args, logger):
- base_results = get_results_from_directory(logger, args.base_result_directory)
- target_results = get_results_from_directory(logger, args.target_result_directory)
- remove_testcases_to_optimize_regression_runtime(logger, base_results)
- remove_testcases_to_optimize_regression_runtime(logger, target_results)
- regression = ResultsRegressionSelector()
- regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results)
- logger.debug('Retrieved regression pair=%s' % regression_pair)
- regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results)
return 0
def regression_git(args, logger):
- from resulttool.resultsutils import checkout_git_dir
base_results = {}
target_results = {}
- if checkout_git_dir(args.source_dir, args.base_git_branch):
- base_results = get_results_from_directory(logger, args.source_dir)
- if checkout_git_dir(args.source_dir, args.target_git_branch):
- target_results = get_results_from_directory(logger, args.source_dir)
- if base_results and target_results:
- remove_testcases_to_optimize_regression_runtime(logger, base_results)
- remove_testcases_to_optimize_regression_runtime(logger, target_results)
- regression = ResultsRegressionSelector()
- regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results)
- logger.debug('Retrieved regression pair=%s' % regression_pair)
- regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results)
+
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(args.repo)
+
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+ if not len(revs2):
+ logger.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ logger.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ logger.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+ # Find the closest matching commit number for comparision
+ # In future we could check the commit is a common ancestor and
+ # continue back if not but this good enough for now
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ logger.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+ base_results = resultutils.git_get_result(repo, revs[index1][2])
+ target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+ regression_common(args, logger, base_results, target_results)
+
return 0
def register_commands(subparsers):
"""Register subcommands from this plugin"""
- parser_build = subparsers.add_parser('regression-file', help='regression file analysis',
+
+ parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+ description='regression analysis comparing the base set of results to the target results',
+ group='analysis')
+ parser_build.set_defaults(func=regression)
+ parser_build.add_argument('base_result',
+ help='base result file/directory for the comparison')
+ parser_build.add_argument('target_result',
+ help='target result file/directory to compare with')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID')
+
+ parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
description='regression analysis comparing base result set to target '
'result set',
group='analysis')
- parser_build.set_defaults(func=regression_file)
- parser_build.add_argument('base_result_file',
- help='base result file provide the base result set')
- parser_build.add_argument('target_result_file',
- help='target result file provide the target result set for comparison with base result')
+ parser_build.set_defaults(func=regression_git)
+ parser_build.add_argument('repo',
+ help='the git repository containing the data')
parser_build.add_argument('-b', '--base-result-id', default='',
help='(optional) default select regression based on configurations unless base result '
'id was provided')
@@ -183,26 +183,10 @@ def register_commands(subparsers):
help='(optional) default select regression based on configurations unless target result '
'id was provided')
- parser_build = subparsers.add_parser('regression-dir', help='regression directory analysis',
- description='regression analysis comparing base result set to target '
- 'result set',
- group='analysis')
- parser_build.set_defaults(func=regression_directory)
- parser_build.add_argument('base_result_directory',
- help='base result directory provide the files for base result set')
- parser_build.add_argument('target_result_directory',
- help='target result file provide the files for target result set for comparison with '
- 'base result')
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--branch2', help="Branch to find comparision revisions in")
+ parser_build.add_argument('--commit', help="Revision to search for")
+ parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+ parser_build.add_argument('--commit2', help="Revision to compare with")
+ parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
- parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
- description='regression analysis comparing base result set to target '
- 'result set',
- group='analysis')
- parser_build.set_defaults(func=regression_git)
- parser_build.add_argument('source_dir',
- help='source directory that contain the git repository with test result files')
- parser_build.add_argument('base_git_branch',
- help='base git branch that provide the files for base result set')
- parser_build.add_argument('target_git_branch',
- help='target git branch that provide the files for target result set for comparison with '
- 'base result')
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index ab5de1f3a7..2f5ea308e2 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -1,6 +1,7 @@
# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -14,100 +15,120 @@
import os
import glob
import json
-from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
class ResultsTextReport(object):
+ def __init__(self):
+ self.ptests = {}
+ self.result_types = {'passed': ['PASSED', 'passed'],