author     Richard Purdie <richard.purdie@linuxfoundation.org>    2019-02-16 18:13:00 +0000
committer  Richard Purdie <richard.purdie@linuxfoundation.org>    2019-03-24 16:53:05 +0000
commit     b4513e75f746a0989b09ee53cb85e489d41e5783 (patch)
tree       707bf879c20f741e94de7f54bc43f45079b63595 /scripts/lib/resulttool/resultutils.py
parent     07054cc2691fd2822028a3fd55185af457f79ebf (diff)
resulttool: Improvements to allow integration to the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:
* Avoid subprocess.run() as it's a Python 3.6 feature and we
have autobuilder workers running 3.5.
* Avoid python keywords as variable names
* Simplify dict accesses using .get()
* Rename resultsutils -> resultutils to match the resultstool ->
resulttool rename
* Formalised the handling of "file_name" as "TESTSERIES", which the code
will now add into the json configuration data, based on the directory
name, if it's not already present.
* When we don't have failed test cases, print something saying so
instead of an empty table
* Tweak the table headers in the report to be more readable (reference
"Test Series" instead of file_id and ID instead of results_id)
* Improve/simplify the max string length handling
* Merge the counts and percentage data into one table in the report
since printing two reports of the same data confuses the user
* Removed the confusing header in the regression report
* Show matches, then regressions, then unmatched runs in the regression
report, and remove chatty, unneeded output
* Try harder to "pair" up matching configurations to reduce noise in
the regression report
* Abstracted the "mapping" table concept used for pairing in the
regression code into general code in resultutils
* Created multiple mappings for results analysis, results storage and
'flattening' results data in a merge
* Simplify the merge command to take a source and a destination,
letting the destination be a directory or a file, removing the need for
an output directory parameter
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
mappings
* Have the store command place the testresults files in a layout derived
from the mapping (see the sketch after this message), making commits into
the git repo for results storage more useful for simple comparison purposes
* Set the oe-git-archive tag format appropriately for oeqa results
storage (and simplify the commit messages closer to their defaults)
* Fix oe-git-archive to use the commit/branch data from the results file
* Cleaned up the command option help to match other changes
* Follow the model of git branch/tag processing used by oe-build-perf-report
and use that to read the data using git show, avoiding the need to switch branches
* Add ptest summary to the report command
* Update the tests to match the above changes
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
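
As a rough, hypothetical illustration of the storage layout described above: for a "runtime" result the store mapping is ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'], so a results file is grouped under a path built from those configuration values, and a missing "TESTSERIES" value is filled in from the name of the directory the json file was loaded from. The configuration values in this sketch are invented for the example.

config = {                      # example "configuration" section from a testresults.json
    "TEST_TYPE": "runtime",
    "DISTRO": "poky",
    "MACHINE": "qemux86-64",
    "IMAGE_BASENAME": "core-image-sato",
}
store_keys = ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME']   # store_map["runtime"]
print("/".join(config.get(k) for k in store_keys))
# prints: runtime/poky/qemux86-64/core-image-sato
# save_resultsdata() would then write the merged results to
# <destdir>/runtime/poky/qemux86-64/core-image-sato/testresults.json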
Diffstat (limited to 'scripts/lib/resulttool/resultutils.py')
-rw-r--r-- | scripts/lib/resulttool/resultutils.py | 127 |
1 file changed, 127 insertions, 0 deletions
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..06cceef796
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,127 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import scriptpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+    "oeselftest": [],
+    "runtime": [],
+    "sdk": [],
+    "sdkext": []
+}
+regression_map = {
+    "oeselftest": ['TEST_TYPE', 'MACHINE'],
+    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']
+}
+store_map = {
+    "oeselftest": ['TEST_TYPE'],
+    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME']
+}
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map):
+    if type(f) is str:
+        with open(f, "r") as filedata:
+            data = json.load(filedata)
+    else:
+        data = f
+    for res in data:
+        if "configuration" not in data[res] or "result" not in data[res]:
+            raise ValueError("Test results data without configuration or result section?")
+        if "TESTSERIES" not in data[res]["configuration"]:
+            data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f))
+        testtype = data[res]["configuration"].get("TEST_TYPE")
+        if testtype not in configmap:
+            raise ValueError("Unknown test type %s" % testtype)
+        configvars = configmap[testtype]
+        testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
+        if testpath not in results:
+            results[testpath] = {}
+        if 'ptestresult.rawlogs' in data[res]['result']:
+            del data[res]['result']['ptestresult.rawlogs']
+        if 'ptestresult.sections' in data[res]['result']:
+            for i in data[res]['result']['ptestresult.sections']:
+                del data[res]['result']['ptestresult.sections'][i]['log']
+        results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map):
+    results = {}
+    if os.path.isfile(source):
+        append_resultsdata(results, source, configmap)
+        return results
+    for root, dirs, files in os.walk(source):
+        for name in files:
+            f = os.path.join(root, name)
+            if name == "testresults.json":
+                append_resultsdata(results, f, configmap)
+    return results
+
+def filter_resultsdata(results, resultid):
+    newresults = {}
+    for r in results:
+        for i in results[r]:
+            if i == resultid:
+                newresults[r] = {}
+                newresults[r][i] = results[r][i]
+    return newresults
+
+def save_resultsdata(results, destdir, fn="testresults.json"):
+    for res in results:
+        if res:
+            dst = destdir + "/" + res + "/" + fn
+        else:
+            dst = destdir + "/" + fn
+        os.makedirs(os.path.dirname(dst), exist_ok=True)
+        with open(dst, 'w') as f:
+            f.write(json.dumps(results[res], sort_keys=True, indent=4))
+
+def git_get_result(repo, tags):
+    git_objs = []
+    for tag in tags:
+        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+    def parse_json_stream(data):
+        """Parse multiple concatenated JSON objects"""
+        objs = []
+        json_d = ""
+        for line in data.splitlines():
+            if line == '}{':
+                json_d += '}'
+                objs.append(json.loads(json_d))
+                json_d = '{'
+            else:
+                json_d += line
+        objs.append(json.loads(json_d))
+        return objs
+
+    # Optimize by reading all data with one git command
+    results = {}
+    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+        append_resultsdata(results, obj)
+
+    return results
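
For orientation, here is a minimal usage sketch of the helpers added above; the input and output paths are invented, and it assumes scripts/lib is on sys.path so the resulttool package imports cleanly (the resulttool command arranges this via scriptpath).

# Hypothetical usage sketch; the paths below are invented for illustration.
# Assumes scripts/lib is on sys.path so the resulttool package imports cleanly.
from resulttool import resultutils

# Walk a tree of testresults.json files, grouping them by the store mapping
results = resultutils.load_resultsdata("/tmp/testresults-in",
                                       configmap=resultutils.store_map)
for testpath in results:
    print("%s: %d result set(s)" % (testpath, len(results[testpath])))

# Write the merged data back out using the store layout
resultutils.save_resultsdata(results, "/tmp/testresults-out")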