From patchwork Thu Jan 8 11:36:44 2026
X-Patchwork-Submitter: Miroslav Cernak
X-Patchwork-Id: 78277
From: Miroslav Cernak <miroslav.cernak@siemens.com>
To: openembedded-core@lists.openembedded.org
Cc: adrian.freihofer@siemens.com, Miroslav Cernak <miroslav.cernak@siemens.com>
Subject: [PATCH 1/4] resulttool: junit support ptests
Date: Thu, 8 Jan 2026 12:36:44 +0100
Message-Id: <20260108113647.56663-2-miroslav.cernak@siemens.com>
In-Reply-To: <20260108113647.56663-1-miroslav.cernak@siemens.com>
References: <20260108113647.56663-1-miroslav.cernak@siemens.com>
X-Groupsio-URL: https://lists.openembedded.org/g/openembedded-core/message/229064

From: Adrian Freihofer <adrian.freihofer@siemens.com>

Support image tests and ptests. Therefore some additional hierarchy
levels are introduced in the junit.xml file.

Ptest log files can either be written to one file per ptest and
referenced from junit.xml as an ATTACHMENT, or the log message can be
inlined in the system-out element. The following is an example
generated with the --attach-log-files parameter:

  [[ATTACHMENT|ptest-logs/foo.log]]

This works at least with GitLab's JUnit parser. Inline system-out tags
are not (yet?) fully supported by GitLab. The details are discussed
here: https://gitlab.com/gitlab-org/gitlab/-/issues/440341
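For illustration, such a reference ends up in the system-out element of
the corresponding testcase. The resulting hierarchy looks roughly like
this (run, suite and file names are made up):

  <testsuites tests="1" ...>
    <testsuite name="runtime_core-image-minimal">
      <testsuite name="Package Tests" tests="1" ...>
        <testcase name="foo" classname="ptest" time="3">
          <system-out>[[ATTACHMENT|ptest-logs/foo.log]]</system-out>
        </testcase>
      </testsuite>
    </testsuite>
  </testsuites>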
Support ptests:

- Handle ptestresult.sections. A log file per ptest is generated. This
  allows making the detailed ptest log available via the file attribute
  of junit's testcase class.
- Skip the per-test ptestresults items:
  - Rendering all ptestresult sections quickly leads to overloaded test
    reports with almost no information (only a test status per test).
    Handling the ptestresult.sections instead looks more useful,
    especially for larger test pipelines.
  - This also fixes a crash, because these items do not have a duration
    attribute.

Improve the command line parser: calling just "resulttool junit" from a
bitbake environment finds the testresults.json in $BUILDDIR and writes
the corresponding junit.xml into the same directory.

Signed-off-by: Adrian Freihofer <adrian.freihofer@siemens.com>
Signed-off-by: Miroslav Cernak <miroslav.cernak@siemens.com>
---
 scripts/lib/resulttool/junit.py | 280 ++++++++++++++++++++++++++------
 1 file changed, 228 insertions(+), 52 deletions(-)

diff --git a/scripts/lib/resulttool/junit.py b/scripts/lib/resulttool/junit.py
index c7a53dc550..c4476f1e59 100644
--- a/scripts/lib/resulttool/junit.py
+++ b/scripts/lib/resulttool/junit.py
@@ -6,72 +6,248 @@
 #
 
 import os
-import re
 import xml.etree.ElementTree as ET
 import resulttool.resultutils as resultutils
 
-def junit(args, logger):
-    testresults = resultutils.load_resultsdata(args.json_file, configmap=resultutils.store_map)
-    total_time = 0
-    skipped = 0
-    failures = 0
-    errors = 0
+DEFAULT_JUNIT_FILE = "junit.xml"
+
+
+class PtestSummary:
+    """Collected information of one ptest suite
+
+    Collect the information of many ptests of a ptest suite such as ptestresults.APtest.test_foo1,
+    ptestresults.APtest.test_foo2, ... as one testcase APtest. This can be merged into information
+    from ptestresult.sections.
+    """
+    def __init__(self):
+        self.tests = 0
+        self.ERROR = []
+        self.FAILED = []
+        self.SKIPPED = []
+
+    def add_status(self, ptest_name, status):
+        self.tests += 1
+        if status == "FAILED":
+            self.FAILED.append(ptest_name)
+        elif status == "ERROR":
+            self.ERROR.append(ptest_name)
+        elif status == "SKIPPED":
+            self.SKIPPED.append(ptest_name)
+
+    @property
+    def status(self):
+        """Normalize the status of many ptests to one status of the ptest suite"""
+        if len(self.ERROR) > 0:
+            return "ERROR"
+        if len(self.FAILED) > 0:
+            return "FAILED"
+        if len(self.SKIPPED) == self.tests:
+            return "SKIPPED"
+        return "SUCCESS"
+
+    @property
+    def log_summary(self):
+        """Return a summary of the ptest suite"""
+        summary_str = "ERROR:" + os.linesep
+        summary_str += os.linesep.join([s + "- " for s in self.ERROR]) + os.linesep
+        summary_str = "FAILED:" + os.linesep
+        summary_str += os.linesep.join([s + "- " for s in self.FAILED]) + os.linesep
+        summary_str = "SKIPPED:" + os.linesep
+        summary_str += os.linesep.join([s + "- " for s in self.SKIPPED]) + os.linesep
+        return summary_str
+
 
-    for tests in testresults.values():
-        results = tests[next(reversed(tests))].get("result", {})
+def create_testcase(testsuite, testcase_dict, status, status_message, status_text=None, system_out=None):
+    """Create a junit testcase node"""
+    testcase_node = ET.SubElement(testsuite, "testcase", testcase_dict)
 
-        for result_id, result in results.items():
-            # filter out ptestresult.rawlogs and ptestresult.sections
-            if re.search(r'\.test_', result_id):
-                total_time += result.get("duration", 0)
+    se = None
+    if status == "SKIPPED":
+        se = ET.SubElement(testcase_node, "skipped", message=status_message)
+    elif status == "FAILED":
+        se = ET.SubElement(testcase_node, "failure", message=status_message)
+    elif status == "ERROR":
+        se = ET.SubElement(testcase_node, "error", message=status_message)
+    if se and status_text:
+        se.text = status_text
 
-                if result['status'] == "FAILED":
-                    failures += 1
-                elif result['status'] == "ERROR":
-                    errors += 1
-                elif result['status'] == "SKIPPED":
-                    skipped += 1
+    if system_out:
+        ET.SubElement(testcase_node, "system-out").text = system_out
+
+def junit_tree(testresults, test_log_dir=None):
+    """Create a JUnit XML tree from testresults
+
+    Generates a tuple of the XML tree and a dictionary of ptest log files.
+    The dictionary contains the path where the log file is located as key and the log content as value.
+    The log file path is test_log_dir/ptest_name.log.
+    """
+    test_logfiles = {}
 
     testsuites_node = ET.Element("testsuites")
-    testsuites_node.set("time", "%s" % total_time)
-    testsuite_node = ET.SubElement(testsuites_node, "testsuite")
-    testsuite_node.set("name", "Testimage")
-    testsuite_node.set("time", "%s" % total_time)
-    testsuite_node.set("tests", "%s" % len(results))
-    testsuite_node.set("failures", "%s" % failures)
-    testsuite_node.set("errors", "%s" % errors)
-    testsuite_node.set("skipped", "%s" % skipped)
-
-    for result_id, result in results.items():
-        if re.search(r'\.test_', result_id):
-            testcase_node = ET.SubElement(testsuite_node, "testcase", {
-                "name": result_id,
-                "classname": "Testimage",
-                "time": str(result['duration'])
-            })
-            if result['status'] == "SKIPPED":
-                ET.SubElement(testcase_node, "skipped", message=result['log'])
-            elif result['status'] == "FAILED":
-                ET.SubElement(testcase_node, "failure", message=result['log'])
-            elif result['status'] == "ERROR":
-                ET.SubElement(testcase_node, "error", message=result['log'])
+    total_errors = total_failures = total_skipped = total_tests = total_time = 0
+
+    for _, run_name, _, results in resultutils.test_run_results(testresults):
+        test_run_testsuite = ET.SubElement(testsuites_node, "testsuite", name=run_name)
+
+        # Handle all image tests but skip all ptests related sections
+        imagetest_testsuite = ET.SubElement(test_run_testsuite, "testsuite", name="Image Tests")
+        image_errors = image_failures = image_skipped = image_tests = image_total_time = 0
+
+        ptest_summarys = {}
+
+        for result_id, result in results.items():
+            if result_id.startswith("ptestresult.sections") or result_id.startswith("ptestresult.rawlogs"):
+                continue
+
+            if result_id.startswith("ptestresult."):
+                ptest_name = result_id.split(".", 3)[1]
+                if ptest_name not in ptest_summarys:
+                    ptest_summarys[ptest_name] = PtestSummary()
+                ptest_summarys[ptest_name].add_status(ptest_name, result["status"])
+            else:
+                image_total_time += int(result["duration"])
+                image_tests += 1
+                status = result["status"]
+                if status == "FAILED":
+                    image_failures += 1
+                elif status == "ERROR":
+                    image_errors += 1
+                elif status == "SKIPPED":
+                    image_skipped += 1
+
+                testcase_dict = {
+                    "name": result_id,
+                    "classname": "testimage",
+                    "time": str(result["duration"]),
+                }
+                create_testcase(
+                    imagetest_testsuite,
+                    testcase_dict,
+                    status,
+                    result.get("log", None))
+        imagetest_testsuite.set("errors", str(image_errors))
+        imagetest_testsuite.set("failures", str(image_failures))
+        imagetest_testsuite.set("skipped", str(image_skipped))
+        imagetest_testsuite.set("tests", str(image_tests))
+        imagetest_testsuite.set("time", str(image_total_time))
+
+        # Handle all ptest related sections
+        ptest_errors = ptest_failures = ptest_skipped = ptest_tests = ptest_total_time = 0
+        if "ptestresult.sections" in results:
+            ptest_testsuite = ET.SubElement(test_run_testsuite, "testsuite", name="Package Tests")
+
+            for ptest_name, result in results["ptestresult.sections"].items():
+                testcase_dict = {
+                    "name": ptest_name,
+                    "classname": "ptest",
+                    "time": str(result["duration"]),
+                }
+
+                log = result.get("log")
+                system_out = None
+                if log:
+                    if test_log_dir is not None:
+                        test_log_file = os.path.join(test_log_dir, ptest_name + ".log")
f"[[ATTACHMENT|{test_log_file}]]" + test_logfiles[test_log_file] = log + else: + system_out = log + + create_testcase(ptest_testsuite, + testcase_dict, + ptest_summarys[ptest_name].status, + ptest_summarys[ptest_name].log_summary, + system_out=system_out) + + ptest_total_time += int(result["duration"]) + ptest_tests += 1 + status = ptest_summarys[ptest_name].status + if status == "FAILED": + ptest_failures += 1 + elif status == "ERROR": + ptest_errors += 1 + elif status == "SKIPPED": + ptest_skipped += 1 + + ptest_testsuite.set("errors", str(ptest_errors)) + ptest_testsuite.set("failures", str(ptest_failures)) + ptest_testsuite.set("skipped", str(ptest_skipped)) + ptest_testsuite.set("tests", str(ptest_tests)) + ptest_testsuite.set("time", str(ptest_total_time)) + + total_errors += image_errors + ptest_errors + total_failures += image_failures + ptest_failures + total_skipped += image_skipped + ptest_skipped + total_tests += image_tests + ptest_tests + total_time += image_total_time + ptest_total_time + + testsuites_node.set("errors", str(total_errors)) + testsuites_node.set("failures", str(total_failures)) + testsuites_node.set("skipped", str(total_skipped)) + testsuites_node.set("tests", str(total_tests)) + testsuites_node.set("time", str(total_time)) tree = ET.ElementTree(testsuites_node) + return tree, test_logfiles - if args.junit_xml_path is None: - args.junit_xml_path = os.environ['BUILDDIR'] + '/tmp/log/oeqa/junit.xml' - tree.write(args.junit_xml_path, encoding='UTF-8', xml_declaration=True) - logger.info('Saved JUnit XML report as %s' % args.junit_xml_path) +def junit(args, logger): + if args.junit_xml_path is not None: + junit_xml_path = args.junit_xml_path + else: + junit_xml_path = os.path.join(os.path.dirname(args.json_file), DEFAULT_JUNIT_FILE) + logger.debug("Generating JUnit XML report from %s" % args.json_file) + testresults = resultutils.load_resultsdata(args.json_file, configmap=resultutils.store_map) + + # dump ptest logs to a file in a subdirectory where the junit.xml is located + test_log_dir = None + if args.attach_log_files: + test_log_dir = "test-logs" + junit_dir_abs = os.path.dirname(os.path.abspath(junit_xml_path)) + test_log_dir_abs = os.path.join(junit_dir_abs, test_log_dir) + if not os.path.exists(test_log_dir_abs): + os.makedirs(test_log_dir_abs) + + tree, test_logfiles = junit_tree(testresults, test_log_dir) + + for test_logfile, log in test_logfiles.items(): + with open(os.path.join(junit_dir_abs, test_logfile), "w") as f: + f.write(log) + + tree.write(junit_xml_path, encoding="UTF-8", xml_declaration=True) + logger.info("Saved JUnit XML report as %s" % junit_xml_path) + def register_commands(subparsers): """Register subcommands from this plugin""" - parser_build = subparsers.add_parser('junit', help='create test report in JUnit XML format', - description='generate unit test report in JUnit XML format based on the latest test results in the testresults.json.', - group='analysis') + parser_build = subparsers.add_parser("junit", + help="create test report in JUnit XML format", + description="generate unit test report in JUnit XML format based on the latest test results in the testresults.json.", + group="analysis" + ) parser_build.set_defaults(func=junit) - parser_build.add_argument('json_file', - help='json file should point to the testresults.json') - parser_build.add_argument('-j', '--junit_xml_path', - help='junit xml path allows setting the path of the generated test report. 
-                              help='junit xml path allows setting the path of the generated test report. The default location is /tmp/log/oeqa/junit.xml')
+
+    # If BUILDDIR is set, use the default path for the testresults.json and junit.xml
+    # Otherwise require the user to provide the path to the testresults.json
+    help_json_file = "json file should point to the testresults.json"
+    help_junit_xml_path = "junit xml path allows setting the path of the generated test report."
+    try:
+        builddir = os.environ["BUILDDIR"]
+    except KeyError:
+        builddir = None
+    if builddir:
+        log_path = os.path.join(builddir, "tmp", "log", "oeqa")
+        parser_build.add_argument("json_file", nargs="?",
+            default=os.path.join(log_path, "testresults.json"),
+            help=help_json_file + " (default: %(default)s)")
+        parser_build.add_argument("-j", "--junit_xml_path",
+            default=os.path.join(log_path, DEFAULT_JUNIT_FILE),
+            help=help_junit_xml_path + " (default: %(default)s)")
+    else:
+        parser_build.add_argument("json_file", help=help_json_file)
+        parser_build.add_argument("-j", "--junit_xml_path", nargs="?",
+            help=help_junit_xml_path + " (default: junit.xml in the same folder as the testresults.json)")
+
+    parser_build.add_argument("-a", "--attach-log-files", action="store_true", default=False,
+        help="Write the log files to a subfolder in the same folder as the junit.xml and reference them in the junit.xml")
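As a sanity check of the roll-up rules implemented by PtestSummary in
the patch above, a minimal sketch (test names are hypothetical):

    from resulttool.junit import PtestSummary

    s = PtestSummary()
    s.add_status("test_a", "PASSED")   # passed tests are counted but not recorded
    s.add_status("test_b", "SKIPPED")
    assert s.status == "SUCCESS"       # not every test skipped, no FAILED/ERROR

    s = PtestSummary()
    s.add_status("test_c", "SKIPPED")
    assert s.status == "SKIPPED"       # every recorded test was skipped

    s = PtestSummary()
    s.add_status("test_d", "FAILED")
    s.add_status("test_e", "ERROR")
    assert s.status == "ERROR"         # ERROR takes precedence over FAILED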
From patchwork Thu Jan 8 11:36:45 2026
X-Patchwork-Submitter: Miroslav Cernak
X-Patchwork-Id: 78275
From: Miroslav Cernak <miroslav.cernak@siemens.com>
To: openembedded-core@lists.openembedded.org
Cc: adrian.freihofer@siemens.com, Miroslav Cernak <miroslav.cernak@siemens.com>
Subject: [PATCH 2/4] oe-selftest: add resulttool junit test
Date: Thu, 8 Jan 2026 12:36:45 +0100
Message-Id: <20260108113647.56663-3-miroslav.cernak@siemens.com>
In-Reply-To: <20260108113647.56663-1-miroslav.cernak@siemens.com>
References: <20260108113647.56663-1-miroslav.cernak@siemens.com>
X-Groupsio-URL: https://lists.openembedded.org/g/openembedded-core/message/229062

From: Adrian Freihofer <adrian.freihofer@siemens.com>

Verify that image tests, ptests with inlined logs, and ptests with
attached log files work as expected.

Signed-off-by: Adrian Freihofer <adrian.freihofer@siemens.com>
Signed-off-by: Miroslav Cernak <miroslav.cernak@siemens.com>
---
 .../oeqa/selftest/cases/resulttooltests.py | 179 ++++++++++++++++++
 1 file changed, 179 insertions(+)

diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py
index c3303f3fbb..aa90116795 100644
--- a/meta/lib/oeqa/selftest/cases/resulttooltests.py
+++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -14,6 +14,12 @@ from resulttool import regression as regression
 from resulttool import resultutils as resultutils
 from oeqa.selftest.case import OESelftestTestCase
 
+from resulttool.junit import junit_tree, PtestSummary
+import xml.etree.ElementTree as ET
+import logging
+import json
+
+
 class ResultToolTests(OESelftestTestCase):
     base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
                                                             "TESTSERIES": "series1",
@@ -373,3 +379,176 @@ class ResultToolTests(OESelftestTestCase):
             self.logger, "A", "B", base_configuration["a"]["conf_X"], target_configuration["a"]["conf_Y"])
         self.assertDictEqual(
             result, {}, msg=f"ptests should be compared: {resultstring}")
+
+    @property
+    def _get_junit_testresults_1(self):
+        base_testresults = {
+            "a": {
+                "runtime_a-image": {
+                    "configuration": {"TEST_TYPE": "runtime", "MACHINE": "qemux86"},
+                    "result": {
+                        # Image test skipped
+                        "ptest.PtestRunnerTest.test_ptestrunner_expectfail": {
+                            "duration": 0,
+                            "log": "Cannot run ptests with @expectedFailure as ptests are required to pass",
+                            "status": "SKIPPED",
+                        },
+                        # Image test passed
+                        "ptest.PtestRunnerTest.test_ptestrunner_expectsuccess": {
+                            "duration": 7,
+                            "status": "PASSED",
+                        },
+                        # Passed and skipped tests: passed
+                        "ptestresult.package-passed.test_passed": {"status": "PASSED"},
+                        "ptestresult.package-passed.test_skipped": {
+                            "status": "SKIPPED"
+                        },
+                        # All tests are skipped: skipped
+                        "ptestresult.package-skipped.test_skipped": {
+                            "status": "SKIPPED"
+                        },
+                        # One or more errors: error
+                        "ptestresult.package-error.test_error": {"status": "ERROR"},
+                        "ptestresult.package-error.test_failed": {"status": "FAILED"},
+                        "ptestresult.package-error.test_skipped": {"status": "SKIPPED"},
+                        "ptestresult.package-error.test_passed": {"status": "PASSED"},
+                        # No error and one or more failed: failed
+                        "ptestresult.package-failed.test_failed": {"status": "FAILED"},
+                        "ptestresult.package-failed.test_passed": {"status": "PASSED"},
+                        "ptestresult.sections": {
+                            "package-passed": {
+                                "duration": "2",
+                                "log": "PASS: package-passed.test_passed\nPASS: package-passed.test_skipped\n",
+                            },
+                            "package-skipped": {
+                                "duration": "1",
+                                "log": "SKIPPED: package-skipped.test_skipped\n",
+                            },
+                            "package-error": {
+                                "duration": "4",
+                                "log": "ERROR: ERROR: package-error.test_error\nFAILED: package-error.test_failed\nSKIPPED: package-error.test_skipped\nPASSED: package-error.test_passed\n",
+                            },
+                            "package-failed": {
+                                "duration": "2",
+                                "log": "FAILED: package-failed.test_failed\nPASS: package-failed.test_passed\n",
+                            },
+                        },
+                    },
+                },
+            }
+        }
+        return base_testresults
+
+    def _dump_junit_tree(self, testresults, tree, files_name="junit"):
+        if self.logger.level <= logging.DEBUG:
+            junit_json_path = files_name + ".json"
+            with open(junit_json_path, "w") as f:
+                json.dump(testresults, f, indent=4)
+            self.logger.debug(
+                "Saved testresults json %s" % os.path.abspath(junit_json_path)
+            )
+            junit_xml_path = files_name + ".xml"
+            tree.write(junit_xml_path, encoding="UTF-8", xml_declaration=True)
+            self.logger.debug(
+                "Saved JUnit XML report as %s" % os.path.abspath(junit_xml_path)
+            )
+
+    def _check_junit_testresults_1(self, testsuites_node):
+        self.assertEqual(testsuites_node.attrib["errors"], "1")
+        self.assertEqual(testsuites_node.attrib["failures"], "1")
+        self.assertEqual(testsuites_node.attrib["skipped"], "2")
+        self.assertEqual(testsuites_node.attrib["tests"], "6")
+        self.assertEqual(testsuites_node.attrib["time"], "16")
+
+        testsuites = testsuites_node.findall("testsuite")
+        self.assertEqual(testsuites[0].attrib["name"], "runtime_a-image")
+        inner_testsuites = testsuites[0].findall("testsuite")
+        self.assertEqual(inner_testsuites[0].attrib["name"], "Image Tests")
+        self.assertEqual(inner_testsuites[1].attrib["name"], "Package Tests")
+
+        ptests_suite = testsuites_node.find(".//testsuite[@name='Package Tests']")
+        testcases = ptests_suite.findall("testcase")
+        self.assertEqual(testcases[0].attrib["name"], "package-passed")
+        self.assertEqual(testcases[1].attrib["name"], "package-skipped")
+        self.assertEqual(testcases[2].attrib["name"], "package-error")
+        self.assertEqual(testcases[3].attrib["name"], "package-failed")
+        self.assertEqual(testcases[0].attrib["time"], "2")
+        self.assertEqual(testcases[1].attrib["time"], "1")
+        self.assertEqual(testcases[2].attrib["time"], "4")
+        self.assertEqual(testcases[3].attrib["time"], "2")
+
+    def test_junit_log_inline(self):
+        testresults = self._get_junit_testresults_1
+        tree, test_logfiles = junit_tree(testresults)
+        self._dump_junit_tree(testresults, tree)
+        testsuites_node = tree.getroot()
+
+        # Verify the common part
+        self._check_junit_testresults_1(testsuites_node)
+
+        # Verify the inlined log files
+        ptests_suite = testsuites_node.find(".//testsuite[@name='Package Tests']")
+        testcases = ptests_suite.findall("testcase")
+        ptestresult_sections = testresults["a"]["runtime_a-image"]["result"][
+            "ptestresult.sections"
+        ]
+        self.assertEqual(
+            ptestresult_sections["package-passed"]["log"],
+            testcases[0].find("system-out").text,
+        )
+        self.assertEqual(
+            ptestresult_sections["package-skipped"]["log"],
+            testcases[1].find("system-out").text,
+        )
+        self.assertEqual(
+            ptestresult_sections["package-error"]["log"],
+            testcases[2].find("system-out").text,
+        )
+        self.assertEqual(
+            ptestresult_sections["package-failed"]["log"],
+            testcases[3].find("system-out").text,
+        )
+
+        # Check the ptest log messages are inline
+        self.assertDictEqual(test_logfiles, {})
+
+    def test_junit_log_attached(self):
+        testresults_1 = self._get_junit_testresults_1
+        test_logdir = "test-logs"
+        tree, test_logfiles = junit_tree(testresults_1, test_logdir)
+        self._dump_junit_tree(testresults_1, tree, "junit_attached")
+        testsuites_node = tree.getroot()
+
+        # Verify the common part
+        self._check_junit_testresults_1(testsuites_node)
+
+        # Verify the attached log files
+        ptests_suite = testsuites_node.find(".//testsuite[@name='Package Tests']")
+        testcases = ptests_suite.findall("testcase")
+        self.assertIn(
+            "[[ATTACHMENT|test-logs/package-passed.log]]",
+            testcases[0].find("system-out").text,
+        )
+        self.assertIn(
+            "[[ATTACHMENT|test-logs/package-skipped.log]]",
+            testcases[1].find("system-out").text,
+        )
"[[ATTACHMENT|test-logs/package-error.log]]", + testcases[2].find("system-out").text, + ) + self.assertIn( + "[[ATTACHMENT|test-logs/package-failed.log]]", + testcases[3].find("system-out").text, + ) + + self.maxDiff = None + self.assertDictEqual( + test_logfiles, + { + "test-logs/package-passed.log": "PASS: package-passed.test_passed\nPASS: package-passed.test_skipped\n", + "test-logs/package-skipped.log": "SKIPPED: package-skipped.test_skipped\n", + "test-logs/package-error.log": "ERROR: ERROR: package-error.test_error\nFAILED: package-error.test_failed\nSKIPPED: package-error.test_skipped\nPASSED: package-error.test_passed\n", + "test-logs/package-failed.log": "FAILED: package-failed.test_failed\nPASS: package-failed.test_passed\n", + }, + ) From patchwork Thu Jan 8 11:36:46 2026 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Miroslav Cernak X-Patchwork-Id: 78278 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from aws-us-west-2-korg-lkml-1.web.codeaurora.org (localhost.localdomain [127.0.0.1]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8597CD185E5 for ; Thu, 8 Jan 2026 11:54:01 +0000 (UTC) Received: from mta-65-226.siemens.flowmailer.net (mta-65-226.siemens.flowmailer.net [185.136.65.226]) by mx.groups.io with SMTP id smtpd.msgproc01-g2.4763.1767872955432320154 for ; Thu, 08 Jan 2026 03:49:16 -0800 Authentication-Results: mx.groups.io; dkim=pass header.i=miroslav.cernak@siemens.com header.s=fm1 header.b=VDjkLiA6; spf=pass (domain: rts-flowmailer.siemens.com, ip: 185.136.65.226, mailfrom: fm-1333546-202601081139096498f2f74500020760-gj6bcf@rts-flowmailer.siemens.com) Received: by mta-65-226.siemens.flowmailer.net with ESMTPSA id 202601081139096498f2f74500020760 for ; Thu, 08 Jan 2026 12:39:09 +0100 DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; s=fm1; d=siemens.com; i=miroslav.cernak@siemens.com; h=Date:From:Subject:To:Message-ID:MIME-Version:Content-Type:Content-Transfer-Encoding:Cc:References:In-Reply-To; bh=xxgKxbnVwPjWi3sXX8pAMFFvTD4nfdEtJ0R4Yi4H1rM=; b=VDjkLiA6UFM2EaE0Sl4qaab/IBKoiUJ8Wi4i5WBLxOhkhR9nkkaecUKE7ZPvcIa+XFvt9n +z16jjrDrXNh4pciXP6HMpPseGPFaihHuvGMQoJA41akUjfFvG89XsaTdgdpK5aJGVgPpIYv d1pQremNkxn7148BfnmX8KS6chNyIbQGBTKHF/JLy45MAjU7yWFAjYWLSGVj4m0LxKYjc8BX FmHPnT9wftRd17s58nuOsuueySbUVAqUP2FhNPtwpleFrlOAKPi4acoAvrlU3QUf6yz9WDzH I9PinDECuKKgI0FfyZdzFo3vJtsgTq9+SMFkQ67AvNw9xKl9KRNEFJJA==; From: Miroslav Cernak To: openembedded-core@lists.openembedded.org Cc: adrian.freihofer@siemens.com, Miroslav Cernak Subject: [PATCH 3/4] resulttool: junit: improve ptest status handling and log Date: Thu, 8 Jan 2026 12:36:46 +0100 Message-Id: <20260108113647.56663-4-miroslav.cernak@siemens.com> In-Reply-To: <20260108113647.56663-1-miroslav.cernak@siemens.com> References: <20260108113647.56663-1-miroslav.cernak@siemens.com> MIME-Version: 1.0 X-Flowmailer-Platform: Siemens Feedback-ID: 519:519-1333546:519-21489:flowmailer List-Id: X-Webhook-Received: from 45-33-107-173.ip.linodeusercontent.com [45.33.107.173] by aws-us-west-2-korg-lkml-1.web.codeaurora.org with HTTPS for ; Thu, 08 Jan 2026 11:54:01 -0000 X-Groupsio-URL: https://lists.openembedded.org/g/openembedded-core/message/229065 While using resulttool's JUnit export, several issues surfaced and are addressed here: - Avoid KeyError when ptest results are missing by guarding access to ptest_summary e.g., KeyError: 'bzip2' - Report actual failures as FAILED instead of 
From patchwork Thu Jan 8 11:36:46 2026
X-Patchwork-Submitter: Miroslav Cernak
X-Patchwork-Id: 78278
From: Miroslav Cernak <miroslav.cernak@siemens.com>
To: openembedded-core@lists.openembedded.org
Cc: adrian.freihofer@siemens.com, Miroslav Cernak <miroslav.cernak@siemens.com>
Subject: [PATCH 3/4] resulttool: junit: improve ptest status handling and log
Date: Thu, 8 Jan 2026 12:36:46 +0100
Message-Id: <20260108113647.56663-4-miroslav.cernak@siemens.com>
In-Reply-To: <20260108113647.56663-1-miroslav.cernak@siemens.com>
References: <20260108113647.56663-1-miroslav.cernak@siemens.com>
X-Groupsio-URL: https://lists.openembedded.org/g/openembedded-core/message/229065

While using resulttool's JUnit export, several issues surfaced and are
addressed here:

- Avoid a KeyError (e.g., KeyError: 'bzip2') when ptest results are
  missing, by guarding access to ptest_summary.
- Report actual failures as FAILED instead of SKIPPED, and list the
  failing testcase names (e.g., DataSQLite-testrunner,
  Foundation-testrunner).
- Include testcase names in the failure output rather than only the
  ptest name, improving first-read diagnosability.
- Make multiline failure details readable: put full traces in the JUnit
  output; for attribute-only messages that cannot contain newlines,
  collapse "\n" to spaces to avoid escaped-newline artifacts in GitLab.

This produces a more accurate and readable JUnit report, prevents
crashes when ptestresult.* lacks entries, and makes CI output
actionable on first read.

Signed-off-by: Miroslav Cernak <miroslav.cernak@siemens.com>
---
 scripts/lib/resulttool/junit.py | 78 +++++++++++++++++++++++----------
 1 file changed, 54 insertions(+), 24 deletions(-)

diff --git a/scripts/lib/resulttool/junit.py b/scripts/lib/resulttool/junit.py
index c4476f1e59..0f541dd80b 100644
--- a/scripts/lib/resulttool/junit.py
+++ b/scripts/lib/resulttool/junit.py
@@ -26,14 +26,14 @@ class PtestSummary:
         self.FAILED = []
         self.SKIPPED = []
 
-    def add_status(self, ptest_name, status):
+    def add_status(self, ptest_testcase, status):
         self.tests += 1
         if status == "FAILED":
-            self.FAILED.append(ptest_name)
+            self.FAILED.append(ptest_testcase)
         elif status == "ERROR":
-            self.ERROR.append(ptest_name)
+            self.ERROR.append(ptest_testcase)
         elif status == "SKIPPED":
-            self.SKIPPED.append(ptest_name)
+            self.SKIPPED.append(ptest_testcase)
 
     @property
     def status(self):
@@ -49,32 +49,38 @@ class PtestSummary:
     @property
     def log_summary(self):
         """Return a summary of the ptest suite"""
-        summary_str = "ERROR:" + os.linesep
-        summary_str += os.linesep.join([s + "- " for s in self.ERROR]) + os.linesep
-        summary_str = "FAILED:" + os.linesep
-        summary_str += os.linesep.join([s + "- " for s in self.FAILED]) + os.linesep
-        summary_str = "SKIPPED:" + os.linesep
-        summary_str += os.linesep.join([s + "- " for s in self.SKIPPED]) + os.linesep
-        return summary_str
+        summary_parts = []
+
+        if self.ERROR:
+            summary_parts.append("ERROR:" + os.linesep)
+            summary_parts.append(os.linesep.join(["- " + s for s in self.ERROR]) + os.linesep)
+
+        if self.FAILED:
+            summary_parts.append("FAILED:" + os.linesep)
+            summary_parts.append(os.linesep.join(["- " + s for s in self.FAILED]) + os.linesep)
+
+        if self.SKIPPED:
+            summary_parts.append("SKIPPED:" + os.linesep)
+            summary_parts.append(os.linesep.join(["- " + s for s in self.SKIPPED]) + os.linesep)
+
+        return "".join(summary_parts) if summary_parts else "No failures or errors"
 
 
 def create_testcase(testsuite, testcase_dict, status, status_message, status_text=None, system_out=None):
     """Create a junit testcase node"""
     testcase_node = ET.SubElement(testsuite, "testcase", testcase_dict)
 
+    print("%s -> %s status: %s" % (testcase_dict["classname"], testcase_dict["name"], status))
+
     se = None
     if status == "SKIPPED":
-        se = ET.SubElement(testcase_node, "skipped", message=status_message)
+        se = ET.SubElement(testcase_node, "skipped", message = status_message.replace('\n', ' ') if status_message else None)
     elif status == "FAILED":
-        se = ET.SubElement(testcase_node, "failure", message=status_message)
+        se = ET.SubElement(testcase_node, "failure")
+        se = ET.SubElement(testcase_node, "system-out").text = (status_message or "") + os.linesep + (system_out or "")
     elif status == "ERROR":
-        se = ET.SubElement(testcase_node, "error", message=status_message)
-    if se and status_text:
-        se.text = status_text
-
-    if system_out:
-        ET.SubElement(testcase_node, "system-out").text = system_out
-
+        se = ET.SubElement(testcase_node, "error")
"system-out").text = (status_message or "") + os.linesep + (system_out or "") def junit_tree(testresults, test_log_dir=None): """Create a JUnit XML tree from testresults @@ -102,9 +108,10 @@ def junit_tree(testresults, test_log_dir=None): if result_id.startswith("ptestresult."): ptest_name = result_id.split(".", 3)[1] + test_case = result_id.split(".", 3)[2] if ptest_name not in ptest_summarys: ptest_summarys[ptest_name] = PtestSummary() - ptest_summarys[ptest_name].add_status(ptest_name, result["status"]) + ptest_summarys[ptest_name].add_status(test_case, result["status"]) else: image_total_time += int(result["duration"]) image_tests += 1 @@ -145,6 +152,7 @@ def junit_tree(testresults, test_log_dir=None): "time": str(result["duration"]), } + exitcode = result.get("exitcode") log = result.get("log") system_out = None if log: @@ -155,15 +163,24 @@ def junit_tree(testresults, test_log_dir=None): else: system_out = log + # Determine status and log summary + if ptest_name in ptest_summarys: + status = ptest_summarys[ptest_name].status + log_summary = ptest_summarys[ptest_name].log_summary + else: + # When there is no detailed result for the ptest, we assume it was skipped or errored + status = "SKIPPED" if exitcode in (None, "0") else "ERROR" + print("Warning: ptest %s has no detailed results, marking as %s" % (ptest_name, status)) + log_summary = log if log else "No log available." + create_testcase(ptest_testsuite, testcase_dict, - ptest_summarys[ptest_name].status, - ptest_summarys[ptest_name].log_summary, + status, + log_summary, system_out=system_out) ptest_total_time += int(result["duration"]) ptest_tests += 1 - status = ptest_summarys[ptest_name].status if status == "FAILED": ptest_failures += 1 elif status == "ERROR": @@ -188,6 +205,19 @@ def junit_tree(testresults, test_log_dir=None): testsuites_node.set("skipped", str(total_skipped)) testsuites_node.set("tests", str(total_tests)) testsuites_node.set("time", str(total_time)) + + ptest_success = ptest_tests - ptest_errors - ptest_failures - ptest_skipped + image_success = image_tests - image_errors - image_failures - image_skipped + total_success = total_tests - total_errors - total_failures - total_skipped + + print("ptest -> tests: %d, success: %d, error: %d, failures: %d, skipped: %d, time: %d" % + (ptest_tests, ptest_success, ptest_errors, ptest_failures, ptest_skipped, ptest_total_time)) + + print("testimage -> tests: %d, success: %d, error: %d, failures: %d, skipped: %d, time: %d" % + (image_tests, image_success, image_errors, image_failures, image_skipped, image_total_time)) + + print("total -> tests: %d, success: %d, error: %d, failures: %d, skipped: %d, time: %d" % + (total_tests, total_success, total_errors, total_failures, total_skipped, total_time)) tree = ET.ElementTree(testsuites_node) return tree, test_logfiles From patchwork Thu Jan 8 11:36:47 2026 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Miroslav Cernak X-Patchwork-Id: 78276 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from aws-us-west-2-korg-lkml-1.web.codeaurora.org (localhost.localdomain [127.0.0.1]) by smtp.lore.kernel.org (Postfix) with ESMTP id 84B5DD185E3 for ; Thu, 8 Jan 2026 11:53:41 +0000 (UTC) Received: from mta-65-225.siemens.flowmailer.net (mta-65-225.siemens.flowmailer.net [185.136.65.225]) by mx.groups.io with SMTP id smtpd.msgproc02-g2.4755.1767872955799700441 for ; Thu, 08 Jan 2026 03:49:16 -0800 
From patchwork Thu Jan 8 11:36:47 2026
X-Patchwork-Submitter: Miroslav Cernak
X-Patchwork-Id: 78276
From: Miroslav Cernak <miroslav.cernak@siemens.com>
To: openembedded-core@lists.openembedded.org
Cc: adrian.freihofer@siemens.com, Miroslav Cernak <miroslav.cernak@siemens.com>
Subject: [PATCH 4/4] oe-selftest: improved junit test
Date: Thu, 8 Jan 2026 12:36:47 +0100
Message-Id: <20260108113647.56663-5-miroslav.cernak@siemens.com>
In-Reply-To: <20260108113647.56663-1-miroslav.cernak@siemens.com>
References: <20260108113647.56663-1-miroslav.cernak@siemens.com>
X-Groupsio-URL: https://lists.openembedded.org/g/openembedded-core/message/229063

Verify the junit report with the improved status and error handling.

Signed-off-by: Miroslav Cernak <miroslav.cernak@siemens.com>
---
 .../oeqa/selftest/cases/resulttooltests.py | 62 +++++++++++--------
 1 file changed, 35 insertions(+), 27 deletions(-)

diff --git a/meta/lib/oeqa/selftest/cases/resulttooltests.py b/meta/lib/oeqa/selftest/cases/resulttooltests.py
index aa90116795..e933fc6390 100644
--- a/meta/lib/oeqa/selftest/cases/resulttooltests.py
+++ b/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -432,6 +432,12 @@ class ResultToolTests(OESelftestTestCase):
                                 "duration": "2",
                                 "log": "FAILED: package-failed.test_failed\nPASS: package-failed.test_passed\n",
                             },
+                            # Test with exitcode without ptestresult
+                            "package-error-noresult": {
+                                "duration": "6",
+                                "exitcode": "123",
+                                "log": "ERROR: -bash: testerror: command not found\nERROR: Exit status is 123\n",
+                            },
                         },
                     },
                 }
@@ -454,11 +460,11 @@ class ResultToolTests(OESelftestTestCase):
         )
 
     def _check_junit_testresults_1(self, testsuites_node):
-        self.assertEqual(testsuites_node.attrib["errors"], "1")
+        self.assertEqual(testsuites_node.attrib["errors"], "2")
         self.assertEqual(testsuites_node.attrib["failures"], "1")
         self.assertEqual(testsuites_node.attrib["skipped"], "2")
-        self.assertEqual(testsuites_node.attrib["tests"], "6")
-        self.assertEqual(testsuites_node.attrib["time"], "16")
+        self.assertEqual(testsuites_node.attrib["tests"], "7")
+        self.assertEqual(testsuites_node.attrib["time"], "22")
 
         testsuites = testsuites_node.findall("testsuite")
         self.assertEqual(testsuites[0].attrib["name"], "runtime_a-image")
@@ -472,10 +478,12 @@ class ResultToolTests(OESelftestTestCase):
         self.assertEqual(testcases[1].attrib["name"], "package-skipped")
         self.assertEqual(testcases[2].attrib["name"], "package-error")
         self.assertEqual(testcases[3].attrib["name"], "package-failed")
+        self.assertEqual(testcases[4].attrib["name"], "package-error-noresult")
         self.assertEqual(testcases[0].attrib["time"], "2")
         self.assertEqual(testcases[1].attrib["time"], "1")
         self.assertEqual(testcases[2].attrib["time"], "4")
         self.assertEqual(testcases[3].attrib["time"], "2")
+        self.assertEqual(testcases[4].attrib["time"], "6")
 
     def test_junit_log_inline(self):
         testresults = self._get_junit_testresults_1
@@ -492,22 +500,22 @@ class ResultToolTests(OESelftestTestCase):
         ptestresult_sections = testresults["a"]["runtime_a-image"]["result"][
             "ptestresult.sections"
         ]
-        self.assertEqual(
-            ptestresult_sections["package-passed"]["log"],
-            testcases[0].find("system-out").text,
-        )
-        self.assertEqual(
-            ptestresult_sections["package-skipped"]["log"],
-            testcases[1].find("system-out").text,
-        )
-        self.assertEqual(
-            ptestresult_sections["package-error"]["log"],
-            testcases[2].find("system-out").text,
-        )
-        self.assertEqual(
-            ptestresult_sections["package-failed"]["log"],
-            testcases[3].find("system-out").text,
-        )
+        # The inline system-out now includes a PtestSummary section followed by the raw section log.
+        # Build expected summaries and verify both parts are present.
+        pkg_error_summary = PtestSummary()
+        pkg_error_summary.add_status("test_error", "ERROR")
+        pkg_error_summary.add_status("test_failed", "FAILED")
+        pkg_error_summary.add_status("test_skipped", "SKIPPED")
+        pkg_failed_summary = PtestSummary()
+        pkg_failed_summary.add_status("test_failed", "FAILED")
+
+        pkg_error_out = testcases[2].find("system-out").text
+        self.assertIn(pkg_error_summary.log_summary, pkg_error_out)
+        self.assertIn(ptestresult_sections["package-error"]["log"], pkg_error_out)
+
+        pkg_failed_out = testcases[3].find("system-out").text
+        self.assertIn(pkg_failed_summary.log_summary, pkg_failed_out)
+        self.assertIn(ptestresult_sections["package-failed"]["log"], pkg_failed_out)
 
         # Check the ptest log messages are inline
         self.assertDictEqual(test_logfiles, {})
@@ -525,14 +533,9 @@ class ResultToolTests(OESelftestTestCase):
         # Verify the attached log files
         ptests_suite = testsuites_node.find(".//testsuite[@name='Package Tests']")
         testcases = ptests_suite.findall("testcase")
-        self.assertIn(
-            "[[ATTACHMENT|test-logs/package-passed.log]]",
-            testcases[0].find("system-out").text,
-        )
-        self.assertIn(
-            "[[ATTACHMENT|test-logs/package-skipped.log]]",
-            testcases[1].find("system-out").text,
-        )
+        # Passed and skipped testcases do not include system-out attachments
+        self.assertIsNone(testcases[0].find("system-out"))
+        self.assertIsNone(testcases[1].find("system-out"))
         self.assertIn(
             "[[ATTACHMENT|test-logs/package-error.log]]",
             testcases[2].find("system-out").text,
@@ -541,6 +544,10 @@ class ResultToolTests(OESelftestTestCase):
             "[[ATTACHMENT|test-logs/package-failed.log]]",
             testcases[3].find("system-out").text,
         )
+        self.assertIn(
+            "[[ATTACHMENT|test-logs/package-error-noresult.log]]",
+            testcases[4].find("system-out").text,
+        )
 
         self.maxDiff = None
         self.assertDictEqual(
@@ -550,5 +557,6 @@ class ResultToolTests(OESelftestTestCase):
             "test-logs/package-skipped.log": "SKIPPED: package-skipped.test_skipped\n",
             "test-logs/package-error.log": "ERROR: ERROR: package-error.test_error\nFAILED: package-error.test_failed\nSKIPPED: package-error.test_skipped\nPASSED: package-error.test_passed\n",
             "test-logs/package-failed.log": "FAILED: package-failed.test_failed\nPASS: package-failed.test_passed\n",
+            "test-logs/package-error-noresult.log": "ERROR: -bash: testerror: command not found\nERROR: Exit status is 123\n",
             },
         )
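With the whole series applied, a typical CI invocation from a sourced
bitbake environment should reduce to something like:

    resulttool junit --attach-log-files

which, per the defaults added in patch 1, picks up
$BUILDDIR/tmp/log/oeqa/testresults.json and writes junit.xml plus a
test-logs/ subdirectory next to it.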