zdtm: report test results in the junit format

Submitted by Andrei Vagin on Sept. 17, 2017, 1:21 a.m.

Details

Message ID 20170917012153.14335-1-avagin@openvz.org
State New
Series "zdtm: report test results in the junit format"
Headers show

Commit Message

Andrei Vagin Sept. 17, 2017, 1:21 a.m.
From: Andrei Vagin <avagin@virtuozzo.com>

We found a good plugin for Jenkins to analyze test results,
but it can handle only the junit format.

Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
---
 test/zdtm.py | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

Patch hide | download patch | download mbox

diff --git a/test/zdtm.py b/test/zdtm.py
index 12fa51ffb..c649774ff 100755
--- a/test/zdtm.py
+++ b/test/zdtm.py
@@ -1477,6 +1477,8 @@  class launcher:
 		self.__subs = {}
 		self.__fail = False
 		self.__file_report = None
+		self.__junit_file = None
+		self.__junit_test_cases = None
 		self.__failed = []
 		self.__nr_skip = 0
 		if self.__max > 1 and self.__total > 1:
@@ -1487,13 +1489,20 @@  class launcher:
 			self.__use_log = False
 
 		if opts['report'] and (opts['keep_going'] or self.__total == 1):
+			global TestSuite, TestCase
+			from junit_xml import TestSuite, TestCase
 			now = datetime.datetime.now()
 			att = 0
 			reportname = os.path.join(report_dir, "criu-testreport.tap")
-			while os.access(reportname, os.F_OK):
+			junitreport = os.path.join(report_dir, "criu-testreport.xml")
+			while os.access(reportname, os.F_OK) or os.access(junitreport, os.F_OK):
 				reportname = os.path.join(report_dir, "criu-testreport" + ".%d.tap" % att)
+				junitreport = os.path.join(report_dir, "criu-testreport" + ".%d.xml" % att)
 				att += 1
 
+			self.__junit_file = open(junitreport, 'a')
+			self.__junit_test_cases = []
+
 			self.__file_report = open(reportname, 'a')
 			print >> self.__file_report, "TAP version 13"
 			print >> self.__file_report, "# Hardware architecture: " + arch
@@ -1515,6 +1524,10 @@  class launcher:
 		self.__nr += 1
 		self.__runtest += 1
 		self.__nr_skip += 1
+		tc = TestCase(name)
+		tc.add_skipped_info(reason)
+		if self.__junit_test_cases != None:
+			self.__junit_test_cases.append(tc)
 		if self.__file_report:
 			testline = "ok %d - %s # SKIP %s" % (self.__runtest, name, reason)
 			print >> self.__file_report, testline
@@ -1550,7 +1563,7 @@  class launcher:
 		sub = subprocess.Popen(["./zdtm_ct", "zdtm.py"],
 				env = dict(os.environ, CR_CT_TEST_INFO = arg),
 				stdout = log, stderr = subprocess.STDOUT, close_fds = True)
-		self.__subs[sub.pid] = {'sub': sub, 'log': logf, 'name': name}
+		self.__subs[sub.pid] = {'sub': sub, 'log': logf, 'name': name, "start": time.time()}
 
 		if test_flag(desc, 'excl'):
 			self.wait()
@@ -1575,13 +1588,19 @@  class launcher:
 		self.__runtest += 1
 		if pid != 0:
 			sub = self.__subs.pop(pid)
+			tc = None
+			if self.__junit_test_cases != None:
+				tc = TestCase(sub['name'], elapsed_sec=time.time() - sub['start'])
+				self.__junit_test_cases.append(tc)
 			if status != 0:
 				self.__fail = True
 				failed_flavor = decode_flav(os.WEXITSTATUS(status))
 				self.__failed.append([sub['name'], failed_flavor])
 				if self.__file_report:
 					testline = "not ok %d - %s # flavor %s" % (self.__runtest, sub['name'], failed_flavor)
-					details = {'output': open(sub['log']).read()}
+					output = open(sub['log']).read()
+					details = {'output': output}
+					tc.add_error_info(output = output)
 					print >> self.__file_report, testline
 					print >> self.__file_report, yaml.dump(details, explicit_start=True, explicit_end=True, default_style='|')
 				if sub['log']:
@@ -1621,6 +1640,8 @@  class launcher:
 		if not opts['fault'] and check_core_files():
 			self.__fail = True
 		if self.__file_report:
+			ts = TestSuite(opts['title'], self.__junit_test_cases, os.getenv("NODE_NAME"))
+			self.__junit_file.write(TestSuite.to_xml_string([ts]))
 			self.__file_report.close()
 
 		if opts['keep_going']:
@@ -2074,6 +2095,7 @@  rp.add_argument("--ignore-taint", help = "Don't care about a non-zero kernel tai
 rp.add_argument("--lazy-pages", help = "restore pages on demand", action = 'store_true')
 rp.add_argument("--remote-lazy-pages", help = "simulate lazy migration", action = 'store_true')
 rp.add_argument("--check-only", help = "Additionally try to dump/restore in --check-only mode", action = 'store_true')
+rp.add_argument("--title", help = "A test suite title", default = "criu")
 
 lp = sp.add_parser("list", help = "List tests")
 lp.set_defaults(action = list_tests)