dovetail tool: add SKIP testcase status 69/28269/6
author MatthewLi <matthew.lijun@huawei.com>
Wed, 8 Feb 2017 07:52:27 +0000 (02:52 -0500)
committer MatthewLi <matthew.lijun@huawei.com>
Fri, 17 Feb 2017 06:39:13 +0000 (01:39 -0500)
JIRA: DOVETAIL-344

1) testcase status changed from True/False to PASS/SKIP/FAIL,
   so that testcases skipped by tempest can be identified
   (see the sketch below these notes); details are available at
   https://etherpad.opnfv.org/p/dovetail_report
2) some variable names renamed for readability, e.g.
   passed --> testcase_passed, failed --> prepare_failed
3) "dovetail report" output amended accordingly, per 1)

Change-Id: Ibe8e50693fc474d78e7d0fb89257f34703248579
Signed-off-by: MatthewLi <matthew.lijun@huawei.com>
dovetail/report.py
dovetail/test_runner.py
dovetail/testcase.py

index b7b2793..66618b2 100644 (file)
@@ -23,13 +23,6 @@ from utils.dovetail_config import DovetailConfig as dt_cfg
 from testcase import Testcase
 
 
-def get_pass_str(passed):
-    if passed:
-        return 'PASS'
-    else:
-        return 'FAIL'
-
-
 class Report(object):
 
     results = {'functest': {}, 'yardstick': {}, 'shell': {}}
@@ -75,15 +68,14 @@ class Report(object):
                 report_obj['testcases_list'].append(testcase_inreport)
                 continue
 
-            testcase_inreport['result'] = get_pass_str(testcase.passed())
+            testcase_inreport['result'] = testcase.passed()
             testcase_inreport['objective'] = testcase.objective()
             testcase_inreport['sub_testcase'] = []
             if testcase.sub_testcase() is not None:
                 for sub_test in testcase.sub_testcase():
                     testcase_inreport['sub_testcase'].append({
                         'name': sub_test,
-                        'result': get_pass_str(
-                            testcase.sub_testcase_passed(sub_test))
+                        'result': testcase.sub_testcase_passed(sub_test)
                     })
             report_obj['testcases_list'].append(testcase_inreport)
         cls.logger.info(json.dumps(report_obj))
@@ -114,38 +106,51 @@ class Report(object):
             testcase_num[area] = 0
             testcase_passnum[area] = 0
 
-        repo_link = dt_cfg.dovetail_config['repo']['path'] + \
-            dt_cfg.dovetail_config['repo']['tag']
-
+        testarea_scope = []
         for testcase in report_data['testcases_list']:
             pattern = re.compile(
                 '|'.join(dt_cfg.dovetail_config['testarea_supported']))
             area = pattern.findall(testcase['name'])[0]
-            result_dir = dt_cfg.dovetail_config['result_dir']
-            spec_link = repo_link + '/dovetail/testcase'
-            sub_report[area] += '- <%s> %s result: <%s>\n' %\
-                (spec_link, testcase['name'], result_dir)
+            testarea_scope.append(area)
+            sub_report[area] += '-%-25s %s\n' %\
+                (testcase['name'], testcase['result'])
+            if 'sub_testcase' in testcase:
+                for sub_test in testcase['sub_testcase']:
+                    sub_report[area] += '\t%-110s %s\n' %\
+                        (sub_test['name'], sub_test['result'])
             testcase_num[area] += 1
             total_num += 1
             if testcase['result'] == 'PASS':
                 testcase_passnum[area] += 1
                 pass_num += 1
+            elif testcase['result'] == 'SKIP':
+                testcase_num[area] -= 1
+                total_num -= 1
 
         if total_num != 0:
             pass_rate = pass_num / total_num
             report_txt += 'Pass Rate: %.2f%% (%s/%s)\n' %\
                 (pass_rate * 100, pass_num, total_num)
             report_txt += 'Assessed test areas:\n'
+        else:
+            report_txt += \
+                'no testcase or all testcases are skipped in this testsuite'
+
         for key in sub_report:
             if testcase_num[key] != 0:
                 pass_rate = testcase_passnum[key] / testcase_num[key]
-                doc_link = repo_link + ('/docs/testsuites/%s' % key)
-                report_txt += '- %s results: <%s> pass %.2f%%\n' %\
-                    (key, doc_link, pass_rate * 100)
+                report_txt += '-%-25s pass %.2f%%\n' %\
+                    (key + ' results:', pass_rate * 100)
+            elif key in testarea_scope:
+                report_txt += '-%-25s all skipped\n' % key
         for key in sub_report:
             if testcase_num[key] != 0:
                 pass_rate = testcase_passnum[key] / testcase_num[key]
-                report_txt += '%s: pass rate %.2f%%\n' % (key, pass_rate * 100)
+                report_txt += '%-25s  pass rate %.2f%%\n' %\
+                    (key + ':', pass_rate * 100)
+                report_txt += sub_report[key]
+            elif key in testarea_scope:
+                report_txt += '%-25s  all skipped\n' % key
                 report_txt += sub_report[key]
 
         cls.logger.info(report_txt)
@@ -248,12 +253,24 @@ class FunctestCrawler(object):
             with open(file_path, 'r') as myfile:
                 output = myfile.read()
 
-            error_logs = " ".join(re.findall('(.*?)[. ]*fail ', output))
-            skipped = " ".join(re.findall('(.*?)[. ]*skip:', output))
+            match = re.findall('(.*?)[. ]*fail ', output)
+            if match:
+                error_logs = " ".join(match)
+            else:
+                error_logs = ""
+            match = re.findall('(.*?)[. ]*skip:', output)
+            if match:
+                skipped = " ".join(match)
+            else:
+                skipped = ""
 
-            failed_num = int(re.findall(' - Failures: (\d*)', output)[0])
-            if failed_num != 0:
-                criteria = 'FAIL'
+            match = re.findall(' - Failures: (\d*)', output)
+            if match:
+                failed_num = int(match[0])
+            else:
+                failed_num = 0
+            if failed_num == 0:
+                criteria = 'PASS'
 
             match = re.findall('Ran: (\d*) tests in (\d*)\.\d* sec.', output)
             num_tests, dur_sec_int = match[0]
@@ -379,31 +396,32 @@ class FunctestChecker(object):
         if not db_result:
             if sub_testcase_list is not None:
                 for sub_testcase in sub_testcase_list:
-                    testcase.sub_testcase_passed(sub_testcase, False)
+                    testcase.sub_testcase_passed(sub_testcase, 'FAIL')
             return
 
-        testcase.passed(db_result['criteria'] == 'PASS')
+        testcase.passed(db_result['criteria'])
 
         if sub_testcase_list is None:
             return
 
-        if testcase.testcase['passed'] is True:
-            for sub_testcase in sub_testcase_list:
-                testcase.sub_testcase_passed(sub_testcase, True)
-            return
-
-        all_passed = True
+        testcase_passed = 'SKIP'
         for sub_testcase in sub_testcase_list:
             self.logger.debug('check sub_testcase:%s', sub_testcase)
-            # TO DO: should think the test case when skipped, should think
-            # together with the "dovetail report"
             if sub_testcase in db_result['details']['errors']:
-                testcase.sub_testcase_passed(sub_testcase, False)
-                all_passed = False
+                testcase.sub_testcase_passed(sub_testcase, 'FAIL')
+                testcase_passed = 'FAIL'
+            elif sub_testcase in db_result['details']['skipped']:
+                testcase.sub_testcase_passed(sub_testcase, 'SKIP')
             else:
-                testcase.sub_testcase_passed(sub_testcase, True)
+                testcase.sub_testcase_passed(sub_testcase, 'PASS')
+
+        if testcase_passed == 'SKIP':
+            for sub_testcase in sub_testcase_list:
+                if testcase.sub_testcase_status[sub_testcase] == 'PASS':
+                    testcase_passed = 'PASS'
+                    break
 
-        testcase.passed(all_passed)
+        testcase.passed(testcase_passed)
 
 
 class YardstickChecker(object):
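
A minimal illustration of the sub-testcase classification in FunctestChecker
above (the db_result dict here is a hypothetical stand-in for a Functest DB
record, not real result data): each sub-testcase is matched against the
'errors' and 'skipped' lists in the result details.

    db_result = {'criteria': 'PASS',
                 'details': {'errors': ['tempest.api.x.test_a'],
                             'skipped': ['tempest.api.y.test_b']}}
    sub_testcase_list = ['tempest.api.x.test_a',
                         'tempest.api.y.test_b',
                         'tempest.api.z.test_c']

    status = {}
    for sub_testcase in sub_testcase_list:
        if sub_testcase in db_result['details']['errors']:
            status[sub_testcase] = 'FAIL'
        elif sub_testcase in db_result['details']['skipped']:
            status[sub_testcase] = 'SKIP'
        else:
            status[sub_testcase] = 'PASS'
    # status: test_a -> 'FAIL', test_b -> 'SKIP', test_c -> 'PASS'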
index 29217f5..588f049 100644 (file)
@@ -38,15 +38,15 @@ class DockerRunner(object):
         self.logger.debug('container id:%s', container_id)
 
         if not self.testcase.prepared():
-            failed = False
+            prepare_failed = False
             cmds = self.testcase.pre_condition()
             if cmds:
                 for cmd in cmds:
                     ret, msg = Container.exec_cmd(container_id, cmd)
                     if ret != 0:
-                        failed = True
+                        prepare_failed = True
                         break
-            if not failed:
+            if not prepare_failed:
                 self.testcase.prepared(True)
 
         if not self.testcase.prepare_cmd(self.type):
@@ -98,18 +98,18 @@ class ShellRunner(object):
         self.logger.debug('create runner:%s', self.type)
 
     def run(self):
-        passed = True
-        failed = False
-        result = {'pass': True, 'results': []}
+        testcase_passed = 'PASS'
+        prepare_failed = False
+        result = {'pass': 'PASS', 'results': []}
         if not self.testcase.prepared():
             cmds = self.testcase.pre_condition()
             for cmd in cmds:
                 ret, msg = dt_utils.exec_cmd(cmd, self.logger)
                 result['results'].append((cmd, ret, msg))
                 if ret != 0:
-                    failed = True
+                    prepare_failed = True
                     break
-            if not failed:
+            if not prepare_failed:
                 self.testcase.prepared(True)
 
         if not self.testcase.prepare_cmd(self.type):
@@ -120,9 +120,9 @@ class ShellRunner(object):
                 ret, msg = dt_utils.exec_cmd(cmd, self.logger)
                 result['results'].append((cmd, ret, msg))
                 if ret != 0:
-                    passed = False
+                    testcase_passed = 'FAIL'
 
-        result['pass'] = passed
+        result['pass'] = testcase_passed
 
         cmds = self.testcase.post_condition()
         for cmd in cmds:
index 4ad2b36..aeeeee6 100644 (file)
@@ -23,7 +23,7 @@ class Testcase(object):
 
     def __init__(self, testcase_yaml):
         self.testcase = testcase_yaml.values()[0]
-        self.testcase['passed'] = False
+        self.testcase['passed'] = 'FAIL'
         self.cmds = []
         self.sub_testcase_status = {}
         self.update_validate_testcase(self.validate_testcase())