Refactor task results manipulation in rally 65/57465/2
author    Juha Kosonen <juha.kosonen@nokia.com>
          Wed, 16 May 2018 11:18:41 +0000 (14:18 +0300)
committer Juha Kosonen <juha.kosonen@nokia.com>
          Fri, 18 May 2018 14:49:10 +0000 (17:49 +0300)
Use the data provided by report generation ("rally task report --json")
since the output of "rally task results" is not valid JSON if the task
didn't finish gracefully.

JIRA: FUNCTEST-966

Change-Id: Ieb906bc209cc26055babd5f4e6b2bb556724f9a8
Signed-off-by: Juha Kosonen <juha.kosonen@nokia.com>
functest/opnfv_tests/openstack/rally/rally.py
functest/tests/unit/openstack/rally/test_rally.py

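For reference, the change moves parsing from the output of "rally task
results" (a JSON list of per-scenario entries) to the document produced
by "rally task report --json" (a single JSON object keyed by "tasks").
A minimal sketch of both shapes, taken from the unit test fixtures
further down (field values are illustrative only):

    # old: "rally task results" -> list of result entries
    [{"full_duration": 17.312026,
      "result": [{"error": []}, {"error": ["err"]}]}]

    # new: "rally task report --json" -> one object with task metadata
    {"tasks": [{"status": "finished", "pass_sla": true,
                "subtasks": [{"workloads": [{"full_duration": 1.23,
                                             "data": [{"error": []}]}]}]}]}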
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 8d1bfab..16699a0 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -181,14 +181,14 @@ class RallyBase(testcase.TestCase):
         :return: Bool
         """
         rally_report = json.loads(json_raw)
-        for report in rally_report:
-            if report is None or report.get('result') is None:
-                return False
-
-            for result in report.get('result'):
-                if result is None or result.get('error'):
+        tasks = rally_report.get('tasks')
+        if tasks:
+            for task in tasks:
+                if task.get('status') != 'finished' or \
+                   task.get('pass_sla') is not True:
                     return False
-
+        else:
+            return False
         return True
 
     def _migration_supported(self):
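With the report shape above, task_succeed() reduces to checking the
"status" and "pass_sla" fields of every task. A sketch of the verdicts,
mirroring the updated unit tests below (rally_base is a RallyBase
instance, as in the test suite):

    rally_base.task_succeed('{"tasks": [{"status": "finished", "pass_sla": true}]}')  # True
    rally_base.task_succeed('{"tasks": [{"status": "crashed"}]}')  # False
    rally_base.task_succeed('{}')  # False: no "tasks" key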
@@ -316,6 +316,52 @@ class RallyBase(testcase.TestCase):
 
         return True
 
+    def _save_results(self, test_name, task_id):
+        """ Generate and save task execution results"""
+        # check for result directory and create it otherwise
+        if not os.path.exists(self.RESULTS_DIR):
+            LOGGER.debug('%s does not exist, we create it.',
+                         self.RESULTS_DIR)
+            os.makedirs(self.RESULTS_DIR)
+
+        # put detailed result to log
+        cmd = (["rally", "task", "detailed", "--uuid", task_id])
+        LOGGER.debug('running command: %s', cmd)
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.STDOUT)
+        json_detailed = self.get_cmd_output(proc)
+        LOGGER.info('%s', json_detailed)
+
+        # save report as JSON
+        cmd = (["rally", "task", "report", "--json", "--uuid", task_id])
+        LOGGER.debug('running command: %s', cmd)
+        with open(os.devnull, 'w') as devnull:
+            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                                    stderr=devnull)
+        json_results = self.get_cmd_output(proc)
+        report_json_name = 'opnfv-{}.json'.format(test_name)
+        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
+        with open(report_json_dir, 'w') as r_file:
+            LOGGER.debug('saving json file')
+            r_file.write(json_results)
+
+        # save report as HTML
+        report_html_name = 'opnfv-{}.html'.format(test_name)
+        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+        cmd = (["rally", "task", "report", "--html", "--uuid", task_id,
+                "--out", report_html_dir])
+        LOGGER.debug('running command: %s', cmd)
+        with open(os.devnull, 'w') as devnull:
+            subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=devnull)
+
+        self._append_summary(json_results, test_name)
+
+        # parse JSON operation result
+        if self.task_succeed(json_results):
+            LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
+        else:
+            LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+
     def _run_task(self, test_name):
         """Run a task."""
         LOGGER.info('Starting test scenario "%s" ...', test_name)
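_save_results() extracts the reporting tail of _run_task() with one
behavioural change: the JSON now comes from "rally task report --json
--uuid <id>" rather than "rally task results <id>". A minimal usage
sketch (the task id is hypothetical; 'authenticate' stands in for one
of the functest rally scenario names):

    task_id = '6fa4f5ff-...'  # hypothetical uuid from get_task_id()
    self._save_results('authenticate', task_id)
    # logs the "rally task detailed" output, writes
    # opnfv-authenticate.json and opnfv-authenticate.html under
    # RESULTS_DIR, then appends the parsed stats to self.summary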
@@ -351,47 +397,9 @@ class RallyBase(testcase.TestCase):
                                     stderr=subprocess.STDOUT)
             output = self.get_cmd_output(proc)
             LOGGER.error("Task validation result:" + "\n" + output)
-            return
+            raise Exception("Failed to retrieve task id")
 
-        # check for result directory and create it otherwise
-        if not os.path.exists(self.RESULTS_DIR):
-            LOGGER.debug('%s does not exist, we create it.',
-                         self.RESULTS_DIR)
-            os.makedirs(self.RESULTS_DIR)
-
-        # get and save rally operation JSON result
-        cmd = (["rally", "task", "detailed", task_id])
-        LOGGER.debug('running command: %s', cmd)
-        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
-        json_detailed = self.get_cmd_output(proc)
-        LOGGER.info('%s', json_detailed)
-
-        cmd = (["rally", "task", "results", task_id])
-        LOGGER.debug('running command: %s', cmd)
-        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
-        json_results = self.get_cmd_output(proc)
-        self._append_summary(json_results, test_name)
-        report_json_name = 'opnfv-{}.json'.format(test_name)
-        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
-        with open(report_json_dir, 'w') as r_file:
-            LOGGER.debug('saving json file')
-            r_file.write(json_results)
-
-        # write html report file
-        report_html_name = 'opnfv-{}.html'.format(test_name)
-        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
-        cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
-        LOGGER.debug('running command: %s', cmd)
-        subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
-
-        # parse JSON operation result
-        if self.task_succeed(json_results):
-            LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
-        else:
-            LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+        self._save_results(test_name, task_id)
 
     def _append_summary(self, json_raw, test_name):
         """Update statistics summary info."""
@@ -400,20 +408,24 @@ class RallyBase(testcase.TestCase):
         overall_duration = 0.0
 
         rally_report = json.loads(json_raw)
-        for report in rally_report:
-            if report.get('full_duration'):
-                overall_duration += report.get('full_duration')
+        for task in rally_report.get('tasks'):
+            for subtask in task.get('subtasks'):
+                for workload in subtask.get('workloads'):
+                    if workload.get('full_duration'):
+                        overall_duration += workload.get('full_duration')
 
-            if report.get('result'):
-                for result in report.get('result'):
-                    nb_tests += 1
-                    if not result.get('error'):
-                        nb_success += 1
+                    if workload.get('data'):
+                        nb_tests += len(workload.get('data'))
+
+                        for result in workload.get('data'):
+                            if not result.get('error'):
+                                nb_success += 1
 
         scenario_summary = {'test_name': test_name,
                             'overall_duration': overall_duration,
                             'nb_tests': nb_tests,
-                            'nb_success': nb_success}
+                            'nb_success': nb_success,
+                            'task_status': self.task_succeed(json_raw)}
         self.summary.append(scenario_summary)
 
     def _prepare_env(self):
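A worked example using the fixture from test_append_summary below: two
workloads with full_duration 1.23 and 2.78 give overall_duration 4.01,
the two "data" entries give nb_tests = 2, and the single entry without
an "error" gives nb_success = 1. The new task_status field simply
records task_succeed() applied to the same raw JSON:

    overall_duration = 1.23 + 2.78  # = 4.01
    nb_tests = 1 + 1                # one "data" entry per workload
    nb_success = 1                  # only the first entry has no error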
@@ -508,6 +520,7 @@ class RallyBase(testcase.TestCase):
         total_duration = 0.0
         total_nb_tests = 0
         total_nb_success = 0
+        nb_modules = 0
         payload = []
 
         res_table = prettytable.PrettyTable(
@@ -519,6 +532,8 @@ class RallyBase(testcase.TestCase):
 
         # for each scenario we draw a row for the table
         for item in self.summary:
+            if item['task_status'] is True:
+                nb_modules += 1
             total_duration += item['overall_duration']
             total_nb_tests += item['nb_tests']
             total_nb_success += item['nb_success']
@@ -549,8 +564,9 @@ class RallyBase(testcase.TestCase):
                            success_rate_str])
 
         LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
-        LOGGER.info("Rally '%s' success_rate is %s%%",
-                    self.case_name, success_rate)
+        LOGGER.info("Rally '%s' success_rate is %s%% in %s/%s modules",
+                    self.case_name, success_rate, nb_modules,
+                    len(self.summary))
         payload.append({'summary': {'duration': total_duration,
                                     'nb tests': total_nb_tests,
                                     'nb success': success_rate}})
@@ -573,6 +589,14 @@ class RallyBase(testcase.TestCase):
         if self.image:
             self.cloud.delete_image(self.image.id)
 
+    def is_successful(self):
+        """The overall result of the test."""
+        for item in self.summary:
+            if item['task_status'] is False:
+                return testcase.TestCase.EX_TESTCASE_FAILED
+
+        return super(RallyBase, self).is_successful()
+
     @energy.enable_recording
     def run(self, **kwargs):
         """Run testcase."""
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 970e5c4..3ef90c4 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -85,15 +85,16 @@ class OSRallyTesting(unittest.TestCase):
                          None)
 
     def test_task_succeed_fail(self):
-        json_raw = json.dumps([None])
+        json_raw = json.dumps({})
         self.assertEqual(self.rally_base.task_succeed(json_raw),
                          False)
-        json_raw = json.dumps([{'result': [{'error': ['test_error']}]}])
+        json_raw = json.dumps({'tasks': [{'status': 'crashed'}]})
         self.assertEqual(self.rally_base.task_succeed(json_raw),
                          False)
 
     def test_task_succeed_success(self):
-        json_raw = json.dumps('')
+        json_raw = json.dumps({'tasks': [{'status': 'finished',
+                                          'pass_sla': True}]})
         self.assertEqual(self.rally_base.task_succeed(json_raw),
                          True)
 
@@ -213,8 +214,6 @@ class OSRallyTesting(unittest.TestCase):
                 'file_is_empty', return_value=False)
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
                 '_build_task_args', return_value={})
-    @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
-                '_append_summary')
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
                 'get_task_id', return_value=None)
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
@@ -225,7 +224,8 @@ class OSRallyTesting(unittest.TestCase):
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
     def test_run_task_taskid_missing(self, mock_logger_error, *args):
         # pylint: disable=unused-argument
-        self.rally_base._run_task('test_name')
+        with self.assertRaises(Exception):
+            self.rally_base._run_task('test_name')
         text = 'Failed to retrieve task_id, validating task...'
         mock_logger_error.assert_any_call(text)
 
@@ -236,8 +236,6 @@ class OSRallyTesting(unittest.TestCase):
                 'file_is_empty', return_value=False)
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
                 '_build_task_args', return_value={})
-    @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
-                '_append_summary')
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
                 'get_task_id', return_value='1')
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
@@ -250,13 +248,30 @@ class OSRallyTesting(unittest.TestCase):
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.makedirs')
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
     @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.error')
-    def test_run_task_default(self, mock_logger_error, mock_logger_info,
-                              *args):
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+                '_save_results')
+    def test_run_task_default(self, mock_save_res, *args):
         # pylint: disable=unused-argument
         self.rally_base._run_task('test_name')
-        text = 'Test scenario: "test_name" OK.\n'
-        mock_logger_info.assert_any_call(text)
-        mock_logger_error.assert_not_called()
+        mock_save_res.assert_called()
+
+    @mock.patch('__builtin__.open', mock.mock_open())
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+                'task_succeed', return_value=True)
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+                'get_cmd_output', return_value='')
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.path.exists',
+                return_value=True)
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.subprocess.Popen')
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.os.makedirs')
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.info')
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.LOGGER.debug')
+    @mock.patch('functest.opnfv_tests.openstack.rally.rally.RallyBase.'
+                '_append_summary')
+    def test_save_results(self, mock_summary, *args):
+        # pylint: disable=unused-argument
+        self.rally_base._save_results('test_name', '1234')
+        mock_summary.assert_called()
 
     def test_prepare_env_testname_invalid(self):
         self.rally_base.TESTS = ['test1', 'test2']
@@ -473,15 +488,31 @@ class OSRallyTesting(unittest.TestCase):
         mock_prep_env.assert_called()
 
     def test_append_summary(self):
-        text = '[{"result":[{"error":[]},{"error":["err"]}],' \
-               '"full_duration": 17.312026}]'
+        text = '{"tasks": [{"subtasks": [{"workloads": [{"full_duration": ' \
+               '1.23,"data": [{"error": []}]}]},{"workloads": ' \
+               '[{"full_duration": 2.78, "data": [{"error": ["err"]}]}]}]}]}'
         self.rally_base._append_summary(text, "foo_test")
         self.assertEqual(self.rally_base.summary[0]['test_name'], "foo_test")
-        self.assertEqual(self.rally_base.summary[0]['overall_duration'],
-                         17.312026)
+        self.assertEqual(self.rally_base.summary[0]['overall_duration'], 4.01)
         self.assertEqual(self.rally_base.summary[0]['nb_tests'], 2)
         self.assertEqual(self.rally_base.summary[0]['nb_success'], 1)
 
+    def test_is_successful_false(self):
+        with mock.patch('__builtin__.super') as mock_super:
+            self.rally_base.summary = [{"task_status": True},
+                                       {"task_status": False}]
+            self.assertEqual(self.rally_base.is_successful(),
+                             testcase.TestCase.EX_TESTCASE_FAILED)
+            mock_super(rally.RallyBase, self).is_successful.assert_not_called()
+
+    def test_is_successful_true(self):
+        with mock.patch('__builtin__.super') as mock_super:
+            mock_super(rally.RallyBase, self).is_successful.return_value = 424
+            self.rally_base.summary = [{"task_status": True},
+                                       {"task_status": True}]
+            self.assertEqual(self.rally_base.is_successful(), 424)
+            mock_super(rally.RallyBase, self).is_successful.assert_called()
+
 
 if __name__ == "__main__":
     logging.disable(logging.CRITICAL)