Get HA test case results on failure 47/55247/5
author Miikka Koistinen <miikka.koistinen@nokia.com>
Mon, 9 Apr 2018 14:40:39 +0000 (17:40 +0300)
committer Miikka Koistinen <miikka.koistinen@nokia.com>
Mon, 16 Apr 2018 09:33:19 +0000 (12:33 +0300)
Currently, yardstick HA test cases (those with 'sla action: monitor') do
not output any results if the test case SLA validation fails.
This patch modifies the task runner and the serviceHA scenario so
that the test result data is captured correctly even when SLA
validation fails.

The same changes are applied to the general scenario.

JIRA: YARDSTICK-1115

Change-Id: I648a8b229600c0ad089320ac3f803698f73aa800
Signed-off-by: Miikka Koistinen <miikka.koistinen@nokia.com>
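
In essence, each scenario now stores its result data before signalling
failure, so the runner captures the data either way. A minimal sketch of
the pattern (simplified names; see the serviceha.py diff below for the
real code):

    def run(self, result):
        """Scenario run(): record the SLA outcome before failing the case."""
        sla_pass = self.monitorMgr.verify_SLA()
        # Store the result first so the data reaches the runner even
        # when the SLA check fails.
        result['sla_pass'] = 1 if sla_pass else 0
        self.monitorMgr.store_result(result)
        # Only then signal the failure to the runner.
        assert sla_pass is True, "The HA test case NOT pass the SLA"
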
yardstick/benchmark/core/task.py
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py

diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 955b8ca..697cc00 100644
@@ -112,9 +112,9 @@ class Task(object):     # pragma: no cover
                 continue
 
             try:
-                data = self._run(tasks[i]['scenarios'],
-                                 tasks[i]['run_in_parallel'],
-                                 output_config)
+                success, data = self._run(tasks[i]['scenarios'],
+                                          tasks[i]['run_in_parallel'],
+                                          output_config)
             except KeyboardInterrupt:
                 raise
             except Exception:  # pylint: disable=broad-except
@@ -123,9 +123,15 @@ class Task(object):     # pragma: no cover
                 testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
                                                     'tc_data': []}
             else:
-                LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
-                testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
-                                                    'tc_data': data}
+                if success:
+                    LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+                    testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+                                                        'tc_data': data}
+                else:
+                    LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+                              exc_info=True)
+                    testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+                                                        'tc_data': data}
 
             if args.keep_deploy:
                 # keep deployment, forget about stack
@@ -240,6 +246,7 @@ class Task(object):     # pragma: no cover
 
         background_runners = []
 
+        task_success = True
         result = []
         # Start all background scenarios
         for scenario in filter(_is_background_scenario, scenarios):
@@ -258,8 +265,8 @@ class Task(object):     # pragma: no cover
             for runner in runners:
                 status = runner_join(runner, background_runners, self.outputs, result)
                 if status != 0:
-                    raise RuntimeError(
-                        "{0} runner status {1}".format(runner.__execution_type__, status))
+                    LOG.error("%s runner status %s", runner.__execution_type__, status)
+                    task_success = False
                 LOG.info("Runner ended")
         else:
             # run serially
@@ -271,8 +278,8 @@ class Task(object):     # pragma: no cover
                         LOG.error('Scenario NO.%s: "%s" ERROR!',
                                   scenarios.index(scenario) + 1,
                                   scenario.get('type'))
-                        raise RuntimeError(
-                            "{0} runner status {1}".format(runner.__execution_type__, status))
+                        LOG.error("%s runner status %s", runner.__execution_type__, status)
+                        task_success = False
                     LOG.info("Runner ended")
 
         # Abort background runners
@@ -289,7 +296,7 @@ class Task(object):     # pragma: no cover
             base_runner.Runner.release(runner)
 
             print("Background task ended")
-        return result
+        return task_success, result
 
     def atexit_handler(self):
         """handler for process termination"""
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 9ac5547..1fadd25 100644
@@ -26,7 +26,6 @@ class ScenarioGeneral(base.Scenario):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.intermediate_variables = {}
-        self.pass_flag = True
 
     def setup(self):
         self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -47,7 +46,7 @@ class ScenarioGeneral(base.Scenario):
                     step['actionType'], step['actionKey'])
                 if actionRollbacker:
                     self.director.executionSteps.append(actionRollbacker)
-            except Exception:
+            except Exception:  # pylint: disable=broad-except
                 LOG.exception("Exception")
                 LOG.debug(
                     "\033[91m exception when running step: %s .... \033[0m",
@@ -59,31 +58,16 @@ class ScenarioGeneral(base.Scenario):
         self.director.stopMonitors()
 
         verify_result = self.director.verify()
-
-        self.director.store_result(result)
-
         for k, v in self.director.data.items():
             if v == 0:
                 result['sla_pass'] = 0
                 verify_result = False
-                self.pass_flag = False
-                LOG.info(
-                    "\033[92m The service process not found in the host \
-envrioment, the HA test case NOT pass")
+                LOG.info("\033[92m The service process (%s) not found in the host environment", k)
 
-        if verify_result:
-            result['sla_pass'] = 1
-            LOG.info(
-                "\033[92m Congratulations, "
-                "the HA test case PASS! \033[0m")
-        else:
-            result['sla_pass'] = 0
-            self.pass_flag = False
-            LOG.info(
-                "\033[91m Aoh, the HA test case FAIL,"
-                "please check the detail debug information! \033[0m")
+        result['sla_pass'] = 1 if verify_result else 0
+        self.director.store_result(result)
+
+        assert verify_result is True, "The HA test case NOT passed"
 
     def teardown(self):
         self.director.knockoff()
-
-        assert self.pass_flag, "The HA test case NOT passed"
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 6d0d812..dcd0fe5 100755
@@ -29,7 +29,6 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
-        self.pass_flag = True
 
     def setup(self):
         """scenario setup"""
@@ -73,18 +72,12 @@ class ServiceHA(base.Scenario):
         sla_pass = self.monitorMgr.verify_SLA()
         for k, v in self.data.items():
             if v == 0:
-                result['sla_pass'] = 0
-                self.pass_flag = False
-                LOG.info("The service process not found in the host envrioment, \
-the HA test case NOT pass")
-                return
+                sla_pass = False
+                LOG.info("The service process (%s) not found in the host envrioment", k)
+
+        result['sla_pass'] = 1 if sla_pass else 0
         self.monitorMgr.store_result(result)
-        if sla_pass:
-            result['sla_pass'] = 1
-            LOG.info("The HA test case PASS the SLA")
-        else:
-            result['sla_pass'] = 0
-            self.pass_flag = False
+
         assert sla_pass is True, "The HA test case NOT pass the SLA"
 
         return
@@ -94,8 +87,6 @@ the HA test case NOT pass")
         for attacker in self.attackers:
             attacker.recover()
 
-        assert self.pass_flag, "The HA test case NOT passed"
-
 
 def _test():    # pragma: no cover
     """internal test function"""
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index 45840d5..d1172d5 100644
@@ -14,7 +14,8 @@ from yardstick.benchmark.scenarios.availability import scenario_general
 
 class ScenarioGeneralTestCase(unittest.TestCase):
 
-    def setUp(self):
+    @mock.patch.object(scenario_general, 'Director')
+    def setUp(self, *args):
         self.scenario_cfg = {
             'type': "general_scenario",
             'options': {
@@ -36,32 +37,28 @@ class ScenarioGeneralTestCase(unittest.TestCase):
             }
         }
         self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
-
-        self._mock_director = mock.patch.object(scenario_general, 'Director')
-        self.mock_director = self._mock_director.start()
-        self.addCleanup(self._stop_mock)
-
-    def _stop_mock(self):
-        self._mock_director.stop()
+        self.instance.setup()
+        self.instance.director.verify.return_value = True
 
     def test_scenario_general_all_successful(self):
-        self.instance.setup()
-        self.instance.run({})
+
+        ret = {}
+        self.instance.run(ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 1)
 
     def test_scenario_general_exception(self):
-        mock_obj = mock.Mock()
-        mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
-        self.instance.director = mock_obj
+        self.instance.director.createActionPlayer.side_effect = KeyError('Wrong')
         self.instance.director.data = {}
-        self.instance.run({})
+        ret = {}
+        self.instance.run(ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 1)
 
     def test_scenario_general_case_fail(self):
-        mock_obj = mock.Mock()
-        mock_obj.verify.return_value = False
-        self.instance.director = mock_obj
+        self.instance.director.verify.return_value = False
         self.instance.director.data = {}
-        self.instance.run({})
-        self.instance.pass_flag = True
+        ret = {}
+        self.assertRaises(AssertionError, self.instance.run, ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 6bb3ec6..dd656fb 100644
@@ -60,15 +60,16 @@ class ServicehaTestCase(unittest.TestCase):
         p.setup()
         self.assertTrue(p.setup_done)
 
-    # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
-    #     p = serviceha.ServiceHA(self.args, self.ctx)
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_run_sla_error(self, mock_monitor, *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
 
-        p.setup()
-        self.assertEqual(p.setup_done, True)
+        p.setup()
+        self.assertEqual(p.setup_done, True)
 
-    #     result = {}
-    #     result["outage_time"] = 10
-    #     mock_monitor.Monitor().get_result.return_value = result
+        mock_monitor.MonitorMgr().verify_SLA.return_value = False
 
-    #     ret = {}
-    #     self.assertRaises(AssertionError, p.run, ret)
+        ret = {}
+        self.assertRaises(AssertionError, p.run, ret)
+        self.assertEqual(ret['sla_pass'], 0)