Bugfix: HA kill process recovery has a conflict 13/58713/4
author    rexlee8776 <limingjiang@huawei.com>
Tue, 19 Jun 2018 10:40:20 +0000 (10:40 +0000)
committer rexlee8776 <limingjiang@huawei.com>
Tue, 26 Jun 2018 01:45:46 +0000 (01:45 +0000)
This issue was observed on the Nokia SUT when running in the Plugfest. It occurs
when start_process begins to recover the killed process (such as nova-api) while
the SUT's self-healing mechanism has already recovered it; the two overlapping
recovery attempts conflict and cause failures.
The HA attack recovery should therefore be improved so that it only recovers the
process when recovery is actually needed.

JIRA: YARDSTICK-1222

Change-Id: I1acb5a7d59d6fe4e0de0b0c5942fa89e051dd1ff
Signed-off-by: rexlee8776 <limingjiang@huawei.com>
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py

index 76721e3..7f976fd 100755 (executable)
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
+        self.sla_pass = False
 
     def setup(self):
         """scenario setup"""
@@ -69,26 +70,28 @@ class ServiceHA(base.Scenario):
         self.monitorMgr.wait_monitors()
         LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
-        sla_pass = self.monitorMgr.verify_SLA()
+        self.sla_pass = self.monitorMgr.verify_SLA()
         service_not_found = False
         for k, v in self.data.items():
             if v == 0:
-                sla_pass = False
+                self.sla_pass = False
                 service_not_found = True
                 LOG.info("The service process (%s) not found in the host envrioment", k)
 
-        result['sla_pass'] = 1 if sla_pass else 0
+        result['sla_pass'] = 1 if self.sla_pass else 0
         self.monitorMgr.store_result(result)
 
         self.verify_SLA(
-            sla_pass, ("a service process was not found in the host "
-                       "environment" if service_not_found
-                       else "MonitorMgr.verify_SLA() failed"))
+            self.sla_pass, ("a service process was not found in the host "
+                            "environment" if service_not_found
+                            else "MonitorMgr.verify_SLA() failed"))
 
     def teardown(self):
         """scenario teardown"""
-        for attacker in self.attackers:
-            attacker.recover()
+        # only recover when sla not pass
+        if not self.sla_pass:
+            for attacker in self.attackers:
+                attacker.recover()
 
 
 def _test():    # pragma: no cover
index cf1e76d..ec0e597 100644 (file)
@@ -43,6 +43,13 @@ class ServicehaTestCase(unittest.TestCase):
         }
         sla = {"outage_time": 5}
         self.args = {"options": options, "sla": sla}
+        self.test__serviceha = serviceha.ServiceHA(self.args, self.ctx)
+
+    def test___init__(self):
+
+        self.assertEqual(self.test__serviceha.data, {})
+        self.assertFalse(self.test__serviceha.setup_done)
+        self.assertFalse(self.test__serviceha.sla_pass)
 
     # NOTE(elfoley): This should be split into test_setup and test_run
     # NOTE(elfoley): This should explicitly test outcomes and states
@@ -90,3 +97,18 @@ class ServicehaTestCase(unittest.TestCase):
         ret = {}
         self.assertRaises(y_exc.SLAValidationError, p.run, ret)
         self.assertEqual(ret['sla_pass'], 0)
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_no_teardown_when_sla_pass(self, mock_monitor,
+                                                  *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+        p.setup()
+        self.assertTrue(p.setup_done)
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+        ret = {}
+        p.run(ret)
+        attacker = mock.Mock()
+        p.attackers = [attacker]
+        p.teardown()
+        attacker.recover.assert_not_called()
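For the opposite path, a hypothetical companion assertion (not part of this commit, just a sketch continuing from the test above with sla_pass forced to False) would expect recover() to be invoked:

    p.sla_pass = False
    attacker = mock.Mock()
    p.attackers = [attacker]
    p.teardown()
    attacker.recover.assert_called_once()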