Fix log typos in benchmark/scenarios/availability 57/56757/5
authorMiikka Koistinen <miikka.koistinen@nokia.com>
Mon, 7 May 2018 09:12:33 +0000 (12:12 +0300)
committerMiikka Koistinen <miikka.koistinen@nokia.com>
Tue, 8 May 2018 08:23:17 +0000 (11:23 +0300)
This commit fixes multiple log message typos and all pylint
errors that emerged from the changes.

The verify_SLA() methods of MonitorProcess and MonitorOpenstackCmd
repeat already-logged information, so these log prints are removed.

JIRA: YARDSTICK-1145

Change-Id: Ifef26e4b4ff7766089caec24785511969c2d663e
Signed-off-by: Miikka Koistinen <miikka.koistinen@nokia.com>
yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
yardstick/benchmark/scenarios/availability/director.py
yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
yardstick/benchmark/scenarios/availability/serviceha.py

index cb171ea..7f1136c 100644 (file)
@@ -42,29 +42,28 @@ class ProcessAttacker(BaseAttacker):
 
     def check(self):
         with open(self.check_script, "r") as stdin_file:
-            exit_status, stdout, stderr = self.connection.execute(
+            _, stdout, stderr = self.connection.execute(
                 "sudo /bin/sh -s {0}".format(self.service_name),
                 stdin=stdin_file)
 
         if stdout:
-            LOG.info("check the environment success!")
+            LOG.info("Check the environment success!")
             return int(stdout.strip('\n'))
         else:
-            LOG.error(
-                "the host environment is error, stdout:%s, stderr:%s",
-                stdout, stderr)
+            LOG.error("Error checking the host environment, "
+                      "stdout:%s, stderr:%s", stdout, stderr)
         return False
 
     def inject_fault(self):
         with open(self.inject_script, "r") as stdin_file:
-            exit_status, stdout, stderr = self.connection.execute(
+            self.connection.execute(
                 "sudo /bin/sh -s {0}".format(self.service_name),
                 stdin=stdin_file)
 
     def recover(self):
         with open(self.recovery_script, "r") as stdin_file:
-            exit_status, stdout, stderr = self.connection.execute(
+            exit_status, _, _ = self.connection.execute(
                 "sudo /bin/bash -s {0} ".format(self.service_name),
                 stdin=stdin_file)
         if exit_status:
-            LOG.info("Fail to restart service!")
+            LOG.info("Failed to restart service: %s", self.recovery_script)
index d03d044..d67a16b 100644 (file)
@@ -71,7 +71,7 @@ class BaseAttacker(object):
         for attacker_cls in utils.itersubclasses(BaseAttacker):
             if attacker_type == attacker_cls.__attacker_type__:
                 return attacker_cls
-        raise RuntimeError("No such runner_type %s" % attacker_type)
+        raise RuntimeError("No such runner_type: %s" % attacker_type)
 
     def get_script_fullpath(self, path):
         base_path = os.path.dirname(attacker_conf_path)
index 71690c1..6cc0cb2 100644 (file)
@@ -40,7 +40,7 @@ class Director(object):
         nodes = self.context_cfg.get("nodes", None)
         # setup attackers
         if "attackers" in self.scenario_cfg["options"]:
-            LOG.debug("start init attackers...")
+            LOG.debug("Start init attackers...")
             attacker_cfgs = self.scenario_cfg["options"]["attackers"]
             self.attackerMgr = baseattacker.AttackerMgr()
             self.data = self.attackerMgr.init_attackers(attacker_cfgs,
@@ -48,19 +48,19 @@ class Director(object):
 
         # setup monitors
         if "monitors" in self.scenario_cfg["options"]:
-            LOG.debug("start init monitors...")
+            LOG.debug("Start init monitors...")
             monitor_cfgs = self.scenario_cfg["options"]["monitors"]
             self.monitorMgr = basemonitor.MonitorMgr(self.data)
             self.monitorMgr.init_monitors(monitor_cfgs, nodes)
         # setup operations
         if "operations" in self.scenario_cfg["options"]:
-            LOG.debug("start init operations...")
+            LOG.debug("Start init operations...")
             operation_cfgs = self.scenario_cfg["options"]["operations"]
             self.operationMgr = baseoperation.OperationMgr()
             self.operationMgr.init_operations(operation_cfgs, nodes)
         # setup result checker
         if "resultCheckers" in self.scenario_cfg["options"]:
-            LOG.debug("start init resultCheckers...")
+            LOG.debug("Start init resultCheckers...")
             result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"]
             self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
             self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
@@ -69,7 +69,7 @@ class Director(object):
         if intermediate_variables is None:
             intermediate_variables = {}
         LOG.debug(
-            "the type of current action is %s, the key is %s", type, key)
+            "The type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionplayers.AttackerPlayer(self.attackerMgr[key], intermediate_variables)
         if type == ActionType.MONITOR:
@@ -80,17 +80,17 @@ class Director(object):
         if type == ActionType.OPERATION:
             return actionplayers.OperationPlayer(self.operationMgr[key],
                                                  intermediate_variables)
-        LOG.debug("something run when creatactionplayer")
+        LOG.debug("The type is not recognized by createActionPlayer")
 
     def createActionRollbacker(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s", type, key)
+            "The type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
         if type == ActionType.OPERATION:
             return actionrollbackers.OperationRollbacker(
                 self.operationMgr[key])
-        LOG.debug("no rollbacker created for %s", key)
+        LOG.debug("No rollbacker created for key: %s", key)
 
     def verify(self):
         result = True
@@ -99,7 +99,7 @@ class Director(object):
         if hasattr(self, 'resultCheckerMgr'):
             result &= self.resultCheckerMgr.verify()
         if result:
-            LOG.debug("monitors are passed")
+            LOG.debug("Monitor results are passed")
         return result
 
     def stopMonitors(self):
@@ -107,12 +107,12 @@ class Director(object):
             self.monitorMgr.wait_monitors()
 
     def knockoff(self):
-        LOG.debug("knock off ....")
+        LOG.debug("Knock off ....")
         while self.executionSteps:
             singleStep = self.executionSteps.pop()
             singleStep.rollback()
 
     def store_result(self, result):
-        LOG.debug("store result ....")
+        LOG.debug("Store result ....")
         if hasattr(self, 'monitorMgr'):
             self.monitorMgr.store_result(result)
index 50a63f5..f6004c7 100644 (file)
@@ -103,7 +103,7 @@ class BaseMonitor(multiprocessing.Process):
         for monitor in utils.itersubclasses(BaseMonitor):
             if monitor_type == monitor.__monitor_type__:
                 return monitor
-        raise RuntimeError("No such monitor_type %s" % monitor_type)
+        raise RuntimeError("No such monitor_type: %s" % monitor_type)
 
     def get_script_fullpath(self, path):
         base_path = os.path.dirname(monitor_conf_path)
index d0551bf..3b36c76 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
     output = []
     try:
         output = subprocess.check_output(command, shell=True)
-    except Exception:
+    except Exception:  # pylint: disable=broad-except
         exitcode = -1
         LOG.error("exec command '%s' error:\n ", command, exc_info=True)
 
@@ -45,7 +45,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
             self.connection = ssh.SSH.from_node(host,
                                                 defaults={"user": "root"})
             self.connection.wait(timeout=600)
-            LOG.debug("ssh host success!")
+            LOG.debug("ssh host (%s) success!", str(host))
 
         self.check_script = self.get_script_fullpath(
             "ha_tools/check_openstack_cmd.bash")
@@ -61,22 +61,20 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
                 self.cmd = self.cmd + " --insecure"
 
     def monitor_func(self):
-        exit_status = 0
         exit_status, stdout = _execute_shell_command(self.cmd)
-        LOG.debug("Execute command '%s' and the stdout is:\n%s", self.cmd, stdout)
+        LOG.debug("Executed command '%s'. "
+                  "The stdout is:\n%s", self.cmd, stdout)
         if exit_status:
             return False
         return True
 
     def verify_SLA(self):
         outage_time = self._result.get('outage_time', None)
-        LOG.debug("the _result:%s", self._result)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time > max_outage_time:
             LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
-            LOG.info("the sla is passed")
             return True
 
 
@@ -97,7 +95,7 @@ def _test():    # pragma: no cover
     }
     monitor_configs.append(config)
 
-    p = basemonitor.MonitorMgr()
+    p = basemonitor.MonitorMgr({})
     p.init_monitors(monitor_configs, context)
     p.start_monitors()
     p.wait_monitors()
index dce69f4..971bae1 100644 (file)
@@ -29,7 +29,7 @@ class MultiMonitor(basemonitor.BaseMonitor):
         monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
 
         monitor_number = self._config.get("monitor_number", 1)
-        for i in range(monitor_number):
+        for _ in range(monitor_number):
             monitor_ins = monitor_cls(self._config, self._context,
                                       self.monitor_data)
             self.monitors.append(monitor_ins)
@@ -70,7 +70,8 @@ class MultiMonitor(basemonitor.BaseMonitor):
         elif "max_recover_time" in self._config["sla"]:
             max_outage_time = self._config["sla"]["max_recover_time"]
         else:
-            raise RuntimeError("monitor max_outage_time config is not found")
+            raise RuntimeError("'max_outage_time' or 'max_recover_time' "
+                               "config is not found")
         self._result = {"outage_time": outage_time}
 
         if outage_time > max_outage_time:
index b0f6f8e..8d2f263 100644 (file)
@@ -25,14 +25,14 @@ class MonitorProcess(basemonitor.BaseMonitor):
 
         self.connection = ssh.SSH.from_node(host, defaults={"user": "root"})
         self.connection.wait(timeout=600)
-        LOG.debug("ssh host success!")
+        LOG.debug("ssh host (%s) success!", str(host))
         self.check_script = self.get_script_fullpath(
             "ha_tools/check_process_python.bash")
         self.process_name = self._config["process_name"]
 
     def monitor_func(self):
         with open(self.check_script, "r") as stdin_file:
-            exit_status, stdout, stderr = self.connection.execute(
+            _, stdout, _ = self.connection.execute(
                 "sudo /bin/sh -s {0}".format(self.process_name),
                 stdin=stdin_file)
 
@@ -45,14 +45,12 @@ class MonitorProcess(basemonitor.BaseMonitor):
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_recover_time"]
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+            LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
-            LOG.info("the sla is passed")
             return True
 
 
@@ -73,7 +71,7 @@ def _test():    # pragma: no cover
     }
     monitor_configs.append(config)
 
-    p = basemonitor.MonitorMgr()
+    p = basemonitor.MonitorMgr({})
     p.init_monitors(monitor_configs, context)
     p.start_monitors()
     p.wait_monitors()
index dcd0fe5..42941c6 100755 (executable)
@@ -34,7 +34,7 @@ class ServiceHA(base.Scenario):
         """scenario setup"""
         nodes = self.context_cfg.get("nodes", None)
         if nodes is None:
-            LOG.error("the nodes info is none")
+            LOG.error("The nodes info is none")
             return
 
         self.attackers = []
@@ -57,17 +57,17 @@ class ServiceHA(base.Scenario):
     def run(self, result):
         """execute the benchmark"""
         if not self.setup_done:
-            LOG.error("The setup not finished!")
+            LOG.error("The setup is not finished!")
             return
 
         self.monitorMgr.start_monitors()
-        LOG.info("HA monitor start!")
+        LOG.info("Monitor '%s' start!", self.__scenario_type__)
 
         for attacker in self.attackers:
             attacker.inject_fault()
 
         self.monitorMgr.wait_monitors()
-        LOG.info("HA monitor stop!")
+        LOG.info("Monitor '%s' stop!", self.__scenario_type__)
 
         sla_pass = self.monitorMgr.verify_SLA()
         for k, v in self.data.items():