switch logging to proper usage 47/24947/3
author      Ross Brattain <ross.b.brattain@intel.com>
            Fri, 25 Nov 2016 22:21:37 +0000 (14:21 -0800)
committer   Ross Brattain <ross.b.brattain@intel.com>
            Thu, 1 Dec 2016 00:48:34 +0000 (16:48 -0800)
The logging methods do string interpolation themselves

From the reference:

https://docs.python.org/2/library/logging.html#logging.Logger.debug

  Logger.debug(msg, *args, **kwargs)

Logs a message with level DEBUG on this logger. The msg is the message format string, and the args are the arguments which are merged into msg using the string formatting operator. (Note that this means that you can use keywords in the format string, together with a single dictionary argument.)

There are two keyword arguments in kwargs which are inspected: exc_info which, if it does not evaluate as false, causes exception information to be added to the logging message. If an exception tuple (in the format returned by sys.exc_info()) is provided, it is used; otherwise, sys.exc_info() is called to get the exception information.
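
For example, the keyword/dictionary form mentioned above combines with deferred arguments like this (a minimal sketch; the logger name and values are illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    # A single dict argument supplies the %(name)s keywords; logging only
    # interpolates it if the DEBUG level is actually enabled.
    LOG.debug("runner=%(runner)s seq=%(sequence)s START",
              {"runner": "runner-0", "sequence": 1})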

The reason logging does the string interpolation itself is to implement deferred interpolation.

String interpolation requires evaluating and formatting the arguments, so it can introduce significant computation. The logging module is careful to defer interpolation until the last possible moment.

The logging methods check isEnabledFor() for the requested level and do not interpolate at all if that level is not enabled.

https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L1178

     def warning(self, msg, *args, **kwargs):
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)
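
For example, with eager '%' interpolation the argument is formatted even when the level is disabled, whereas the deferred form skips the work entirely (a minimal sketch; the Expensive class and the INFO level are illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    class Expensive(object):
        def __repr__(self):
            print("computing expensive repr")  # side effect shows when formatting runs
            return "Expensive()"

    # Eager: the '%' operator calls __repr__ immediately, even though DEBUG is off.
    log.debug("value: %r" % Expensive())

    # Deferred: isEnabledFor(DEBUG) is False, so logging never formats the message
    # and __repr__ is never called.
    log.debug("value: %r", Expensive())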

logging actually waits to interpolate the string until LogRecord.getMessage() is called:

https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L328
        if self.args:
            msg = msg % self.args
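
The change therefore replaces eager interpolation with arguments passed to the logging call, as in the node.py hunk below:

    # before: interpolated immediately, whether or not the level is enabled
    LOG.error("Nodes: %r" % nodes)

    # after: logging interpolates only if the record is actually emitted
    LOG.error("Nodes: %r", nodes)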

Change-Id: Ie09efe0a66881e19bd8119caa376075e605627a2
Signed-off-by: Ross Brattain <ross.b.brattain@intel.com>
31 files changed:
yardstick/benchmark/contexts/node.py
yardstick/benchmark/runners/arithmetic.py
yardstick/benchmark/runners/base.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/runners/sequence.py
yardstick/benchmark/scenarios/availability/actionrollbackers.py
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
yardstick/benchmark/scenarios/availability/director.py
yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
yardstick/benchmark/scenarios/availability/operation/baseoperation.py
yardstick/benchmark/scenarios/availability/operation/operation_general.py
yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/benchmark/scenarios/compute/cachestat.py
yardstick/benchmark/scenarios/compute/cpuload.py
yardstick/benchmark/scenarios/compute/cyclictest.py
yardstick/benchmark/scenarios/compute/memload.py
yardstick/benchmark/scenarios/networking/netutilization.py
yardstick/benchmark/scenarios/storage/storperf.py
yardstick/dispatcher/http.py
yardstick/dispatcher/influxdb.py
yardstick/ssh.py

yardstick/benchmark/contexts/node.py
index 67db442..78bce82 100644 (file)
@@ -83,7 +83,7 @@ class NodeContext(Context):
             return None
         elif len(nodes) > 1:
             LOG.error("Duplicate nodes!!!")
-            LOG.error("Nodes: %r" % nodes)
+            LOG.error("Nodes: %r", nodes)
             sys.exit(-1)
 
         # A clone is created in order to avoid affecting the
yardstick/benchmark/runners/arithmetic.py
index 74a236f..69ea915 100755 (executable)
@@ -93,7 +93,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         if aborted.is_set():
             break
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         for i, value in enumerate(comb_values):
@@ -109,7 +109,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -129,7 +129,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
yardstick/benchmark/runners/base.py
index 2374992..8f3f75f 100755 (executable)
@@ -63,7 +63,7 @@ def _execute_shell_command(command):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        log.error("exec command '%s' error:\n " % command)
+        log.error("exec command '%s' error:\n ", command)
         log.error(traceback.format_exc())
 
     return exitcode, output
@@ -76,10 +76,10 @@ def _single_action(seconds, command, queue):
     log.debug("single action: executing command: '%s'", command)
     ret_code, data = _execute_shell_command(command)
     if ret_code < 0:
-        log.error("single action error! command:%s" % command)
+        log.error("single action error! command:%s", command)
         queue.put({'single-action-data': data})
         return
-    log.debug("single action data: \n%s" % data)
+    log.debug("single action data: \n%s", data)
     queue.put({'single-action-data': data})
 
 
@@ -96,7 +96,7 @@ def _periodic_action(interval, command, queue):
             log.error("periodic action error! command:%s", command)
             queue.put({'periodic-action-data': data})
             break
-        log.debug("periodic action data: \n%s" % data)
+        log.debug("periodic action data: \n%s", data)
         queue.put({'periodic-action-data': data})
 
 
@@ -127,7 +127,7 @@ class Runner(object):
         """
         # if there is no runner, start the output serializer subprocess
         if len(Runner.runners) == 0:
-            log.debug("Starting dump process file '%s'" %
+            log.debug("Starting dump process file '%s'",
                       config["output_filename"])
             Runner.queue = multiprocessing.Queue()
             Runner.dump_process = multiprocessing.Process(
@@ -196,13 +196,13 @@ class Runner(object):
         '''run a potentially configured post-stop action'''
         if "post-stop-action" in self.config:
             command = self.config["post-stop-action"]["command"]
-            log.debug("post stop action: command: '%s'" % command)
+            log.debug("post stop action: command: '%s'", command)
             ret_code, data = _execute_shell_command(command)
             if ret_code < 0:
                 log.error("post action error! command:%s", command)
                 self.result_queue.put({'post-stop-action-data': data})
                 return
-            log.debug("post-stop data: \n%s" % data)
+            log.debug("post-stop data: \n%s", data)
             self.result_queue.put({'post-stop-action-data': data})
 
     def run(self, scenario_cfg, context_cfg):
@@ -219,13 +219,13 @@ class Runner(object):
         # run a potentially configured pre-start action
         if "pre-start-action" in self.config:
             command = self.config["pre-start-action"]["command"]
-            log.debug("pre start action: command: '%s'" % command)
+            log.debug("pre start action: command: '%s'", command)
             ret_code, data = _execute_shell_command(command)
             if ret_code < 0:
                 log.error("pre-start action error! command:%s", command)
                 self.result_queue.put({'pre-start-action-data': data})
                 return
-            log.debug("pre-start data: \n%s" % data)
+            log.debug("pre-start data: \n%s", data)
             self.result_queue.put({'pre-start-action-data': data})
 
         if "single-shot-action" in self.config:
yardstick/benchmark/runners/duration.py
index 1f51f51..1412c0c 100644 (file)
@@ -58,7 +58,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     start = time.time()
     while True:
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
@@ -71,7 +71,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -91,7 +91,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
yardstick/benchmark/runners/iteration.py
index b23b32b..3a839b6 100644 (file)
@@ -60,7 +60,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     if "run" in run_step:
         while True:
 
-            LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                       {"runner": runner_cfg["runner_id"],
                        "sequence": sequence})
 
@@ -74,7 +74,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 if sla_action == "assert":
                     raise
                 elif sla_action == "monitor":
-                    LOG.warning("SLA validation failed: %s" % assertion.args)
+                    LOG.warning("SLA validation failed: %s", assertion.args)
                     errors = assertion.args
             except Exception as e:
                 errors = traceback.format_exc()
@@ -94,7 +94,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
             queue.put(record)
 
-            LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                       {"runner": runner_cfg["runner_id"],
                        "sequence": sequence})
 
yardstick/benchmark/runners/sequence.py
index fe53412..3b06e2a 100644 (file)
@@ -67,7 +67,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     for value in sequence_values:
         options[arg_name] = value
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
@@ -80,7 +80,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -100,7 +100,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
yardstick/benchmark/scenarios/availability/actionrollbackers.py
index 4b732a1..38f57d4 100644 (file)
@@ -28,8 +28,8 @@ class AttackerRollbacker(ActionRollbacker):
 
     def rollback(self):
         LOG.debug(
-            "\033[93m recovering attacker %s \033[0m"
-            % (self.underlyingAttacker.key))
+            "\033[93m recovering attacker %s \033[0m",
+            self.underlyingAttacker.key)
         self.underlyingAttacker.recover()
 
 
@@ -40,6 +40,6 @@ class OperationRollbacker(ActionRollbacker):
 
     def rollback(self):
         LOG.debug(
-            "\033[93m rollback operation %s \033[0m"
-            % (self.underlyingOperation.key))
+            "\033[93m rollback operation %s \033[0m",
+            self.underlyingOperation.key)
         self.underlyingOperation.rollback()
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
index 6561f6b..3b1f8ef 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command, stdin=None):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n " % command)
+        LOG.error("exec command '%s' error:\n ", command)
         LOG.error(traceback.format_exc())
 
     return exitcode, output
@@ -34,7 +34,7 @@ class BaremetalAttacker(BaseAttacker):
     __attacker_type__ = 'bare-metal-down'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -65,10 +65,10 @@ class BaremetalAttacker(BaseAttacker):
             "/bin/sh -s {0} -W 10".format(self.host_ip),
             stdin=open(self.check_script, "r"))
 
-        LOG.debug("check ret: %s out:%s err:%s" %
-                  (exit_status, stdout, stderr))
+        LOG.debug("check ret: %s out:%s err:%s",
+                  exit_status, stdout, stderr)
         if not stdout or "running" not in stdout:
-            LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+            LOG.info("the host (ipmi_ip:%s) is not running!", self.ipmi_ip)
             return False
 
         return True
@@ -76,8 +76,8 @@ class BaremetalAttacker(BaseAttacker):
     def inject_fault(self):
         exit_status, stdout, stderr = self.connection.execute(
             "shutdown -h now")
-        LOG.debug("inject fault ret: %s out:%s err:%s" %
-                  (exit_status, stdout, stderr))
+        LOG.debug("inject fault ret: %s out:%s err:%s",
+                  exit_status, stdout, stderr)
         if not exit_status:
             LOG.info("inject fault success")
 
@@ -91,7 +91,7 @@ class BaremetalAttacker(BaseAttacker):
             ssh_port = host.get("ssh_port", ssh.DEFAULT_PORT)
             pwd = host.get("pwd", None)
 
-            LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+            LOG.debug("jump_host ip:%s user:%s", ip, user)
             self.jump_connection = ssh.SSH(user, ip, password=pwd,
                                            port=ssh_port)
             self.jump_connection.wait(timeout=600)
yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
index 5e7716e..a452c37 100644 (file)
@@ -20,7 +20,7 @@ class GeneralAttacker(BaseAttacker):
     __attacker_type__ = 'general-attacker'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -79,8 +79,8 @@ class GeneralAttacker(BaseAttacker):
                       .format(stdout))
         else:
             LOG.error(
-                "the inject_fault's error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the inject_fault's error, stdout:%s, stderr:%s",
+                stdout, stderr)
 
     def recover(self):
         if "rollback_parameter" in self._config:
yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index 0a844f5..2ccc231 100644 (file)
@@ -19,7 +19,7 @@ class ProcessAttacker(BaseAttacker):
     __attacker_type__ = 'kill-process'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -54,8 +54,8 @@ class ProcessAttacker(BaseAttacker):
             return True
         else:
             LOG.error(
-                "the host envrioment is error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the host envrioment is error, stdout:%s, stderr:%s",
+                stdout, stderr)
         return False
 
     def inject_fault(self):
yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index 78276ef..f96e577 100644 (file)
@@ -26,7 +26,7 @@ class AttackerMgr(object):
         self._attacker_list = []
 
     def init_attackers(self, attacker_cfgs, context):
-        LOG.debug("attackerMgr confg: %s" % attacker_cfgs)
+        LOG.debug("attackerMgr confg: %s", attacker_cfgs)
 
         for cfg in attacker_cfgs:
             attacker_cls = BaseAttacker.get_attacker_cls(cfg)
yardstick/benchmark/scenarios/availability/director.py
index 267933d..104c683 100644 (file)
@@ -63,7 +63,7 @@ class Director(object):
 
     def createActionPlayer(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s" % (type, key))
+            "the type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionplayers.AttackerPlayer(self.attackerMgr[key])
         if type == ActionType.MONITOR:
@@ -77,13 +77,13 @@ class Director(object):
 
     def createActionRollbacker(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s" % (type, key))
+            "the type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
         if type == ActionType.OPERATION:
             return actionrollbackers.OperationRollbacker(
                 self.operationMgr[key])
-        LOG.debug("no rollbacker created for %s" % (key))
+        LOG.debug("no rollbacker created for %s", key)
 
     def verify(self):
         result = True
yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index d26c99c..38d1c4e 100644 (file)
@@ -27,7 +27,7 @@ class MonitorMgr(object):
         self._monitor_list = []
 
     def init_monitors(self, monitor_cfgs, context):
-        LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+        LOG.debug("monitorMgr config: %s", monitor_cfgs)
 
         for monitor_cfg in monitor_cfgs:
             monitor_type = monitor_cfg["monitor_type"]
@@ -87,7 +87,7 @@ class BaseMonitor(multiprocessing.Process):
         return os.path.join(base_path, path)
 
     def run(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
 
         self.setup()
         monitor_time = self._config.get("monitor_time", 0)
@@ -140,7 +140,7 @@ class BaseMonitor(multiprocessing.Process):
     def wait_monitor(self):
         self.join()
         self._result = self._queue.get()
-        LOG.debug("the monitor result:%s" % self._result)
+        LOG.debug("the monitor result:%s", self._result)
 
     def setup(self):  # pragma: no cover
         pass
yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index b55cc31..366d16e 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n " % command)
+        LOG.error("exec command '%s' error:\n ", command)
         LOG.error(traceback.format_exc())
 
     return exitcode, output
@@ -62,8 +62,8 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
                 "/bin/bash -s '{0}'".format(self.cmd),
                 stdin=open(self.check_script, "r"))
 
-            LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
-                      (exit_status, stdout, stderr))
+            LOG.debug("the ret stats: %s stdout: %s stderr: %s",
+                      exit_status, stdout, stderr)
         else:
             exit_status, stdout = _execute_shell_command(self.cmd)
         if exit_status:
@@ -72,10 +72,10 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
 
     def verify_SLA(self):
         outage_time = self._result.get('outage_time', None)
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time > max_outage_time:
-            LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             LOG.info("the sla is passed")
yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
index f9ddb25..359cde6 100644 (file)
@@ -61,14 +61,14 @@ class GeneralMonitor(basemonitor.BaseMonitor):
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time is None:
             LOG.error("There is no outage_time in monitor result.")
             return False
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             return True
yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index 403ec4d..a88b8d4 100644 (file)
@@ -39,17 +39,17 @@ class MonitorProcess(basemonitor.BaseMonitor):
             "/bin/sh -s {0}".format(self.process_name),
             stdin=open(self.check_script, "r"))
         if not stdout or int(stdout) <= 0:
-            LOG.info("the process (%s) is not running!" % self.process_name)
+            LOG.info("the process (%s) is not running!", self.process_name)
             return False
 
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_recover_time"]
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             return True
yardstick/benchmark/scenarios/availability/operation/baseoperation.py
index e776e87..80efd1b 100644 (file)
@@ -26,7 +26,7 @@ class OperationMgr(object):
         self._operation_list = []
 
     def init_operations(self, operation_cfgs, context):
-        LOG.debug("operationMgr confg: %s" % operation_cfgs)
+        LOG.debug("operationMgr confg: %s", operation_cfgs)
         for cfg in operation_cfgs:
             operation_type = cfg['operation_type']
             operation_cls = BaseOperation.get_operation_cls(operation_type)
yardstick/benchmark/scenarios/availability/operation/operation_general.py
index aa28472..b3a20c3 100644 (file)
@@ -19,7 +19,7 @@ class GeneralOperaion(BaseOperation):
     __operation__type__ = "general-operation"
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -67,8 +67,8 @@ class GeneralOperaion(BaseOperation):
             LOG.debug("success,the operation's output is: {0}".format(stdout))
         else:
             LOG.error(
-                "the operation's error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the operation's error, stdout:%s, stderr:%s",
+                stdout, stderr)
 
     def rollback(self):
         if "rollback_parameter" in self._config:
yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
index 1bdb9f2..a24f26e 100644 (file)
@@ -26,7 +26,7 @@ class ResultCheckerMgr(object):
         self._result_checker_list = []
 
     def init_ResultChecker(self, resultchecker_cfgs, context):
-        LOG.debug("resultcheckerMgr confg: %s" % resultchecker_cfgs)
+        LOG.debug("resultcheckerMgr confg: %s", resultchecker_cfgs)
 
         for cfg in resultchecker_cfgs:
             resultchecker_type = cfg['checker_type']
yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
index ae896c2..8c9d160 100644 (file)
@@ -20,7 +20,7 @@ class GeneralResultChecker(BaseResultChecker):
     __result_checker__type__ = "general-result-checker"
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -67,7 +67,7 @@ class GeneralResultChecker(BaseResultChecker):
             LOG.debug("action script of the operation is: {0}"
                       .format(self.verify_script))
 
-        LOG.debug("exit_status ,stdout : {0} ,{1}".format(exit_status, stdout))
+        LOG.debug("exit_status ,stdout : %s ,%s", exit_status, stdout)
         if exit_status == 0 and stdout:
             self.actualResult = stdout
             LOG.debug("verifying resultchecker: {0}".format(self.key))
@@ -104,6 +104,6 @@ class GeneralResultChecker(BaseResultChecker):
             LOG.error(stderr)
 
         LOG.debug(
-            "verifying resultchecker: {0},the result is : {1}"
-            .format(self.key, self.success))
+            "verifying resultchecker: %s,the result is : %s", self.key,
+            self.success)
         return self.success
yardstick/benchmark/scenarios/availability/scenario_general.py
index 0a128aa..b064c67 100644 (file)
@@ -22,7 +22,7 @@ class ScenarioGeneral(base.Scenario):
 
     def __init__(self, scenario_cfg, context_cfg):
         LOG.debug(
-            "scenario_cfg:%s context_cfg:%s" % (scenario_cfg, context_cfg))
+            "scenario_cfg:%s context_cfg:%s", scenario_cfg, context_cfg)
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
 
yardstick/benchmark/scenarios/availability/serviceha.py
index 10f2c4f..46a197c 100755 (executable)
@@ -21,8 +21,8 @@ class ServiceHA(base.Scenario):
 
     def __init__(self, scenario_cfg, context_cfg):
         LOG.debug(
-            "scenario_cfg:%s context_cfg:%s" %
-            (scenario_cfg, context_cfg))
+            "scenario_cfg:%s context_cfg:%s",
+            scenario_cfg, context_cfg)
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.setup_done = False
yardstick/benchmark/scenarios/compute/cachestat.py
index 1177020..25300dd 100644 (file)
@@ -92,7 +92,7 @@ class CACHEstat(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
yardstick/benchmark/scenarios/compute/cpuload.py
index a7fae44..9d71038 100644 (file)
@@ -96,7 +96,7 @@ class CPULoad(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status != 0:
             raise RuntimeError("Failed executing command: ",
yardstick/benchmark/scenarios/compute/cyclictest.py
index 6a1afe2..a6c4d95 100644 (file)
@@ -69,14 +69,14 @@ class Cyclictest(base.Scenario):
         rpm_dir = setup_options["rpm_dir"]
         script_dir = setup_options["script_dir"]
         image_dir = setup_options["image_dir"]
-        LOG.debug("Send RPMs from %s to workspace %s" %
-                  (rpm_dir, self.WORKSPACE))
+        LOG.debug("Send RPMs from %s to workspace %s",
+                  rpm_dir, self.WORKSPACE)
         client.put(rpm_dir, self.WORKSPACE, recursive=True)
-        LOG.debug("Send scripts from %s to workspace %s" %
-                  (script_dir, self.WORKSPACE))
+        LOG.debug("Send scripts from %s to workspace %s",
+                  script_dir, self.WORKSPACE)
         client.put(script_dir, self.WORKSPACE, recursive=True)
-        LOG.debug("Send guest image from %s to workspace %s" %
-                  (image_dir, self.WORKSPACE))
+        LOG.debug("Send guest image from %s to workspace %s",
+                  image_dir, self.WORKSPACE)
         client.put(image_dir, self.WORKSPACE, recursive=True)
 
     def _connect_host(self):
@@ -102,7 +102,7 @@ class Cyclictest(base.Scenario):
         self.guest.wait(timeout=600)
 
     def _run_setup_cmd(self, client, cmd):
-        LOG.debug("Run cmd: %s" % cmd)
+        LOG.debug("Run cmd: %s", cmd)
         status, stdout, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
yardstick/benchmark/scenarios/compute/memload.py
index 48088f8..e1ba93d 100644 (file)
@@ -61,7 +61,7 @@ class MEMLoad(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
yardstick/benchmark/scenarios/networking/netutilization.py
index ecde756..1ea92cc 100644 (file)
@@ -83,7 +83,7 @@ class NetUtilization(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on target."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
yardstick/benchmark/scenarios/storage/storperf.py
index d39c23a..06c329d 100644 (file)
@@ -75,8 +75,8 @@ class StorPerf(base.Scenario):
         setup_query_content = json.loads(setup_query.content)
         if setup_query_content["stack_created"]:
             self.setup_done = True
-            LOG.debug("stack_created: %s"
-                      setup_query_content["stack_created"])
+            LOG.debug("stack_created: %s",
+                      setup_query_content["stack_created"])
 
     def setup(self):
         """Set the configuration."""
@@ -88,8 +88,8 @@ class StorPerf(base.Scenario):
             if env_argument in self.options:
                 env_args[env_argument] = self.options[env_argument]
 
-        LOG.info("Creating a stack on node %s with parameters %s" %
-                 (self.target, env_args))
+        LOG.info("Creating a stack on node %s with parameters %s",
+                 self.target, env_args)
         setup_res = requests.post('http://%s:5000/api/v1.0/configurations'
                                   % self.target, json=env_args)
 
@@ -99,7 +99,7 @@ class StorPerf(base.Scenario):
             raise RuntimeError("Failed to create a stack, error message:",
                                setup_res_content["message"])
         elif setup_res.status_code == 200:
-            LOG.info("stack_id: %s" % setup_res_content["stack_id"])
+            LOG.info("stack_id: %s", setup_res_content["stack_id"])
 
             while not self.setup_done:
                 self._query_setup_state()
@@ -148,7 +148,7 @@ class StorPerf(base.Scenario):
             if job_argument in self.options:
                 job_args[job_argument] = self.options[job_argument]
 
-        LOG.info("Starting a job with parameters %s" % job_args)
+        LOG.info("Starting a job with parameters %s", job_args)
         job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
                                 json=job_args)
 
@@ -159,7 +159,7 @@ class StorPerf(base.Scenario):
                                job_res_content["message"])
         elif job_res.status_code == 200:
             job_id = job_res_content["job_id"]
-            LOG.info("Started job id: %s..." % job_id)
+            LOG.info("Started job id: %s...", job_id)
 
             time.sleep(self.timeout)
             terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
yardstick/dispatcher/http.py
index 2298d00..98e772d 100644 (file)
@@ -81,14 +81,14 @@ class HttpDispatcher(DispatchBase):
                 case_name = v["scenario_cfg"]["tc"]
                 break
         if case_name == "":
-            LOG.error('Test result : %s' % json.dumps(self.result))
+            LOG.error('Test result : %s', json.dumps(self.result))
             LOG.error('The case_name cannot be found, no data will be posted.')
             return
 
         self.result["case_name"] = case_name
 
         try:
-            LOG.debug('Test result : %s' % json.dumps(self.result))
+            LOG.debug('Test result : %s', json.dumps(self.result))
             res = requests.post(self.target,
                                 data=json.dumps(self.result),
                                 headers=self.headers,
yardstick/dispatcher/influxdb.py
index 8673253..fc9f3e9 100644 (file)
@@ -127,7 +127,7 @@ class InfluxdbDispatcher(DispatchBase):
         return make_lines(msg).encode('utf-8')
 
     def record_result_data(self, data):
-        LOG.debug('Test result : %s' % json.dumps(data))
+        LOG.debug('Test result : %s', json.dumps(data))
         self.raw_result.append(data)
         if self.target == '':
             # if the target was not set, do not do anything
@@ -148,13 +148,13 @@ class InfluxdbDispatcher(DispatchBase):
             return 0
 
         if self.tc == "":
-            LOG.error('Test result : %s' % json.dumps(data))
+            LOG.error('Test result : %s', json.dumps(data))
             LOG.error('The case_name cannot be found, no data will be posted.')
             return -1
 
         try:
             line = self._data_to_line_protocol(data)
-            LOG.debug('Test result line format : %s' % line)
+            LOG.debug('Test result line format : %s', line)
             res = requests.post(self.influxdb_url,
                                 data=line,
                                 auth=(self.username, self.password),
@@ -171,5 +171,5 @@ class InfluxdbDispatcher(DispatchBase):
         return 0
 
     def flush_result_data(self):
-        LOG.debug('Test result all : %s' % json.dumps(self.raw_result))
+        LOG.debug('Test result all : %s', json.dumps(self.raw_result))
         return 0
yardstick/ssh.py
index d287b4d..5d5719b 100644 (file)
@@ -197,14 +197,14 @@ class SSH(object):
 
             if session.recv_ready():
                 data = session.recv(4096)
-                self.log.debug("stdout: %r" % data)
+                self.log.debug("stdout: %r", data)
                 if stdout is not None:
                     stdout.write(data)
                 continue
 
             if session.recv_stderr_ready():
                 stderr_data = session.recv_stderr(4096)
-                self.log.debug("stderr: %r" % stderr_data)
+                self.log.debug("stderr: %r", stderr_data)
                 if stderr is not None:
                     stderr.write(stderr_data)
                 continue
@@ -267,10 +267,10 @@ class SSH(object):
             try:
                 return self.execute("uname")
             except (socket.error, SSHError) as e:
-                self.log.debug("Ssh is still unavailable: %r" % e)
+                self.log.debug("Ssh is still unavailable: %r", e)
                 time.sleep(interval)
             if time.time() > (start_time + timeout):
-                raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+                raise SSHTimeout("Timeout waiting for '%s'", self.host)
 
     def put(self, files, remote_path=b'.', recursive=False):
         client = self._get_client()