Merge "Add unit test file for DurationRunner"
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 88158ee..4c88f36 100644
 """
 
 from __future__ import absolute_import
-import os
-import multiprocessing
+
 import logging
-import traceback
+import multiprocessing
 import time
+import traceback
+
+import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
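Note: the new yardstick.common.exceptions import is what the reworked worker loop below relies on: SLA checks are expected to raise a dedicated exception rather than a bare AssertionError. A minimal, self-contained sketch of that idea (the stand-in class and helper name here are illustrative only; the real SLAValidationError lives in yardstick.common.exceptions and its exact constructor is not shown in this diff):

    # Illustrative stand-in for yardstick.common.exceptions.SLAValidationError;
    # the real class and its constructor are not part of this diff.
    class SLAValidationError(Exception):
        """Raised by a scenario when an SLA check fails."""


    def check_latency_sla(measured_ms, sla):
        # Hypothetical helper: raising a dedicated exception lets the runner
        # tell SLA breaches apart from ordinary AssertionErrors in test code.
        max_ms = float(sla.get("max_latency", 0))
        if max_ms and measured_ms > max_ms:
            raise SLAValidationError(
                "latency %.2f ms exceeds max_latency %.2f ms"
                % (measured_ms, max_ms))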
 
@@ -69,15 +72,17 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             data = {}
             errors = ""
 
+            benchmark.pre_run_wait_time(interval)
+
             try:
                 result = method(data)
-            except AssertionError as assertion:
+            except y_exc.SLAValidationError as error:
                 # SLA validation failed in scenario, determine what to do now
                 if sla_action == "assert":
                     raise
                 elif sla_action == "monitor":
-                    LOG.warning("SLA validation failed: %s", assertion.args)
-                    errors = assertion.args
+                    LOG.warning("SLA validation failed: %s", error.args)
+                    errors = error.args
                 elif sla_action == "rate-control":
                     try:
                         scenario_cfg['options']['rate']
@@ -88,16 +93,16 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                     scenario_cfg['options']['rate'] -= delta
                     sequence = 1
                     continue
-            except Exception as e:
+            except Exception:  # pylint: disable=broad-except
                 errors = traceback.format_exc()
-                LOG.exception(e)
+                LOG.exception("")
             else:
                 if result:
                     # add timeout for put so we don't block test
                     # if we do timeout we don't care about dropping individual KPIs
                     output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-            time.sleep(interval)
+            benchmark.post_run_wait_time(interval)
 
             benchmark_output = {
                 'timestamp': time.time(),
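Note: the fixed time.sleep(interval) pacing is replaced by pre_run_wait_time()/post_run_wait_time() calls on the scenario object, so each scenario can decide whether the interval is spent before or after its run method. A rough sketch of what such hooks could look like on a scenario base class (placement and defaults below are assumptions; the real methods are defined outside this diff):

    import time


    class ScenarioSketch(object):
        """Hypothetical scenario base class showing the two pacing hooks."""

        def pre_run_wait_time(self, time_seconds):
            # Called by the runner before each iteration; override to wait
            # before generating load (assumption: default is a no-op).
            pass

        def post_run_wait_time(self, time_seconds):
            # Called after each iteration; sleeping here reproduces the old
            # time.sleep(interval) behaviour of the runner loop (assumption).
            time.sleep(time_seconds)

A scenario that needs the pause before traffic starts, rather than after it, would override pre_run_wait_time instead, with no further change to the runner.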
@@ -151,7 +156,9 @@ If the scenario ends before the time has elapsed, it will be started again.
     __execution_type__ = 'Iteration'
 
     def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
         self.process = multiprocessing.Process(
+            name=name,
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
                   context_cfg, self.aborted, self.output_queue))
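Note: naming the worker "Iteration-<scenario type>-<pid>" mainly helps when several runner processes log concurrently, since logging's %(processName)s field picks up the multiprocessing process name; the pid embedded in the name is the parent runner's, because _run_benchmark builds the string before forking. A small, self-contained illustration (the "Ping" scenario type below is a placeholder for this example):

    import logging
    import multiprocessing
    import os


    def _work():
        logging.basicConfig(
            level=logging.INFO,
            format="%(processName)s %(levelname)s %(message)s")
        logging.info("running one iteration")


    if __name__ == "__main__":
        # Placeholder scenario type; a real run would use scenario_cfg["type"].
        name = "{}-{}-{}".format("Iteration", "Ping", os.getpid())
        proc = multiprocessing.Process(name=name, target=_work)
        proc.start()
        proc.join()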