Merge "TC042 bug fix"
[yardstick.git] / yardstick / benchmark / runners / iteration.py
index 973bb9a..20d6da0 100644 (file)
 """
 
 from __future__ import absolute_import
-import os
-import multiprocessing
+
 import logging
-import multiprocessing
+import os
 import time
+import traceback
 
 from yardstick.benchmark.runners import base
 
 LOG = logging.getLogger(__name__)
 
 
+QUEUE_PUT_TIMEOUT = 10  # seconds a queue put may block before giving up
+
+
 def _worker_process(queue, cls, method_name, scenario_cfg,
                     context_cfg, aborted, output_queue):
 
@@ -53,10 +58,6 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
     method = getattr(benchmark, method_name)
 
-    queue.put({'runner_id': runner_cfg['runner_id'],
-               'scenario_cfg': scenario_cfg,
-               'context_cfg': context_cfg})
-
     sla_action = None
     if "sla" in scenario_cfg:
         sla_action = scenario_cfg["sla"].get("action", "assert")
@@ -70,6 +71,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             data = {}
             errors = ""
 
+            benchmark.pre_run_wait_time(interval)
+
             try:
                 result = method(data)
             except AssertionError as assertion:
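
The hard-coded time.sleep(interval) is being replaced by pre_run_wait_time()
and post_run_wait_time() hooks, so each scenario class decides where in the
iteration its pacing delay belongs. A minimal sketch of what such hooks might
look like on the scenario base class, assuming the default keeps the old
sleep-after-run behaviour (traffic-generator scenarios would override these):

    import time

    class Scenario(object):
        """Sketch: per-scenario control of iteration pacing."""

        def pre_run_wait_time(self, time_seconds):
            """Wait before executing the run method; no-op by default."""
            pass

        def post_run_wait_time(self, time_seconds):
            """Wait after executing the run method; the default preserves
            the old time.sleep(interval) pacing."""
            time.sleep(time_seconds)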
@@ -89,14 +92,16 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                     scenario_cfg['options']['rate'] -= delta
                     sequence = 1
                     continue
-            except Exception as e:
+            except Exception:  # pylint: disable=broad-except
                 errors = traceback.format_exc()
-                LOG.exception(e)
+                LOG.exception("Exception raised during benchmark iteration")
             else:
                 if result:
-                    output_queue.put(result)
+                    # add a timeout to the put so a full queue can't block the test;
+                    # on timeout we accept dropping an individual KPI record
+                    output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-            time.sleep(interval)
+            benchmark.post_run_wait_time(interval)
 
             benchmark_output = {
                 'timestamp': time.time(),
@@ -105,10 +110,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 'errors': errors
             }
 
-            record = {'runner_id': runner_cfg['runner_id'],
-                      'benchmark': benchmark_output}
-
-            queue.put(record)
+            queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
 
             LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                       {"runner": runner_cfg["runner_id"],
@@ -121,7 +123,18 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 LOG.info("worker END")
                 break
     if "teardown" in run_step:
-        benchmark.teardown()
+        try:
+            benchmark.teardown()
+        except Exception:  # pylint: disable=broad-except
+            # Catch any exception raised during teardown and convert it into
+            # a plain SystemExit; never pass exceptions back through
+            # multiprocessing, because some exceptions are unpicklable:
+            # https://bugs.python.org/issue9400
+            LOG.exception("Benchmark teardown failed")
+            raise SystemExit(1)
+
+    LOG.debug("queue.qsize() = %s", queue.qsize())
+    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
 
 
 class IterationRunner(base.Runner):
@@ -142,7 +155,9 @@ If the scenario ends before the time has elapsed, it will be started again.
     __execution_type__ = 'Iteration'
 
     def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+        name = "{}-{}-{}".format(
+            self.__execution_type__, scenario_cfg.get("type"), os.getpid())
         self.process = multiprocessing.Process(
+            name=name,
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
                   context_cfg, self.aborted, self.output_queue))
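
Giving the worker an explicit name that combines the execution type, the
scenario type and the parent's pid makes it easy to pick the right process
out of ps output and log lines. A minimal sketch of the effect ("Iteration"
and "cpuload" stand in for real values):

    import multiprocessing
    import os

    def work():
        print("running in", multiprocessing.current_process().name)

    if __name__ == "__main__":
        name = "{}-{}-{}".format("Iteration", "cpuload", os.getpid())
        p = multiprocessing.Process(name=name, target=work)
        p.start()  # prints e.g. "running in Iteration-cpuload-12345"
        p.join()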