Avoid breaking tests when a monitor is set
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 03dcfae..e38ed37 100755
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -21,37 +21,41 @@ from yardstick.benchmark.runners import base
 LOG = logging.getLogger(__name__)
 
 
-def _worker_process(queue, cls, method_name, context, scenario_args):
+def _worker_process(queue, cls, method_name, scenario_cfg,
+                    context_cfg, aborted):
 
     sequence = 1
 
-    interval = context.get("interval", 1)
-    iterations = context.get("iterations", 1)
+    runner_cfg = scenario_cfg['runner']
+
+    interval = runner_cfg.get("interval", 1)
+    iterations = runner_cfg.get("iterations", 1)
     LOG.info("worker START, iterations %d times, class %s", iterations, cls)
 
-    context['runner'] = os.getpid()
+    runner_cfg['runner_id'] = os.getpid()
 
-    benchmark = cls(context)
+    benchmark = cls(scenario_cfg, context_cfg)
     benchmark.setup()
     method = getattr(benchmark, method_name)
 
-    record_context = {"runner": context["runner"],
-                      "host": context["host"]}
+    queue.put({'runner_id': runner_cfg['runner_id'],
+               'scenario_cfg': scenario_cfg,
+               'context_cfg': context_cfg})
 
     sla_action = None
-    if "sla" in scenario_args:
-        sla_action = scenario_args["sla"].get("action", "assert")
+    if "sla" in scenario_cfg:
+        sla_action = scenario_cfg["sla"].get("action", "assert")
 
     while True:
 
         LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
         errors = ""
 
         try:
-            data = method(scenario_args)
+            method(data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
@@ -72,15 +76,18 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }
 
-        queue.put({'context': record_context, 'sargs': scenario_args,
-                   'benchmark': benchmark_output})
+        record = {'runner_id': runner_cfg['runner_id'],
+                  'benchmark': benchmark_output}
+
+        queue.put(record)
 
         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
-                  {"runner": context["runner"], "sequence": sequence})
+                  {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
 
-        if (errors and sla_action is None) or (sequence > iterations):
+        if (errors and sla_action is None) or \
+                (sequence > iterations or aborted.is_set()):
             LOG.info("worker END")
             break
 
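The behavioural change in this hunk is the shared aborted event: besides the iteration budget, the parent runner can now ask the worker to stop cleanly between iterations. A minimal sketch of the pattern (names such as _demo_worker are illustrative, not yardstick API; standard library only):

    import multiprocessing
    import time

    def _demo_worker(aborted, iterations=100, interval=0.1):
        # Mirrors the loop in _worker_process: run until the iteration
        # budget is spent or the parent sets the aborted event.
        sequence = 1
        while True:
            time.sleep(interval)  # stand-in for method(data)
            sequence += 1
            if sequence > iterations or aborted.is_set():
                break

    if __name__ == '__main__':
        aborted = multiprocessing.Event()
        proc = multiprocessing.Process(target=_demo_worker, args=(aborted,))
        proc.start()
        time.sleep(0.5)
        aborted.set()  # request an early, clean stop
        proc.join()

Note that the event is only checked at the end of each pass, so a long-running method(data) call still finishes its current iteration before the worker exits.
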
@@ -104,8 +111,9 @@ If the scenario ends before the time has elapsed, it will be started again.
     '''
     __execution_type__ = 'Iteration'
 
-    def _run_benchmark(self, cls, method, scenario_args):
+    def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
         self.process = multiprocessing.Process(
             target=_worker_process,
-            args=(self.result_queue, cls, method, self.config, scenario_args))
+            args=(self.result_queue, cls, method, scenario_cfg,
+                  context_cfg, self.aborted))
         self.process.start()
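
After this refactor the result queue carries two record shapes: a one-off metadata record ('runner_id', 'scenario_cfg', 'context_cfg') emitted right after setup, then one record per iteration holding the benchmark output. A consumer could separate them along these lines (the drain helper is a hypothetical sketch, not part of yardstick):

    import queue

    def drain(result_queue, process):
        # Collect per-iteration outputs; skip the initial metadata record.
        outputs = []
        while process.is_alive() or not result_queue.empty():
            try:
                record = result_queue.get(timeout=1)
            except queue.Empty:
                continue
            if 'scenario_cfg' in record:
                continue  # run metadata, not an iteration result
            outputs.append(record['benchmark'])
        return outputs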