Add arguments to the traffic profile render
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index 57903eb..99386a4 100755
 # rally/rally/benchmark/runners/base.py
 
 from __future__ import absolute_import
-import importlib
+
 import logging
 import multiprocessing
 import subprocess
 import time
 import traceback
+from subprocess import CalledProcessError
+
+import importlib
+
+from six.moves.queue import Empty
 
 import yardstick.common.utils as utils
 from yardstick.benchmark.scenarios import base as base_scenario
+from yardstick.dispatcher.base import Base as DispatcherBase
 
 log = logging.getLogger(__name__)
 
@@ -33,10 +39,9 @@ log = logging.getLogger(__name__)
 def _execute_shell_command(command):
     """execute shell script with error handling"""
     exitcode = 0
-    output = []
     try:
         output = subprocess.check_output(command, shell=True)
-    except Exception:
+    except CalledProcessError:
         exitcode = -1
         output = traceback.format_exc()
         log.error("exec command '%s' error:\n ", command)
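Not part of the patch: a minimal sketch of the narrowed exception handling above, using an illustrative failing command. With shell=True, a non-zero exit status raises CalledProcessError; anything else is now allowed to propagate instead of being swallowed by a bare except.

    import subprocess
    from subprocess import CalledProcessError

    try:
        # a failing shell command raises CalledProcessError once the
        # exit status is non-zero
        output = subprocess.check_output("exit 3", shell=True)
    except CalledProcessError as exc:
        print(exc.returncode)   # 3
        print(exc.output)       # captured stdout (empty here)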
@@ -47,7 +52,6 @@ def _execute_shell_command(command):
 
 def _single_action(seconds, command, queue):
     """entrypoint for the single action process"""
-    queue.cancel_join_thread()
     log.debug("single action, fires after %d seconds (from now)", seconds)
     time.sleep(seconds)
     log.debug("single action: executing command: '%s'", command)
@@ -62,7 +66,6 @@ def _single_action(seconds, command, queue):
 
 def _periodic_action(interval, command, queue):
     """entrypoint for the periodic action process"""
-    queue.cancel_join_thread()
     log.debug("periodic action, fires every: %d seconds", interval)
     time_spent = 0
     while True:
@@ -118,7 +121,7 @@ class Runner(object):
     @staticmethod
     def terminate_all():
         """Terminate all runners (subprocesses)"""
-        log.debug("Terminating all runners")
+        log.debug("Terminating all runners", exc_info=True)
 
         # release dumper process as some errors before any runner is created
         if not Runner.runners:
@@ -136,12 +139,12 @@ class Runner(object):
             Runner.release(runner)
 
     def __init__(self, config):
+        self.task_id = None
+        self.case_name = None
         self.config = config
         self.periodic_action_process = None
         self.output_queue = multiprocessing.Queue()
-        self.output_queue.cancel_join_thread()
         self.result_queue = multiprocessing.Queue()
-        self.result_queue.cancel_join_thread()
         self.process = None
         self.aborted = multiprocessing.Event()
         Runner.runners.append(self)
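For context, not part of the patch: cancel_join_thread() tells a queue's feeder thread not to be joined when the producing process exits, which can silently drop buffered items. The patch removes those calls and instead has the parent drain the queues; a toy illustration of that pattern:

    import multiprocessing

    def producer(queue):
        # without cancel_join_thread() the child waits at exit until its
        # buffered items have been flushed, so nothing is silently dropped
        queue.put({'data': 42})

    if __name__ == '__main__':
        queue = multiprocessing.Queue()
        child = multiprocessing.Process(target=producer, args=(queue,))
        child.start()
        print(queue.get())   # parent drains the queue first ...
        child.join()         # ... then joins the child, avoiding a deadlock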
@@ -171,6 +174,8 @@ class Runner(object):
         cls = getattr(module, path_split[-1])
 
         self.config['object'] = class_name
+        self.case_name = scenario_cfg['tc']
+        self.task_id = scenario_cfg['task_id']
         self.aborted.clear()
 
         # run a potentially configured pre-start action
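Illustrative only: the two scenario_cfg keys the runner now reads, with hypothetical values; a real scenario_cfg carries many more fields (runner, options, and so on) that are omitted here.

    # hypothetical values, for illustration only
    scenario_cfg = {
        'tc': 'opnfv_yardstick_tc002',          # test case name
        'task_id': '0f1e2d3c-example-task-id',  # task UUID
    }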
@@ -209,9 +214,25 @@ class Runner(object):
         """Abort the execution of a scenario"""
         self.aborted.set()
 
-    def join(self, timeout=None):
+    QUEUE_JOIN_INTERVAL = 5
+
+    def poll(self, timeout=QUEUE_JOIN_INTERVAL):
         self.process.join(timeout)
+        return self.process.exitcode
+
+    def join(self, outputs, result, interval=QUEUE_JOIN_INTERVAL):
+        while self.process.exitcode is None:
+            # drain the queue while we are running otherwise we won't terminate
+            outputs.update(self.get_output())
+            result.extend(self.get_result())
+            self.process.join(interval)
+        # drain after the process has exited
+        outputs.update(self.get_output())
+        result.extend(self.get_result())
+
+        self.process.terminate()
         if self.periodic_action_process:
+            self.periodic_action_process.join(1)
             self.periodic_action_process.terminate()
             self.periodic_action_process = None
 
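A caller-side sketch of the new join() contract, not part of the patch; the runner is assumed to have been started with run() beforehand, and the helper name is hypothetical:

    def wait_for_runner(runner):
        """Hypothetical helper showing how the reworked join() is driven."""
        outputs = {}   # populated in place from the runner's output_queue
        result = []    # populated in place from the runner's result_queue
        runner.join(outputs, result)
        # both containers are drained continuously while the child runs,
        # so a full queue can no longer block termination
        return outputs, result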
@@ -221,11 +242,33 @@ class Runner(object):
     def get_output(self):
         result = {}
         while not self.output_queue.empty():
-            result.update(self.output_queue.get())
+            log.debug("output_queue size %s", self.output_queue.qsize())
+            try:
+                result.update(self.output_queue.get(True, 1))
+            except Empty:
+                pass
         return result
 
     def get_result(self):
         result = []
+
+        dispatcher = self.config['output_config']['DEFAULT']['dispatcher']
+        output_in_influxdb = 'influxdb' in dispatcher
+
         while not self.result_queue.empty():
-            result.append(self.result_queue.get())
+            log.debug("result_queue size %s", self.result_queue.qsize())
+            try:
+                one_record = self.result_queue.get(True, 1)
+            except Empty:
+                pass
+            else:
+                if output_in_influxdb:
+                    self._output_to_influxdb(one_record)
+
+                result.append(one_record)
         return result
+
+    def _output_to_influxdb(self, record):
+        dispatchers = DispatcherBase.get(self.config['output_config'])
+        dispatcher = next(d for d in dispatchers
+                          if d.__dispatcher_type__ == 'Influxdb')
+        dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)
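Illustrative only: the rough shape of output_config assumed by get_result() and _output_to_influxdb() above, inferred from this diff alone; the real schema is defined elsewhere in yardstick.

    # assumed structure, keys beyond 'dispatcher' omitted
    output_config = {
        'DEFAULT': {
            # list of dispatcher names; a plain 'influxdb' string would
            # also satisfy the "'influxdb' in dispatcher" membership test
            'dispatcher': ['influxdb'],
        },
    }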