Merge "Test case description and configuration file for yardstick_tc090:Control Node...
[yardstick.git] / yardstick / benchmark / runners / base.py
index 0e02927..99386a4 100755 (executable)
 # rally/rally/benchmark/runners/base.py
 
 from __future__ import absolute_import
-import importlib
+
 import logging
 import multiprocessing
 import subprocess
 import time
 import traceback
+from subprocess import CalledProcessError
 
-from oslo_config import cfg
+import importlib
+
+from six.moves.queue import Empty
 
 import yardstick.common.utils as utils
 from yardstick.benchmark.scenarios import base as base_scenario
@@ -32,36 +35,13 @@ from yardstick.dispatcher.base import Base as DispatcherBase
 
 log = logging.getLogger(__name__)
 
-CONF = cfg.CONF
-
-
-def _output_serializer_main(filename, queue):
-    '''entrypoint for the singleton subprocess writing to outfile
-    Use of this process enables multiple instances of a scenario without
-    messing up the output file.
-    '''
-    config = {}
-    config["type"] = CONF.dispatcher.capitalize()
-    config["file_path"] = filename
-    dispatcher = DispatcherBase.get(config)
-
-    while True:
-        # blocks until data becomes available
-        record = queue.get()
-        if record == '_TERMINATE_':
-            dispatcher.flush_result_data()
-            break
-        else:
-            dispatcher.record_result_data(record)
-
 
 def _execute_shell_command(command):
-    '''execute shell script with error handling'''
+    """execute shell script with error handling"""
     exitcode = 0
-    output = []
     try:
         output = subprocess.check_output(command, shell=True)
-    except Exception:
+    except CalledProcessError:
         exitcode = -1
         output = traceback.format_exc()
         log.error("exec command '%s' error:\n ", command)
@@ -71,7 +51,7 @@ def _execute_shell_command(command):
 
 
 def _single_action(seconds, command, queue):
-    '''entrypoint for the single action process'''
+    """entrypoint for the single action process"""
     log.debug("single action, fires after %d seconds (from now)", seconds)
     time.sleep(seconds)
     log.debug("single action: executing command: '%s'", command)
@@ -85,7 +65,7 @@ def _single_action(seconds, command, queue):
 
 
 def _periodic_action(interval, command, queue):
-    '''entrypoint for the periodic action process'''
+    """entrypoint for the periodic action process"""
     log.debug("periodic action, fires every: %d seconds", interval)
     time_spent = 0
     while True:
@@ -102,13 +82,11 @@ def _periodic_action(interval, command, queue):
 
 
 class Runner(object):
-    queue = None
-    dump_process = None
     runners = []
 
     @staticmethod
     def get_cls(runner_type):
-        '''return class of specified type'''
+        """return class of specified type"""
         for runner in utils.itersubclasses(Runner):
             if runner_type == runner.__execution_type__:
                 return runner
@@ -116,62 +94,37 @@ class Runner(object):
 
     @staticmethod
     def get_types():
-        '''return a list of known runner type (class) names'''
+        """return a list of known runner type (class) names"""
         types = []
         for runner in utils.itersubclasses(Runner):
             types.append(runner)
         return types
 
     @staticmethod
-    def get(config):
+    def get(runner_cfg):
         """Returns instance of a scenario runner for execution type.
         """
-        # if there is no runner, start the output serializer subprocess
-        if len(Runner.runners) == 0:
-            log.debug("Starting dump process file '%s'",
-                      config["output_filename"])
-            Runner.queue = multiprocessing.Queue()
-            Runner.dump_process = multiprocessing.Process(
-                target=_output_serializer_main,
-                name="Dumper",
-                args=(config["output_filename"], Runner.queue))
-            Runner.dump_process.start()
-
-        return Runner.get_cls(config["type"])(config, Runner.queue)
-
-    @staticmethod
-    def release_dump_process():
-        '''Release the dumper process'''
-        log.debug("Stopping dump process")
-        if Runner.dump_process:
-            Runner.queue.put('_TERMINATE_')
-            Runner.dump_process.join()
-            Runner.dump_process = None
+        return Runner.get_cls(runner_cfg["type"])(runner_cfg)
 
     @staticmethod
     def release(runner):
-        '''Release the runner'''
+        """Release the runner"""
         if runner in Runner.runners:
             Runner.runners.remove(runner)
 
-        # if this was the last runner, stop the output serializer subprocess
-        if len(Runner.runners) == 0:
-            Runner.release_dump_process()
-
     @staticmethod
     def terminate(runner):
-        '''Terminate the runner'''
+        """Terminate the runner"""
         if runner.process and runner.process.is_alive():
             runner.process.terminate()
 
     @staticmethod
     def terminate_all():
-        '''Terminate all runners (subprocesses)'''
-        log.debug("Terminating all runners")
+        """Terminate all runners (subprocesses)"""
+        log.debug("Terminating all runners", exc_info=True)
 
         # nothing to terminate if no runner was ever created
-        if len(Runner.runners) == 0:
-            Runner.release_dump_process()
+        if not Runner.runners:
             return
 
         for runner in Runner.runners:
@@ -185,16 +138,19 @@ class Runner(object):
                 runner.periodic_action_process = None
             Runner.release(runner)
 
-    def __init__(self, config, queue):
+    def __init__(self, config):
+        self.task_id = None
+        self.case_name = None
         self.config = config
         self.periodic_action_process = None
-        self.result_queue = queue
+        self.output_queue = multiprocessing.Queue()
+        self.result_queue = multiprocessing.Queue()
         self.process = None
         self.aborted = multiprocessing.Event()
         Runner.runners.append(self)
 
     def run_post_stop_action(self):
-        '''run a potentially configured post-stop action'''
+        """run a potentially configured post-stop action"""
         if "post-stop-action" in self.config:
             command = self.config["post-stop-action"]["command"]
             log.debug("post stop action: command: '%s'", command)
@@ -206,6 +162,9 @@ class Runner(object):
             log.debug("post-stop data: \n%s", data)
             self.result_queue.put({'post-stop-action-data': data})
 
+    def _run_benchmark(self, cls, method_name, scenario_cfg, context_cfg):
+        raise NotImplementedError
+
     def run(self, scenario_cfg, context_cfg):
         scenario_type = scenario_cfg["type"]
         class_name = base_scenario.Scenario.get(scenario_type)
@@ -215,6 +174,8 @@ class Runner(object):
         cls = getattr(module, path_split[-1])
 
         self.config['object'] = class_name
+        self.case_name = scenario_cfg['tc']
+        self.task_id = scenario_cfg['task_id']
         self.aborted.clear()
 
         # run a potentially configured pre-start action
@@ -250,14 +211,64 @@ class Runner(object):
         self._run_benchmark(cls, "run", scenario_cfg, context_cfg)
 
     def abort(self):
-        '''Abort the execution of a scenario'''
+        """Abort the execution of a scenario"""
         self.aborted.set()
 
-    def join(self, timeout=None):
+    QUEUE_JOIN_INTERVAL = 5
+
+    def poll(self, timeout=QUEUE_JOIN_INTERVAL):
         self.process.join(timeout)
+        return self.process.exitcode
+
+    def join(self, outputs, result, interval=QUEUE_JOIN_INTERVAL):
+        while self.process.exitcode is None:
+            # drain the queue while we are running otherwise we won't terminate
+            outputs.update(self.get_output())
+            result.extend(self.get_result())
+            self.process.join(interval)
+        # drain after the process has exited
+        outputs.update(self.get_output())
+        result.extend(self.get_result())
+
+        self.process.terminate()
         if self.periodic_action_process:
+            self.periodic_action_process.join(1)
             self.periodic_action_process.terminate()
             self.periodic_action_process = None
 
         self.run_post_stop_action()
         return self.process.exitcode
+
+    def get_output(self):
+        result = {}
+        while not self.output_queue.empty():
+            log.debug("output_queue size %s", self.output_queue.qsize())
+            try:
+                result.update(self.output_queue.get(True, 1))
+            except Empty:
+                pass
+        return result
+
+    def get_result(self):
+        result = []
+
+        dispatcher = self.config['output_config']['DEFAULT']['dispatcher']
+        output_in_influxdb = 'influxdb' in dispatcher
+
+        while not self.result_queue.empty():
+            log.debug("result_queue size %s", self.result_queue.qsize())
+            try:
+                one_record = self.result_queue.get(True, 1)
+            except Empty:
+                pass
+            else:
+                if output_in_influxdb:
+                    self._output_to_influxdb(one_record)
+
+                result.append(one_record)
+        return result
+
+    def _output_to_influxdb(self, record):
+        dispatchers = DispatcherBase.get(self.config['output_config'])
+        dispatcher = next((d for d in dispatchers if d.__dispatcher_type__ == 'Influxdb'))
+        dispatcher.upload_one_record(record, self.case_name, '', task_id=self.task_id)