Merge "Add ODL noHA testcase"
[yardstick.git] yardstick/benchmark/core/task.py
index 2c3edfe..e7acde6 100644
@@ -7,7 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-
 import sys
 import os
 from collections import OrderedDict
@@ -28,10 +27,10 @@ from yardstick.benchmark.runners import base as base_runner
 from yardstick.common.constants import CONF_FILE
 from yardstick.common.yaml_loader import yaml_load
 from yardstick.dispatcher.base import Base as DispatcherBase
-from yardstick.common.task_template import TaskTemplate
-from yardstick.common import utils
 from yardstick.common import constants
-from yardstick.common import exceptions
+from yardstick.common import exceptions as y_exc
+from yardstick.common import task_template
+from yardstick.common import utils
 from yardstick.common.html_template import report_template
 
 output_file_default = "/tmp/yardstick.out"
@@ -55,7 +54,7 @@ class Task(object):     # pragma: no cover
         out_types = [s.strip() for s in dispatchers.split(',')]
         output_config['DEFAULT']['dispatcher'] = out_types
 
-    def start(self, args):
+    def start(self, args, **kwargs):  # pylint: disable=unused-argument
         """Start a benchmark scenario."""
 
         atexit.register(self.atexit_handler)
@@ -87,8 +86,7 @@ class Task(object):     # pragma: no cover
 
         if args.suite:
             # 1.parse suite, return suite_params info
-            task_files, task_args, task_args_fnames = \
-                parser.parse_suite()
+            task_files, task_args, task_args_fnames = parser.parse_suite()
         else:
             task_files = [parser.path]
             task_args = [args.task_args]
@@ -101,32 +99,33 @@ class Task(object):     # pragma: no cover
             sys.exit(0)
 
         testcases = {}
-        # parse task_files
-        for i in range(0, len(task_files)):
-            one_task_start_time = time.time()
-            parser.path = task_files[i]
-            scenarios, run_in_parallel, meet_precondition, contexts = \
-                parser.parse_task(self.task_id, task_args[i],
-                                  task_args_fnames[i])
-
-            self.contexts.extend(contexts)
+        tasks = self._parse_tasks(parser, task_files, args, task_args,
+                                  task_args_fnames)
 
-            if not meet_precondition:
-                LOG.info("meet_precondition is %s, please check envrionment",
-                         meet_precondition)
+        # Execute task files.
+        for i, _ in enumerate(task_files):
+            one_task_start_time = time.time()
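+            # Contexts created while parsing this task are collected here and
+            # deployed in _run() before its scenarios execute.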
+            self.contexts.extend(tasks[i]['contexts'])
+            if not tasks[i]['meet_precondition']:
+                LOG.info('"meet_precondition" is %s, please check environment',
+                         tasks[i]['meet_precondition'])
                 continue
 
-            case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
             try:
-                data = self._run(scenarios, run_in_parallel, args.output_file)
+                data = self._run(tasks[i]['scenarios'],
+                                 tasks[i]['run_in_parallel'],
+                                 output_config)
             except KeyboardInterrupt:
                 raise
             except Exception:  # pylint: disable=broad-except
-                LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
-                testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+                LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+                          exc_info=True)
+                testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+                                                    'tc_data': []}
             else:
-                LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
-                testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
+                LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+                testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+                                                    'tc_data': data}
 
             if args.keep_deploy:
                 # keep deployment, forget about stack
@@ -149,9 +148,8 @@ class Task(object):     # pragma: no cover
         LOG.info("Total finished in %d secs",
                  total_end_time - total_start_time)
 
-        scenario = scenarios[0]
-        LOG.info("To generate report, execute command "
-                 "'yardstick report generate %(task_id)s %(tc)s'", scenario)
+        LOG.info('To generate report, execute command "yardstick report '
+                 'generate %s <YAML_NAME>"', self.task_id)
         LOG.info("Task ALL DONE, exiting")
         return result
 
@@ -230,11 +228,12 @@ class Task(object):     # pragma: no cover
 
     def _do_output(self, output_config, result):
         dispatchers = DispatcherBase.get(output_config)
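+        # Skip the InfluxDB dispatcher when flushing the final result;
+        # its records are presumably pushed already while the runners execute.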
+        dispatchers = (d for d in dispatchers
+                       if d.__dispatcher_type__ != 'Influxdb')
 
         for dispatcher in dispatchers:
             dispatcher.flush_result_data(result)
 
-    def _run(self, scenarios, run_in_parallel, output_file):
+    def _run(self, scenarios, run_in_parallel, output_config):
         """Deploys context and calls runners"""
         for context in self.contexts:
             context.deploy()
@@ -245,14 +244,14 @@ class Task(object):     # pragma: no cover
         # Start all background scenarios
         for scenario in filter(_is_background_scenario, scenarios):
             scenario["runner"] = dict(type="Duration", duration=1000000000)
-            runner = self.run_one_scenario(scenario, output_file)
+            runner = self.run_one_scenario(scenario, output_config)
             background_runners.append(runner)
 
         runners = []
         if run_in_parallel:
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
-                    runner = self.run_one_scenario(scenario, output_file)
+                    runner = self.run_one_scenario(scenario, output_config)
                     runners.append(runner)
 
             # Wait for runners to finish
@@ -261,12 +260,12 @@ class Task(object):     # pragma: no cover
                 if status != 0:
                     raise RuntimeError(
                         "{0} runner status {1}".format(runner.__execution_type__, status))
-                LOG.info("Runner ended, output in %s", output_file)
+                LOG.info("Runner ended")
         else:
             # run serially
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
-                    runner = self.run_one_scenario(scenario, output_file)
+                    runner = self.run_one_scenario(scenario, output_config)
                     status = runner_join(runner, background_runners, self.outputs, result)
                     if status != 0:
                         LOG.error('Scenario NO.%s: "%s" ERROR!',
@@ -274,7 +273,7 @@ class Task(object):     # pragma: no cover
                                   scenario.get('type'))
                         raise RuntimeError(
                             "{0} runner status {1}".format(runner.__execution_type__, status))
-                    LOG.info("Runner ended, output in %s", output_file)
+                    LOG.info("Runner ended")
 
         # Abort background runners
         for runner in background_runners:
@@ -311,10 +310,34 @@ class Task(object):     # pragma: no cover
         else:
             return op
 
-    def run_one_scenario(self, scenario_cfg, output_file):
+    def _parse_tasks(self, parser, task_files, args, task_args,
+                     task_args_fnames):
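+        """Parse the task files and return a list of per-task dicts."""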
+        tasks = []
+
+        # Parse task_files.
+        for i, _ in enumerate(task_files):
+            parser.path = task_files[i]
+            tasks.append(parser.parse_task(self.task_id, task_args[i],
+                                           task_args_fnames[i]))
+            tasks[i]['case_name'] = os.path.splitext(
+                os.path.basename(task_files[i]))[0]
+
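+        # With args.render_only set, only dump the rendered task files to the
+        # requested directory and exit; nothing is executed.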
+        if args.render_only:
+            utils.makedirs(args.render_only)
+            for idx, task in enumerate(tasks):
+                output_file_name = os.path.abspath(os.path.join(
+                    args.render_only,
+                    '{0:03d}-{1}.yml'.format(idx, task['case_name'])))
+                utils.write_file(output_file_name, task['rendered'])
+
+            sys.exit(0)
+
+        return tasks
+
+    def run_one_scenario(self, scenario_cfg, output_config):
         """run one scenario using context"""
         runner_cfg = scenario_cfg["runner"]
-        runner_cfg['output_filename'] = output_file
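+        # The runner now receives the whole output configuration instead of a
+        # single output file name, presumably so it can dispatch results itself.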
+        runner_cfg['output_config'] = output_config
 
         options = scenario_cfg.get('options', {})
         scenario_cfg['options'] = self._parse_options(options)
@@ -476,33 +499,42 @@ class TaskParser(object):       # pragma: no cover
 
         return valid_task_files, valid_task_args, valid_task_args_fnames
 
-    def parse_task(self, task_id, task_args=None, task_args_file=None):
-        """parses the task file and return an context and scenario instances"""
-        LOG.info("Parsing task config: %s", self.path)
+    def _render_task(self, task_args, task_args_file):
+        """Render the input task with the given arguments
 
+        :param task_args: (dict) arguments to render the task
+        :param task_args_file: (str) file containing the arguments to render
+                               the task
+        :return: (tuple) parsed task (dict) and rendered task (str)
+        """
         try:
             kw = {}
             if task_args_file:
                 with open(task_args_file) as f:
-                    kw.update(parse_task_args("task_args_file", f.read()))
-            kw.update(parse_task_args("task_args", task_args))
+                    kw.update(parse_task_args('task_args_file', f.read()))
+            kw.update(parse_task_args('task_args', task_args))
         except TypeError:
-            raise TypeError()
+            raise y_exc.TaskRenderArgumentError()
 
+        input_task = None
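+        # Separate a failure to read the task file (TaskReadError) from a
+        # failure to render or load it (TaskRenderError) below.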
         try:
             with open(self.path) as f:
-                try:
-                    input_task = f.read()
-                    rendered_task = TaskTemplate.render(input_task, **kw)
-                except Exception as e:
-                    LOG.exception('Failed to render template:\n%s\n', input_task)
-                    raise e
-                LOG.debug("Input task is:\n%s\n", rendered_task)
-
-                cfg = yaml_load(rendered_task)
-        except IOError as ioerror:
-            sys.exit(ioerror)
+                input_task = f.read()
+            rendered_task = task_template.TaskTemplate.render(input_task, **kw)
+            LOG.debug('Input task is:\n%s', rendered_task)
+            parsed_task = yaml_load(rendered_task)
+        except (IOError, OSError):
+            raise y_exc.TaskReadError(task_file=self.path)
+        except Exception:
+            raise y_exc.TaskRenderError(input_task=input_task)
+
+        return parsed_task, rendered_task
+
+    def parse_task(self, task_id, task_args=None, task_args_file=None):
+        """parses the task file and return an context and scenario instances"""
+        LOG.info("Parsing task config: %s", self.path)
 
+        cfg, rendered = self._render_task(task_args, task_args_file)
         self._check_schema(cfg["schema"], "task")
         meet_precondition = self._check_precondition(cfg)
 
@@ -541,7 +573,11 @@ class TaskParser(object):       # pragma: no cover
             self._change_node_names(scenario, contexts)
 
         # TODO we need something better here, a class that represent the file
-        return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
+        return {'scenarios': cfg['scenarios'],
+                'run_in_parallel': run_in_parallel,
+                'meet_precondition': meet_precondition,
+                'contexts': contexts,
+                'rendered': rendered}
 
     @staticmethod
     def _change_node_names(scenario, contexts):
@@ -582,7 +618,7 @@ class TaskParser(object):       # pragma: no cover
                 ctx = next((context for context in contexts
                        if context.assigned_name == context_name))
             except StopIteration:
-                raise exceptions.ScenarioConfigContextNameNotFound(
+                raise y_exc.ScenarioConfigContextNameNotFound(
                     context_name=context_name)
 
             return '{}.{}'.format(node_name, ctx.name)