Add real-time log view in GUI
[yardstick.git] / yardstick / benchmark / core / task.py
index 091aa99..dd35bd4 100644 (file)
@@ -13,6 +13,8 @@ from __future__ import absolute_import
 from __future__ import print_function
 import sys
 import os
+from collections import OrderedDict
+
 import yaml
 import atexit
 import ipaddress
@@ -20,6 +22,8 @@ import time
 import logging
 import uuid
 import errno
+import collections
+
 from six.moves import filter
 
 from yardstick.benchmark.contexts.base import Context
@@ -34,6 +38,7 @@ output_file_default = "/tmp/yardstick.out"
 config_file = '/etc/yardstick/yardstick.conf'
 test_cases_dir_default = "tests/opnfv/test_cases/"
 LOG = logging.getLogger(__name__)
+JOIN_TIMEOUT = 60
 
 
 class Task(object):     # pragma: no cover
@@ -46,21 +51,38 @@ class Task(object):     # pragma: no cover
         self.contexts = []
         self.outputs = {}
 
+    def _set_dispatchers(self, output_config):
+        dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
+                                                           'file')
+        out_types = [s.strip() for s in dispatchers.split(',')]
+        output_config['DEFAULT']['dispatcher'] = out_types
+
     def start(self, args, **kwargs):
         """Start a benchmark scenario."""
 
         atexit.register(self.atexit_handler)
 
-        self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
+        task_id = getattr(args, 'task_id')
+        self.task_id = task_id if task_id else str(uuid.uuid4())
+
+        self._set_log()
 
         check_environment()
 
-        output_config = utils.parse_ini_file(config_file)
+        try:
+            output_config = utils.parse_ini_file(config_file)
+        except Exception:
+            # any parsing error is ignored; fall back to an empty config
+            output_config = {}
+
         self._init_output_config(output_config)
         self._set_output_config(output_config, args.output_file)
         LOG.debug('Output configuration is: %s', output_config)
 
-        if output_config['DEFAULT'].get('dispatcher') == 'file':
+        self._set_dispatchers(output_config)
+
+        # write an initial result file when the 'file' dispatcher is enabled
+        if 'file' in output_config['DEFAULT']['dispatcher']:
             result = {'status': 0, 'result': {}}
             utils.write_json_to_file(args.output_file, result)
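For clarity, a sketch of what _set_dispatchers does to the config before the membership test above; the 'file, http' value is illustrative, not taken from a real yardstick.conf:

    dispatchers = 'file, http'                               # raw value from the [DEFAULT] section
    out_types = [s.strip() for s in dispatchers.split(',')]
    # out_types == ['file', 'http'], so "'file' in out_types" holds and the
    # initial result file is still written when several dispatchers are configured.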
 
@@ -104,6 +126,7 @@ class Task(object):     # pragma: no cover
             except KeyboardInterrupt:
                 raise
             except Exception:
+                LOG.exception("Running test case %s failed!", case_name)
                 testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
             else:
                 testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
@@ -133,6 +156,18 @@ class Task(object):     # pragma: no cover
               scenario['task_id'], scenario['tc'])
 
         print("Done, exiting")
+        return result
+
+    def _set_log(self):
+        log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s'
+        log_formatter = logging.Formatter(log_format)
+
+        log_path = os.path.join(constants.TASK_LOG_DIR, '{}.log'.format(self.task_id))
+        log_handler = logging.FileHandler(log_path)
+        log_handler.setFormatter(log_formatter)
+        log_handler.setLevel(logging.DEBUG)
+
+        logging.root.addHandler(log_handler)
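A minimal sketch of how a GUI back end could stream the per-task log that _set_log creates; the directory default and the polling loop are assumptions for illustration, not part of this change:

    import os
    import time

    def follow_task_log(task_id, log_dir='/var/log/yardstick'):  # assumed value of constants.TASK_LOG_DIR
        path = os.path.join(log_dir, '{}.log'.format(task_id))
        with open(path) as log_file:
            while True:
                line = log_file.readline()
                if line:
                    yield line              # push to the GUI, e.g. over a websocket
                else:
                    time.sleep(0.5)         # wait for the task to write more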
 
     def _init_output_config(self, output_config):
         output_config.setdefault('DEFAULT', {})
@@ -189,9 +224,10 @@ class Task(object):     # pragma: no cover
             return 'PASS'
 
     def _do_output(self, output_config, result):
+        dispatchers = DispatcherBase.get(output_config)
 
-        dispatcher = DispatcherBase.get(output_config)
-        dispatcher.flush_result_data(result)
+        for dispatcher in dispatchers:
+            dispatcher.flush_result_data(result)
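The loop above implies DispatcherBase.get now returns a list rather than a single instance. A hypothetical sketch of a compatible get(), not the real yardstick dispatcher module; the _classes registry is a stand-in for whatever class lookup that module actually uses:

    class DispatcherSketch(object):
        _classes = {}                        # name -> dispatcher class (stand-in registry)

        @classmethod
        def get(cls, output_config):
            # one dispatcher instance per configured type, e.g. ['file', 'http']
            return [cls._classes[name](output_config)
                    for name in output_config['DEFAULT']['dispatcher']]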
 
     def _run(self, scenarios, run_in_parallel, output_file):
         """Deploys context and calls runners"""
@@ -241,19 +277,16 @@ class Task(object):     # pragma: no cover
 
         # Wait for background runners to finish
         for runner in background_runners:
-            status = runner.join(timeout=60)
+            status = runner.join(JOIN_TIMEOUT)
             if status is None:
                 # Nuke if it did not stop nicely
                 base_runner.Runner.terminate(runner)
-                status = runner_join(runner)
-                self.outputs.update(runner.get_output())
-                result.extend(runner.get_result())
-            else:
-                base_runner.Runner.release(runner)
-            if status != 0:
-                raise RuntimeError
-            print("Background task ended")
+                runner.join(JOIN_TIMEOUT)
+            base_runner.Runner.release(runner)
 
+            self.outputs.update(runner.get_output())
+            result.extend(runner.get_result())
+            print("Background task ended")
         return result
 
     def atexit_handler(self):
@@ -320,6 +353,8 @@ class Task(object):     # pragma: no cover
 
         if "nodes" in scenario_cfg:
             context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+            context_cfg["networks"] = get_networks_from_nodes(
+                context_cfg["nodes"])
         runner = base_runner.Runner.get(runner_cfg)
 
         print("Starting runner of type '%s'" % runner_cfg["type"])
@@ -369,17 +404,17 @@ class TaskParser(object):       # pragma: no cover
                 tc_fit_installer = constraint.get('installer', None)
                 LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
                          cur_pod, cur_installer, constraint)
-                if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+                if (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod):
                     return False
-                if cur_installer and tc_fit_installer and \
-                        cur_installer not in tc_fit_installer:
+                if (cur_installer is None) or (tc_fit_installer and cur_installer
+                                               not in tc_fit_installer):
                     return False
         return True
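A concrete reading of the tightened constraint check above, with invented pod names: when a test case declares constraints but the current pod is unknown, it is now skipped instead of run.

    cur_pod, tc_fit_pod = None, ['lf-pod1', 'lf-pod2']
    (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod)   # True  -> return False (skip)
    cur_pod = 'lf-pod1'
    (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod)   # False -> constraint satisfied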
 
     def _get_task_para(self, task, cur_pod):
         task_args = task.get('task_args', None)
         if task_args is not None:
-            task_args = task_args.get(cur_pod, None)
+            task_args = task_args.get(cur_pod, task_args.get('default'))
         task_args_fnames = task.get('task_args_fnames', None)
         if task_args_fnames is not None:
             task_args_fnames = task_args_fnames.get(cur_pod, None)
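With the 'default' fallback above, task_args can carry per-pod overrides plus a catch-all entry; a minimal illustration with made-up keys:

    task_args = {'default': {'flavor': 'yardstick-flavor'},
                 'lf-pod1': {'flavor': 'm1.large'}}
    task_args.get('lf-pod1', task_args.get('default'))    # -> {'flavor': 'm1.large'}
    task_args.get('zte-pod2', task_args.get('default'))   # -> {'flavor': 'yardstick-flavor'}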
@@ -493,6 +528,9 @@ class TaskParser(object):       # pragma: no cover
             task_name = os.path.splitext(os.path.basename(self.path))[0]
             scenario["tc"] = task_name
             scenario["task_id"] = task_id
+            # embed task path into scenario so we can load other files
+            # relative to task path
+            scenario["task_path"] = os.path.dirname(self.path)
 
             change_server_name(scenario, name_suffix)
 
@@ -513,7 +551,7 @@ class TaskParser(object):       # pragma: no cover
                                                                cfg_schema))
 
     def _check_precondition(self, cfg):
-        """Check if the envrionment meet the preconditon"""
+        """Check if the environment meet the precondition"""
 
         if "precondition" in cfg:
             precondition = cfg["precondition"]
@@ -568,14 +606,28 @@ def _is_background_scenario(scenario):
 
 
 def parse_nodes_with_context(scenario_cfg):
-    """paras the 'nodes' fields in scenario """
-    nodes = scenario_cfg["nodes"]
-
-    nodes_cfg = {}
-    for nodename in nodes:
-        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
-    return nodes_cfg
+    """parse the 'nodes' fields in scenario """
+    # ensure consistency in node instantiation order
+    return OrderedDict((nodename, Context.get_server(scenario_cfg["nodes"][nodename]))
+                       for nodename in sorted(scenario_cfg["nodes"]))
+
+
+def get_networks_from_nodes(nodes):
+    """parse the 'nodes' fields in scenario """
+    networks = {}
+    for node in nodes.values():
+        if not node:
+            continue
+        interfaces = node.get('interfaces', {})
+        for interface in interfaces.values():
+            vld_id = interface.get('vld_id')
+            # mgmt network doesn't have vld_id
+            if not vld_id:
+                continue
+            network = Context.get_network({"vld_id": vld_id})
+            if network:
+                networks[network['name']] = network
+    return networks
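To make the data flow concrete, this is the shape of input get_networks_from_nodes walks and the mapping it returns; node and vld_id names are invented, and Context.get_network is assumed to resolve a vld_id to a dict with at least a 'name' key:

    nodes = {
        'vnf__0': {'interfaces': {'xe0': {'vld_id': 'uplink_0'},
                                  'mgmt': {}}},               # no vld_id -> skipped
        'tg__0': {'interfaces': {'xe0': {'vld_id': 'uplink_0'}}},
    }
    # get_networks_from_nodes(nodes) would return something like
    # {'uplink_0': {'name': 'uplink_0', ...}} -- one entry per distinct vld_id
    # that Context.get_network can resolve.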
 
 
 def runner_join(runner):
@@ -591,6 +643,9 @@ def print_invalid_header(source_name, args):
 
 
 def parse_task_args(src_name, args):
+    if isinstance(args, collections.Mapping):
+        return args
+
     try:
         kw = args and yaml.safe_load(args)
         kw = {} if kw is None else kw
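With the Mapping short-circuit above, task arguments can be passed either as an already-parsed dict (e.g. from the API/GUI path) or as a YAML/JSON string from the CLI; both calls below should yield the same dict:

    parse_task_args('task_args', {'public_network': 'ext-net'})
    parse_task_args('task_args', '{"public_network": "ext-net"}')   # parsed by yaml.safe_load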