Kubernetes context adoption when run in CI
[yardstick.git] / yardstick / benchmark / core / task.py
index 30ad98f..a49a2cb 100644 (file)
@@ -13,29 +13,33 @@ from __future__ import absolute_import
 from __future__ import print_function
 import sys
 import os
+from collections import OrderedDict
+
 import yaml
 import atexit
 import ipaddress
 import time
 import logging
 import uuid
-import errno
 import collections
 
 from six.moves import filter
+from jinja2 import Environment
 
 from yardstick.benchmark.contexts.base import Context
 from yardstick.benchmark.runners import base as base_runner
+from yardstick.common.yaml_loader import yaml_load
 from yardstick.dispatcher.base import Base as DispatcherBase
 from yardstick.common.task_template import TaskTemplate
-from yardstick.common.utils import source_env
 from yardstick.common import utils
 from yardstick.common import constants
+from yardstick.common.html_template import report_template
 
 output_file_default = "/tmp/yardstick.out"
 config_file = '/etc/yardstick/yardstick.conf'
 test_cases_dir_default = "tests/opnfv/test_cases/"
 LOG = logging.getLogger(__name__)
+JOIN_TIMEOUT = 60
 
 
 class Task(object):     # pragma: no cover
@@ -48,6 +52,12 @@ class Task(object):     # pragma: no cover
         self.contexts = []
         self.outputs = {}
 
+    def _set_dispatchers(self, output_config):
+        dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
+                                                           'file')
+        out_types = [s.strip() for s in dispatchers.split(',')]
+        output_config['DEFAULT']['dispatcher'] = out_types
+
     def start(self, args, **kwargs):
         """Start a benchmark scenario."""
 
@@ -56,14 +66,22 @@ class Task(object):     # pragma: no cover
         task_id = getattr(args, 'task_id')
         self.task_id = task_id if task_id else str(uuid.uuid4())
 
-        check_environment()
+        self._set_log()
+
+        try:
+            output_config = utils.parse_ini_file(config_file)
+        except Exception:
+            # all errors will be ignored; the default value is {}
+            output_config = {}
 
-        output_config = utils.parse_ini_file(config_file)
         self._init_output_config(output_config)
         self._set_output_config(output_config, args.output_file)
         LOG.debug('Output configuration is: %s', output_config)
 
-        if output_config['DEFAULT'].get('dispatcher') == 'file':
+        self._set_dispatchers(output_config)
+
+        # update dispatcher list
+        if 'file' in output_config['DEFAULT']['dispatcher']:
             result = {'status': 0, 'result': {}}
             utils.write_json_to_file(args.output_file, result)
 
@@ -107,6 +125,7 @@ class Task(object):     # pragma: no cover
             except KeyboardInterrupt:
                 raise
             except Exception:
+                LOG.exception("Running test case %s failed!", case_name)
                 testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
             else:
                 testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
@@ -126,6 +145,7 @@ class Task(object):     # pragma: no cover
         result = self._get_format_result(testcases)
 
         self._do_output(output_config, result)
+        self._generate_reporting(result)
 
         total_end_time = time.time()
         LOG.info("total finished in %d secs",
@@ -138,6 +158,25 @@ class Task(object):     # pragma: no cover
         print("Done, exiting")
         return result
 
+    def _generate_reporting(self, result):
+        env = Environment()
+        with open(constants.REPORTING_FILE, 'w') as f:
+            f.write(env.from_string(report_template).render(result))
+
+        LOG.info('yardstick reporting generate in %s', constants.REPORTING_FILE)
+
+    def _set_log(self):
+        log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s'
+        log_formatter = logging.Formatter(log_format)
+
+        utils.makedirs(constants.TASK_LOG_DIR)
+        log_path = os.path.join(constants.TASK_LOG_DIR, '{}.log'.format(self.task_id))
+        log_handler = logging.FileHandler(log_path)
+        log_handler.setFormatter(log_formatter)
+        log_handler.setLevel(logging.DEBUG)
+
+        logging.root.addHandler(log_handler)
+
     def _init_output_config(self, output_config):
         output_config.setdefault('DEFAULT', {})
         output_config.setdefault('dispatcher_http', {})
@@ -193,9 +232,10 @@ class Task(object):     # pragma: no cover
             return 'PASS'
 
     def _do_output(self, output_config, result):
+        dispatchers = DispatcherBase.get(output_config)
 
-        dispatcher = DispatcherBase.get(output_config)
-        dispatcher.flush_result_data(result)
+        for dispatcher in dispatchers:
+            dispatcher.flush_result_data(result)
 
     def _run(self, scenarios, run_in_parallel, output_file):
         """Deploys context and calls runners"""
@@ -245,21 +285,16 @@ class Task(object):     # pragma: no cover
 
         # Wait for background runners to finish
         for runner in background_runners:
-            status = runner.join(timeout=60)
+            status = runner.join(JOIN_TIMEOUT)
             if status is None:
                 # Nuke if it did not stop nicely
                 base_runner.Runner.terminate(runner)
-                status = runner_join(runner)
-            else:
-                base_runner.Runner.release(runner)
+                runner.join(JOIN_TIMEOUT)
+            base_runner.Runner.release(runner)
 
             self.outputs.update(runner.get_output())
             result.extend(runner.get_result())
-
-            if status != 0:
-                raise RuntimeError
             print("Background task ended")
-
         return result
 
     def atexit_handler(self):
@@ -326,6 +361,8 @@ class Task(object):     # pragma: no cover
 
         if "nodes" in scenario_cfg:
             context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+            context_cfg["networks"] = get_networks_from_nodes(
+                context_cfg["nodes"])
         runner = base_runner.Runner.get(runner_cfg)
 
         print("Starting runner of type '%s'" % runner_cfg["type"])
@@ -398,7 +435,7 @@ class TaskParser(object):       # pragma: no cover
 
         try:
             with open(self.path) as stream:
-                cfg = yaml.load(stream)
+                cfg = yaml_load(stream)
         except IOError as ioerror:
             sys.exit(ioerror)
 
@@ -462,7 +499,7 @@ class TaskParser(object):       # pragma: no cover
                     raise e
                 print("Input task is:\n%s\n" % rendered_task)
 
-                cfg = yaml.load(rendered_task)
+                cfg = yaml_load(rendered_task)
         except IOError as ioerror:
             sys.exit(ioerror)
 
@@ -522,7 +559,7 @@ class TaskParser(object):       # pragma: no cover
                                                                cfg_schema))
 
     def _check_precondition(self, cfg):
-        """Check if the envrionment meet the preconditon"""
+        """Check if the environment meet the precondition"""
 
         if "precondition" in cfg:
             precondition = cfg["precondition"]
@@ -577,14 +614,28 @@ def _is_background_scenario(scenario):
 
 
 def parse_nodes_with_context(scenario_cfg):
-    """paras the 'nodes' fields in scenario """
-    nodes = scenario_cfg["nodes"]
-
-    nodes_cfg = {}
-    for nodename in nodes:
-        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
-    return nodes_cfg
+    """parse the 'nodes' fields in scenario """
+    # ensure consistency in node instantiation order
+    return OrderedDict((nodename, Context.get_server(scenario_cfg["nodes"][nodename]))
+                       for nodename in sorted(scenario_cfg["nodes"]))
+
+
+def get_networks_from_nodes(nodes):
+    """parse the network info from the interfaces of the given nodes"""
+    networks = {}
+    for node in nodes.values():
+        if not node:
+            continue
+        interfaces = node.get('interfaces', {})
+        for interface in interfaces.values():
+            vld_id = interface.get('vld_id')
+            # mgmt network doesn't have vld_id
+            if not vld_id:
+                continue
+            network = Context.get_network({"vld_id": vld_id})
+            if network:
+                networks[network['name']] = network
+    return networks
 
 
 def runner_join(runner):
@@ -604,7 +655,7 @@ def parse_task_args(src_name, args):
         return args
 
     try:
-        kw = args and yaml.safe_load(args)
+        kw = args and yaml_load(args)
         kw = {} if kw is None else kw
     except yaml.parser.ParserError as e:
         print_invalid_header(src_name, args)
@@ -620,17 +671,6 @@ def parse_task_args(src_name, args):
     return kw
 
 
-def check_environment():
-    auth_url = os.environ.get('OS_AUTH_URL', None)
-    if not auth_url:
-        try:
-            source_env(constants.OPENRC)
-        except IOError as e:
-            if e.errno != errno.EEXIST:
-                raise
-            LOG.debug('OPENRC file not found')
-
-
 def change_server_name(scenario, suffix):
     try:
         host = scenario['host']