Merge "support more parameters in iperf3 testcase"
[yardstick.git] / yardstick / benchmark / core / task.py
index 522ad4d..ede14b1 100644 (file)
@@ -20,15 +20,20 @@ import time
 import logging
 import uuid
 import errno
+import collections
+
 from six.moves import filter
 
 from yardstick.benchmark.contexts.base import Context
 from yardstick.benchmark.runners import base as base_runner
+from yardstick.dispatcher.base import Base as DispatcherBase
 from yardstick.common.task_template import TaskTemplate
 from yardstick.common.utils import source_env
+from yardstick.common import utils
 from yardstick.common import constants
 
 output_file_default = "/tmp/yardstick.out"
+config_file = '/etc/yardstick/yardstick.conf'
 test_cases_dir_default = "tests/opnfv/test_cases/"
 LOG = logging.getLogger(__name__)
 
@@ -39,15 +44,34 @@ class Task(object):     # pragma: no cover
        Set of commands to manage benchmark tasks.
     """
 
+    def __init__(self):
+        self.contexts = []
+        self.outputs = {}
+
     def start(self, args, **kwargs):
         """Start a benchmark scenario."""
 
-        atexit.register(atexit_handler)
+        atexit.register(self.atexit_handler)
 
-        self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
+        task_id = getattr(args, 'task_id')
+        self.task_id = task_id if task_id else str(uuid.uuid4())
 
         check_environment()
 
+        try:
+            output_config = utils.parse_ini_file(config_file)
+        except Exception:
+            # all errors will be ignored; the default value is {}
+            output_config = {}
+
+        self._init_output_config(output_config)
+        self._set_output_config(output_config, args.output_file)
+        LOG.debug('Output configuration is: %s', output_config)
+
+        if output_config['DEFAULT'].get('dispatcher') == 'file':
+            result = {'status': 0, 'result': {}}
+            utils.write_json_to_file(args.output_file, result)
+
         total_start_time = time.time()
         parser = TaskParser(args.inputfile[0])
 
@@ -66,68 +90,158 @@ class Task(object):     # pragma: no cover
         if args.parse_only:
             sys.exit(0)
 
+        testcases = {}
         # parse task_files
         for i in range(0, len(task_files)):
             one_task_start_time = time.time()
             parser.path = task_files[i]
-            scenarios, run_in_parallel, meet_precondition = parser.parse_task(
-                self.task_id, task_args[i], task_args_fnames[i])
+            scenarios, run_in_parallel, meet_precondition, contexts = \
+                parser.parse_task(self.task_id, task_args[i],
+                                  task_args_fnames[i])
+
+            self.contexts.extend(contexts)
 
             if not meet_precondition:
                 LOG.info("meet_precondition is %s, please check envrionment",
                          meet_precondition)
                 continue
 
-            self._run(scenarios, run_in_parallel, args.output_file)
+            case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+            try:
+                data = self._run(scenarios, run_in_parallel, args.output_file)
+            except KeyboardInterrupt:
+                raise
+            except Exception:
+                testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+            else:
+                testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
 
             if args.keep_deploy:
                 # keep deployment, forget about stack
                 # (hide it for exit handler)
-                Context.list = []
+                self.contexts = []
             else:
-                for context in Context.list[::-1]:
+                for context in self.contexts[::-1]:
                     context.undeploy()
-                Context.list = []
+                self.contexts = []
             one_task_end_time = time.time()
             LOG.info("task %s finished in %d secs", task_files[i],
                      one_task_end_time - one_task_start_time)
 
+        result = self._get_format_result(testcases)
+
+        self._do_output(output_config, result)
+
         total_end_time = time.time()
         LOG.info("total finished in %d secs",
                  total_end_time - total_start_time)
 
+        scenario = scenarios[0]
+        print("To generate report execute => yardstick report generate ",
+              scenario['task_id'], scenario['tc'])
+
         print("Done, exiting")
+        return result
+
+    def _init_output_config(self, output_config):
+        output_config.setdefault('DEFAULT', {})
+        output_config.setdefault('dispatcher_http', {})
+        output_config.setdefault('dispatcher_file', {})
+        output_config.setdefault('dispatcher_influxdb', {})
+        output_config.setdefault('nsb', {})
+
+    def _set_output_config(self, output_config, file_path):
+        try:
+            out_type = os.environ['DISPATCHER']
+        except KeyError:
+            output_config['DEFAULT'].setdefault('dispatcher', 'file')
+        else:
+            output_config['DEFAULT']['dispatcher'] = out_type
+
+        output_config['dispatcher_file']['file_path'] = file_path
+
+        try:
+            target = os.environ['TARGET']
+        except KeyError:
+            pass
+        else:
+            k = 'dispatcher_{}'.format(output_config['DEFAULT']['dispatcher'])
+            output_config[k]['target'] = target
+
+    def _get_format_result(self, testcases):
+        criteria = self._get_task_criteria(testcases)
+
+        info = {
+            'deploy_scenario': os.environ.get('DEPLOY_SCENARIO', 'unknown'),
+            'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
+            'pod_name': os.environ.get('NODE_NAME', 'unknown'),
+            'version': os.environ.get('YARDSTICK_BRANCH', 'unknown')
+        }
+
+        result = {
+            'status': 1,
+            'result': {
+                'criteria': criteria,
+                'task_id': self.task_id,
+                'info': info,
+                'testcases': testcases
+            }
+        }
+
+        return result
+
+    def _get_task_criteria(self, testcases):
+        criteria = any(t.get('criteria') != 'PASS' for t in testcases.values())
+        if criteria:
+            return 'FAIL'
+        else:
+            return 'PASS'
+
+    def _do_output(self, output_config, result):
+
+        dispatcher = DispatcherBase.get(output_config)
+        dispatcher.flush_result_data(result)
 
     def _run(self, scenarios, run_in_parallel, output_file):
         """Deploys context and calls runners"""
-        for context in Context.list:
+        for context in self.contexts:
             context.deploy()
 
         background_runners = []
 
+        result = []
         # Start all background scenarios
         for scenario in filter(_is_background_scenario, scenarios):
             scenario["runner"] = dict(type="Duration", duration=1000000000)
-            runner = run_one_scenario(scenario, output_file)
+            runner = self.run_one_scenario(scenario, output_file)
             background_runners.append(runner)
 
         runners = []
         if run_in_parallel:
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
-                    runner = run_one_scenario(scenario, output_file)
+                    runner = self.run_one_scenario(scenario, output_file)
                     runners.append(runner)
 
             # Wait for runners to finish
             for runner in runners:
-                runner_join(runner)
+                status = runner_join(runner)
+                if status != 0:
+                    raise RuntimeError
+                self.outputs.update(runner.get_output())
+                result.extend(runner.get_result())
                 print("Runner ended, output in", output_file)
         else:
             # run serially
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
-                    runner = run_one_scenario(scenario, output_file)
-                    runner_join(runner)
+                    runner = self.run_one_scenario(scenario, output_file)
+                    status = runner_join(runner)
+                    if status != 0:
+                        LOG.error('Scenario: %s ERROR', scenario.get('type'))
+                        raise RuntimeError
+                    self.outputs.update(runner.get_output())
+                    result.extend(runner.get_result())
                     print("Runner ended, output in", output_file)
 
         # Abort background runners
@@ -136,16 +250,118 @@ class Task(object):     # pragma: no cover
 
         # Wait for background runners to finish
         for runner in background_runners:
-            if runner.join(timeout=60) is None:
+            status = runner.join(timeout=60)
+            if status is None:
                 # Nuke if it did not stop nicely
                 base_runner.Runner.terminate(runner)
-                runner_join(runner)
+                status = runner_join(runner)
             else:
                 base_runner.Runner.release(runner)
+
+            self.outputs.update(runner.get_output())
+            result.extend(runner.get_result())
             print("Background task ended")
+        return result
+
+    def atexit_handler(self):
+        """handler for process termination"""
+        base_runner.Runner.terminate_all()
+
+        if self.contexts:
+            print("Undeploying all contexts")
+            for context in self.contexts[::-1]:
+                context.undeploy()
+
+    def _parse_options(self, op):
+        if isinstance(op, dict):
+            return {k: self._parse_options(v) for k, v in op.items()}
+        elif isinstance(op, list):
+            return [self._parse_options(v) for v in op]
+        elif isinstance(op, str):
+            return self.outputs.get(op[1:]) if op.startswith('$') else op
+        else:
+            return op
+
+    def run_one_scenario(self, scenario_cfg, output_file):
+        """run one scenario using context"""
+        runner_cfg = scenario_cfg["runner"]
+        runner_cfg['output_filename'] = output_file
+
+        options = scenario_cfg.get('options', {})
+        scenario_cfg['options'] = self._parse_options(options)
+
+        # TODO support get multi hosts/vms info
+        context_cfg = {}
+        if "host" in scenario_cfg:
+            context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+
+        if "target" in scenario_cfg:
+            if is_ip_addr(scenario_cfg["target"]):
+                context_cfg['target'] = {}
+                context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+            else:
+                context_cfg['target'] = Context.get_server(
+                    scenario_cfg["target"])
+                if self._is_same_heat_context(scenario_cfg["host"],
+                                              scenario_cfg["target"]):
+                    context_cfg["target"]["ipaddr"] = \
+                        context_cfg["target"]["private_ip"]
+                else:
+                    context_cfg["target"]["ipaddr"] = \
+                        context_cfg["target"]["ip"]
+
+        if "targets" in scenario_cfg:
+            ip_list = []
+            for target in scenario_cfg["targets"]:
+                if is_ip_addr(target):
+                    ip_list.append(target)
+                    context_cfg['target'] = {}
+                else:
+                    context_cfg['target'] = Context.get_server(target)
+                    if self._is_same_heat_context(scenario_cfg["host"],
+                                                  target):
+                        ip_list.append(context_cfg["target"]["private_ip"])
+                    else:
+                        ip_list.append(context_cfg["target"]["ip"])
+            context_cfg['target']['ipaddr'] = ','.join(ip_list)
+
+        if "nodes" in scenario_cfg:
+            context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+            context_cfg["networks"] = get_networks_from_nodes(
+                context_cfg["nodes"])
+        runner = base_runner.Runner.get(runner_cfg)
+
+        print("Starting runner of type '%s'" % runner_cfg["type"])
+        runner.run(scenario_cfg, context_cfg)
+
+        return runner
+
+    def _is_same_heat_context(self, host_attr, target_attr):
+        """check if two servers are in the same heat context
+        host_attr: either a name for a server created by yardstick or a dict
+        with attribute name mapping when using external heat templates
+        target_attr: either a name for a server created by yardstick or a dict
+        with attribute name mapping when using external heat templates
+        """
+        host = None
+        target = None
+        for context in self.contexts:
+            if context.__context_type__ != "Heat":
+                continue
+
+            host = context._get_server(host_attr)
+            if host is None:
+                continue
 
+            target = context._get_server(target_attr)
+            if target is None:
+                return False
 
-# TODO: Move stuff below into TaskCommands class !?
+            # Both host and target are not None, so they are in the
+            # same heat context.
+            return True
+
+        return False
 
 
 class TaskParser(object):       # pragma: no cover
@@ -162,17 +378,17 @@ class TaskParser(object):       # pragma: no cover
                 tc_fit_installer = constraint.get('installer', None)
                 LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
                          cur_pod, cur_installer, constraint)
-                if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+                if (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod):
                     return False
-                if cur_installer and tc_fit_installer and \
-                        cur_installer not in tc_fit_installer:
+                if (cur_installer is None) or (tc_fit_installer and cur_installer
+                                               not in tc_fit_installer):
                     return False
         return True
 
     def _get_task_para(self, task, cur_pod):
         task_args = task.get('task_args', None)
         if task_args is not None:
-            task_args = task_args.get(cur_pod, None)
+            task_args = task_args.get(cur_pod, task_args.get('default'))
         task_args_fnames = task.get('task_args_fnames', None)
         if task_args_fnames is not None:
             task_args_fnames = task_args_fnames.get(cur_pod, None)
@@ -265,23 +481,19 @@ class TaskParser(object):       # pragma: no cover
         else:
             context_cfgs = [{"type": "Dummy"}]
 
+        contexts = []
         name_suffix = '-{}'.format(task_id[:8])
         for cfg_attrs in context_cfgs:
-            cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'], name_suffix)
+            try:
+                cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'],
+                                                  name_suffix)
+            except KeyError:
+                pass
+            # default to Heat context because we are testing OpenStack
             context_type = cfg_attrs.get("type", "Heat")
-            if "Heat" == context_type and "networks" in cfg_attrs:
-                # bugfix: if there are more than one network,
-                # only add "external_network" on first one.
-                # the name of netwrok should follow this rule:
-                # test, test2, test3 ...
-                # sort network with the length of network's name
-                sorted_networks = sorted(cfg_attrs["networks"])
-                # config external_network based on env var
-                cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
-                    = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
-
             context = Context.get(context_type)
             context.init(cfg_attrs)
+            contexts.append(context)
 
         run_in_parallel = cfg.get("run_in_parallel", False)
 
@@ -290,6 +502,9 @@ class TaskParser(object):       # pragma: no cover
             task_name = os.path.splitext(os.path.basename(self.path))[0]
             scenario["tc"] = task_name
             scenario["task_id"] = task_id
+            # embed task path into scenario so we can load other files
+            # relative to task path
+            scenario["task_path"] = os.path.dirname(self.path)
 
             change_server_name(scenario, name_suffix)
 
@@ -300,7 +515,7 @@ class TaskParser(object):       # pragma: no cover
                 pass
 
         # TODO we need something better here, a class that represent the file
-        return cfg["scenarios"], run_in_parallel, meet_precondition
+        return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
 
     def _check_schema(self, cfg_schema, schema_type):
         """Check if config file is using the correct schema type"""
@@ -310,7 +525,7 @@ class TaskParser(object):       # pragma: no cover
                                                                cfg_schema))
 
     def _check_precondition(self, cfg):
-        """Check if the envrionment meet the preconditon"""
+        """Check if the environment meet the precondition"""
 
         if "precondition" in cfg:
             precondition = cfg["precondition"]
@@ -342,16 +557,6 @@ class TaskParser(object):       # pragma: no cover
         return True
 
 
-def atexit_handler():
-    """handler for process termination"""
-    base_runner.Runner.terminate_all()
-
-    if len(Context.list) > 0:
-        print("Undeploying all contexts")
-        for context in Context.list[::-1]:
-            context.undeploy()
-
-
 def is_ip_addr(addr):
     """check if string addr is an IP address"""
     try:
@@ -367,34 +572,6 @@ def is_ip_addr(addr):
         return True
 
 
-def _is_same_heat_context(host_attr, target_attr):
-    """check if two servers are in the same heat context
-    host_attr: either a name for a server created by yardstick or a dict
-    with attribute name mapping when using external heat templates
-    target_attr: either a name for a server created by yardstick or a dict
-    with attribute name mapping when using external heat templates
-    """
-    host = None
-    target = None
-    for context in Context.list:
-        if context.__context_type__ != "Heat":
-            continue
-
-        host = context._get_server(host_attr)
-        if host is None:
-            continue
-
-        target = context._get_server(target_attr)
-        if target is None:
-            return False
-
-        # Both host and target is not None, then they are in the
-        # same heat context.
-        return True
-
-    return False
-
-
 def _is_background_scenario(scenario):
     if "run_in_background" in scenario:
         return scenario["run_in_background"]
@@ -402,71 +579,34 @@ def _is_background_scenario(scenario):
         return False
 
 
-def run_one_scenario(scenario_cfg, output_file):
-    """run one scenario using context"""
-    runner_cfg = scenario_cfg["runner"]
-    runner_cfg['output_filename'] = output_file
-
-    # TODO support get multi hosts/vms info
-    context_cfg = {}
-    if "host" in scenario_cfg:
-        context_cfg['host'] = Context.get_server(scenario_cfg["host"])
-
-    if "target" in scenario_cfg:
-        if is_ip_addr(scenario_cfg["target"]):
-            context_cfg['target'] = {}
-            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
-        else:
-            context_cfg['target'] = Context.get_server(scenario_cfg["target"])
-            if _is_same_heat_context(scenario_cfg["host"],
-                                     scenario_cfg["target"]):
-                context_cfg["target"]["ipaddr"] = \
-                    context_cfg["target"]["private_ip"]
-            else:
-                context_cfg["target"]["ipaddr"] = \
-                    context_cfg["target"]["ip"]
-
-    if "targets" in scenario_cfg:
-        ip_list = []
-        for target in scenario_cfg["targets"]:
-            if is_ip_addr(target):
-                ip_list.append(target)
-                context_cfg['target'] = {}
-            else:
-                context_cfg['target'] = Context.get_server(target)
-                if _is_same_heat_context(scenario_cfg["host"], target):
-                    ip_list.append(context_cfg["target"]["private_ip"])
-                else:
-                    ip_list.append(context_cfg["target"]["ip"])
-        context_cfg['target']['ipaddr'] = ','.join(ip_list)
-
-    if "nodes" in scenario_cfg:
-        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
-    runner = base_runner.Runner.get(runner_cfg)
-
-    print("Starting runner of type '%s'" % runner_cfg["type"])
-    runner.run(scenario_cfg, context_cfg)
-
-    return runner
-
-
 def parse_nodes_with_context(scenario_cfg):
-    """paras the 'nodes' fields in scenario """
+    """parse the 'nodes' fields in scenario """
     nodes = scenario_cfg["nodes"]
+    return {nodename: Context.get_server(node) for nodename, node in nodes.items()}
 
-    nodes_cfg = {}
-    for nodename in nodes:
-        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
 
-    return nodes_cfg
+def get_networks_from_nodes(nodes):
+    """collect the networks referenced by the nodes' interfaces (by vld_id)"""
+    networks = {}
+    for node in nodes.values():
+        if not node:
+            continue
+        for interface in node['interfaces'].values():
+            vld_id = interface.get('vld_id')
+            # mgmt network doesn't have vld_id
+            if not vld_id:
+                continue
+            network = Context.get_network({"vld_id": vld_id})
+            if network:
+                networks[network['name']] = network
+    return networks
 
 
 def runner_join(runner):
     """join (wait for) a runner, exit process at runner failure"""
     status = runner.join()
     base_runner.Runner.release(runner)
-    if status != 0:
-        sys.exit("Runner failed")
+    return status
 
 
 def print_invalid_header(source_name, args):
@@ -475,6 +615,9 @@ def print_invalid_header(source_name, args):
 
 
 def parse_task_args(src_name, args):
+    if isinstance(args, collections.Mapping):
+        return args
+
     try:
         kw = args and yaml.safe_load(args)
         kw = {} if kw is None else kw
@@ -496,7 +639,7 @@ def check_environment():
     auth_url = os.environ.get('OS_AUTH_URL', None)
     if not auth_url:
         try:
-            source_env(constants.OPENSTACK_RC_FILE)
+            source_env(constants.OPENRC)
         except IOError as e:
             if e.errno != errno.EEXIST:
                 raise