X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=yardstick%2Fbenchmark%2Fcore%2Ftask.py;h=dd35bd4f43ab6ad21a4193bc8e3179f5b8017be4;hb=43bf12d6ab7bcaea16dc75ed4ccbe3895cf51da3;hp=0e85e6316323d009de25b6537a91be73ddef6ee9;hpb=dae41a002f756d1da65a749bc82eef3ccf4252db;p=yardstick.git

diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 0e85e6316..dd35bd4f4 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -13,6 +13,8 @@ from __future__ import absolute_import
 from __future__ import print_function
 import sys
 import os
+from collections import OrderedDict
+
 import yaml
 import atexit
 import ipaddress
@@ -36,6 +38,7 @@ output_file_default = "/tmp/yardstick.out"
 config_file = '/etc/yardstick/yardstick.conf'
 test_cases_dir_default = "tests/opnfv/test_cases/"
 LOG = logging.getLogger(__name__)
+JOIN_TIMEOUT = 60
 
 
 class Task(object):     # pragma: no cover
@@ -48,6 +51,12 @@ class Task(object):     # pragma: no cover
         self.contexts = []
         self.outputs = {}
 
+    def _set_dispatchers(self, output_config):
+        dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
+                                                           'file')
+        out_types = [s.strip() for s in dispatchers.split(',')]
+        output_config['DEFAULT']['dispatcher'] = out_types
+
     def start(self, args, **kwargs):
         """Start a benchmark scenario."""
 
@@ -56,14 +65,24 @@ class Task(object):     # pragma: no cover
         task_id = getattr(args, 'task_id')
         self.task_id = task_id if task_id else str(uuid.uuid4())
 
+        self._set_log()
+
         check_environment()
 
-        output_config = utils.parse_ini_file(config_file)
+        try:
+            output_config = utils.parse_ini_file(config_file)
+        except Exception:
+            # all errors are ignored; fall back to the default value {}
+            output_config = {}
+
         self._init_output_config(output_config)
         self._set_output_config(output_config, args.output_file)
         LOG.debug('Output configuration is: %s', output_config)
 
-        if output_config['DEFAULT'].get('dispatcher') == 'file':
+        self._set_dispatchers(output_config)
+
+        # update dispatcher list
+        if 'file' in output_config['DEFAULT']['dispatcher']:
             result = {'status': 0, 'result': {}}
             utils.write_json_to_file(args.output_file, result)
 
@@ -107,6 +126,7 @@ class Task(object):     # pragma: no cover
             except KeyboardInterrupt:
                 raise
             except Exception:
+                LOG.exception("Running test case %s failed!", case_name)
                 testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
             else:
                 testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
@@ -138,6 +158,17 @@ class Task(object):     # pragma: no cover
         print("Done, exiting")
         return result
 
+    def _set_log(self):
+        log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s'
+        log_formatter = logging.Formatter(log_format)
+
+        log_path = os.path.join(constants.TASK_LOG_DIR, '{}.log'.format(self.task_id))
+        log_handler = logging.FileHandler(log_path)
+        log_handler.setFormatter(log_formatter)
+        log_handler.setLevel(logging.DEBUG)
+
+        logging.root.addHandler(log_handler)
+
     def _init_output_config(self, output_config):
         output_config.setdefault('DEFAULT', {})
         output_config.setdefault('dispatcher_http', {})
@@ -193,9 +224,10 @@ class Task(object):     # pragma: no cover
         return 'PASS'
 
     def _do_output(self, output_config, result):
+        dispatchers = DispatcherBase.get(output_config)
 
-        dispatcher = DispatcherBase.get(output_config)
-        dispatcher.flush_result_data(result)
+        for dispatcher in dispatchers:
+            dispatcher.flush_result_data(result)
 
     def _run(self, scenarios, run_in_parallel, output_file):
         """Deploys context and calls runners"""
@@ -245,13 +277,12 @@ class Task(object):     # pragma: no cover
 
         # Wait for background runners to finish
         for runner in background_runners:
-            status = runner.join(timeout=60)
+            status = runner.join(JOIN_TIMEOUT)
             if status is None:
                 # Nuke if it did not stop nicely
                 base_runner.Runner.terminate(runner)
-                status = runner_join(runner)
-            else:
-                base_runner.Runner.release(runner)
+                runner.join(JOIN_TIMEOUT)
+            base_runner.Runner.release(runner)
 
             self.outputs.update(runner.get_output())
             result.extend(runner.get_result())
@@ -322,6 +353,8 @@ class Task(object):     # pragma: no cover
 
         if "nodes" in scenario_cfg:
             context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+            context_cfg["networks"] = get_networks_from_nodes(
+                context_cfg["nodes"])
 
         runner = base_runner.Runner.get(runner_cfg)
         print("Starting runner of type '%s'" % runner_cfg["type"])
@@ -518,7 +551,7 @@ class TaskParser(object):       # pragma: no cover
                                cfg_schema))
 
     def _check_precondition(self, cfg):
-        """Check if the envrionment meet the preconditon"""
+        """Check if the environment meets the precondition"""
 
         if "precondition" in cfg:
             precondition = cfg["precondition"]
@@ -573,14 +606,28 @@ def _is_background_scenario(scenario):
 
 
 def parse_nodes_with_context(scenario_cfg):
-    """paras the 'nodes' fields in scenario """
-    nodes = scenario_cfg["nodes"]
-
-    nodes_cfg = {}
-    for nodename in nodes:
-        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
-    return nodes_cfg
+    """Parse the 'nodes' field in the scenario."""
+    # ensure consistency in node instantiation order
+    return OrderedDict((nodename, Context.get_server(scenario_cfg["nodes"][nodename]))
+                       for nodename in sorted(scenario_cfg["nodes"]))
+
+
+def get_networks_from_nodes(nodes):
+    """Get networks used by the nodes' interfaces."""
+    networks = {}
+    for node in nodes.values():
+        if not node:
+            continue
+        interfaces = node.get('interfaces', {})
+        for interface in interfaces.values():
+            vld_id = interface.get('vld_id')
+            # mgmt network doesn't have vld_id
+            if not vld_id:
+                continue
+            network = Context.get_network({"vld_id": vld_id})
+            if network:
+                networks[network['name']] = network
+    return networks
 
 
 def runner_join(runner):
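
For context, a minimal standalone sketch of the new dispatcher handling introduced by this patch, assuming the 'dispatcher' option in yardstick.conf holds a comma-separated string; the sample value below is illustrative and parse_ini_file() is not actually called here:

# Illustrative sketch only -- mirrors the logic of _set_dispatchers() above.
# The sample config value is an assumption, not taken from the repository.
output_config = {'DEFAULT': {'dispatcher': 'file, http, influxdb'}}

dispatchers = output_config.get('DEFAULT', {}).get('dispatcher', 'file')
out_types = [s.strip() for s in dispatchers.split(',')]
output_config['DEFAULT']['dispatcher'] = out_types

assert out_types == ['file', 'http', 'influxdb']
# start() can now test membership per dispatcher type, e.g.:
if 'file' in output_config['DEFAULT']['dispatcher']:
    print('file dispatcher enabled')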
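Likewise, a minimal sketch of the node layout get_networks_from_nodes() expects; only the 'interfaces' and 'vld_id' keys come from the patch, while the node names, interface names, and extra fields are hypothetical:

# Hypothetical input -- node/interface names and 'local_ip' are made up for the example.
nodes = {
    'vnf__0': {
        'interfaces': {
            'xe0': {'vld_id': 'uplink_0'},     # has vld_id -> network is looked up
            'mgmt': {'local_ip': '10.0.0.1'},  # mgmt interface has no vld_id -> skipped
        },
    },
    'trexgen__0': None,                        # empty node entries are skipped as well
}
# For each vld_id, get_networks_from_nodes() calls
# Context.get_network({"vld_id": vld_id}) and stores the result keyed by the
# network's 'name', so interfaces sharing a network collapse to one entry.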