1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 """ Handler for yardstick command 'task' """
19 from yardstick.benchmark.contexts.base import Context
20 from yardstick.benchmark.runners import base as base_runner
21 from yardstick.common.task_template import TaskTemplate
22 from yardstick.common.utils import cliargs
# Default file where scenario runner output records are written.
24 output_file_default = "/tmp/yardstick.out"
# Default directory searched for test-case task files named by a suite
# (relative path; presumably resolved against the yardstick repo root --
# TODO confirm against callers).
25 test_cases_dir_default = "tests/opnfv/test_cases/"
# NOTE(review): this listing is an elided excerpt -- several original
# source lines are missing inside the methods below, so added comments
# are hedged wherever the missing context matters.
28 class TaskCommands(object):
31 Set of commands to manage benchmark tasks.
# The @cliargs decorators register argparse-style options on do_start
# for the "task start" CLI command.
34 @cliargs("inputfile", type=str, help="path to task or suite file", nargs=1)
35 @cliargs("--task-args", dest="task_args",
36 help="Input task args (dict in json). These args are used"
37 "to render input task that is jinja2 template.")
38 @cliargs("--task-args-file", dest="task_args_file",
39 help="Path to the file with input task args (dict in "
40 "json/yaml). These args are used to render input"
41 "task that is jinja2 template.")
42 @cliargs("--keep-deploy", help="keep context deployed in cloud",
44 @cliargs("--parse-only", help="parse the config file and exit",
46 @cliargs("--output-file", help="file where output is stored, default %s" %
47 output_file_default, default=output_file_default)
48 @cliargs("--suite", help="process test suite file instead of a task file",
50 def do_start(self, args):
51 '''Start a benchmark scenario.'''
# Ensure contexts get undeployed on any process exit path.
53 atexit.register(atexit_handler)
55 parser = TaskParser(args.inputfile[0])
# Suite mode (presumably guarded by args.suite -- the guard line is
# elided here): expand the suite into task file paths plus per-task
# argument lists.
59 suite_params = parser.parse_suite()
60 test_cases_dir = suite_params["test_cases_dir"]
61 if test_cases_dir[-1] != os.sep:
62 test_cases_dir += os.sep
63 task_files = [test_cases_dir + task
64 for task in suite_params["task_fnames"]]
# Single-task mode: run just the file given on the command line.
66 task_files = [parser.path]
# Fall back to the CLI-supplied args when the suite does not carry
# per-task argument lists; lists stay index-aligned with task_files.
68 task_args = suite_params.get("task_args", [args.task_args])
69 task_args_fnames = suite_params.get("task_args_fnames",
70 [args.task_args_file])
# Start from a clean output file so this run's records are not
# appended to a previous run's output.
75 if os.path.isfile(args.output_file):
76 os.remove(args.output_file)
# Parse and execute each task file in order, pairing it with its
# task args by index.
78 for i in range(0, len(task_files)):
79 parser.path = task_files[i]
80 scenarios, run_in_parallel = parser.parse_task(task_args[i],
83 self._run(scenarios, run_in_parallel, args.output_file)
86 # keep deployment, forget about stack
87 # (hide it for exit handler)
90 for context in Context.list:
96 def _run(self, scenarios, run_in_parallel, output_file):
97 '''Deploys context and calls runners'''
# Deploy every registered context before starting any scenario
# (the deploy call itself is elided in this excerpt -- TODO confirm).
98 for context in Context.list:
# Parallel path: start one runner per scenario, then join them all.
103 for scenario in scenarios:
104 runner = run_one_scenario(scenario, output_file)
105 runners.append(runner)
107 # Wait for runners to finish
108 for runner in runners:
110 print "Runner ended, output in", output_file
# Serial path: run each scenario to completion before the next one.
113 for scenario in scenarios:
114 runner = run_one_scenario(scenario, output_file)
116 print "Runner ended, output in", output_file
118 # TODO: Move stuff below into TaskCommands class !?
121 class TaskParser(object):
122 '''Parser for task config files in yaml format'''
123 def __init__(self, path):
# (body elided in this excerpt; presumably stores the argument as
# self.path, which is read throughout the class -- TODO confirm)
126 def parse_suite(self):
127 '''parse the suite file and return a list of task config file paths
128 and lists of optional parameters if present'''
129 print "Parsing suite file:", self.path
# Load the suite YAML; the enclosing try: line is elided and the
# IOError handler follows below.
132 with open(self.path) as stream:
# NOTE(review): yaml.load without an explicit Loader can construct
# arbitrary Python objects -- consider yaml.safe_load for configs.
133 cfg = yaml.load(stream)
134 except IOError as ioerror:
137 self._check_schema(cfg["schema"], "suite")
138 print "Starting suite:", cfg["name"]
140 test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
143 task_args_fnames = []
# Collect, per test case: its file name plus the optional task_args
# and task_args_file entries.  None placeholders keep the three
# lists index-aligned.
145 for task in cfg["test_cases"]:
146 task_fnames.append(task["file_name"])
147 if "task_args" in task:
148 task_args.append(task["task_args"])
150 task_args.append(None)
152 if "task_args_file" in task:
153 task_args_fnames.append(task["task_args_file"])
155 task_args_fnames.append(None)
# Returned dict (the return statement itself is partially elided in
# this excerpt):
158 "test_cases_dir": test_cases_dir,
159 "task_fnames": task_fnames,
160 "task_args": task_args,
161 "task_args_fnames": task_args_fnames
166 def parse_task(self, task_args=None, task_args_file=None):
167 '''parses the task file and return an context and scenario instances'''
168 print "Parsing task config:", self.path
# Build jinja2 template kwargs from the args file first, then let
# inline task_args override entries with the same keys.
173 with open(task_args_file) as f:
174 kw.update(parse_task_args("task_args_file", f.read()))
175 kw.update(parse_task_args("task_args", task_args))
180 with open(self.path) as f:
182 input_task = f.read()
183 rendered_task = TaskTemplate.render(input_task, **kw)
184 except Exception as e:
185 print(("Failed to render template:\n%(task)s\n%(err)s\n")
186 % {"task": input_task, "err": e})
188 print(("Input task is:\n%s\n") % rendered_task)
# Parse the rendered jinja2 output as YAML.
190 cfg = yaml.load(rendered_task)
191 except IOError as ioerror:
194 self._check_schema(cfg["schema"], "task")
196 # TODO: support one or many contexts? Many would simpler and precise
197 # TODO: support hybrid context type
# Accept either a single "context" mapping or a "contexts" list
# (the if/else around these two assignments is elided here).
199 context_cfgs = [cfg["context"]]
201 context_cfgs = cfg["contexts"]
203 for cfg_attrs in context_cfgs:
# Heat is the default context type when none is given.
204 context_type = cfg_attrs.get("type", "Heat")
205 if "Heat" == context_type and "networks" in cfg_attrs:
206 # config external_network based on env var
207 for _, attrs in cfg_attrs["networks"].items():
208 attrs["external_network"] = os.environ.get(
209 'EXTERNAL_NETWORK', 'net04_ext')
210 context = Context.get(context_type)
211 context.init(cfg_attrs)
213 run_in_parallel = cfg.get("run_in_parallel", False)
215 # TODO we need something better here, a class that represent the file
216 return cfg["scenarios"], run_in_parallel
218 def _check_schema(self, cfg_schema, schema_type):
219 '''Check if config file is using the correct schema type'''
# Only schema "yardstick:<type>:0.1" is accepted; anything else
# aborts the whole process via sys.exit.
221 if cfg_schema != "yardstick:" + schema_type + ":0.1":
222 sys.exit("error: file %s has unknown schema %s" % (self.path,
226 def atexit_handler():
227 '''handler for process termination'''
# Stop all runners first so nothing is still using the contexts.
228 base_runner.Runner.terminate_all()
230 if len(Context.list) > 0:
231 print "Undeploying all contexts"
# The undeploy call inside the loop is elided in this excerpt.
232 for context in Context.list:
236 def is_ip_addr(addr):
'''check if string addr is an IP address'''
# ipaddress.ip_address raises ValueError for non-IP strings; the
# surrounding try/except and the True/False returns are elided in
# this excerpt.  unicode() is Python 2 -- the ipaddress backport
# requires unicode input.
239 ipaddress.ip_address(unicode(addr))
245 def run_one_scenario(scenario_cfg, output_file):
246 '''run one scenario using context'''
# Private SSH key shipped inside the yardstick package, used to reach
# the deployed servers.
247 key_filename = pkg_resources.resource_filename(
248 'yardstick.resources', 'files/yardstick_key')
250 # TODO support get multi hosts/vms info
251 host = Context.get_server(scenario_cfg["host"])
# The runner connects to the host's public IP as the context user
# with the packaged key, and streams records to output_file.
253 runner_cfg = scenario_cfg["runner"]
254 runner_cfg['host'] = host.public_ip
255 runner_cfg['user'] = host.context.user
256 runner_cfg['key_filename'] = key_filename
257 runner_cfg['output_filename'] = output_file
259 if "target" in scenario_cfg:
# A literal IP target is used as-is; otherwise the name is resolved
# to a server through its context (the else: line is elided here).
260 if is_ip_addr(scenario_cfg["target"]):
261 scenario_cfg["ipaddr"] = scenario_cfg["target"]
263 target = Context.get_server(scenario_cfg["target"])
265 # get public IP for target server, some scenarios require it
267 runner_cfg['target'] = target.public_ip
269 # TODO scenario_cfg["ipaddr"] is bad naming
# Cross-context traffic must use the target's public IP; within the
# same context the private IP is reachable directly.
270 if host.context != target.context:
271 # target is in another context, get its public IP
272 scenario_cfg["ipaddr"] = target.public_ip
274 # target is in the same context, get its private IP
275 scenario_cfg["ipaddr"] = target.private_ip
277 runner = base_runner.Runner.get(runner_cfg)
279 print "Starting runner of type '%s'" % runner_cfg["type"]
280 runner.run(scenario_cfg["type"], scenario_cfg)
# (the `return runner` expected by callers in _run appears to be
# elided from this excerpt -- TODO confirm against full source)
285 def runner_join(runner):
286 '''join (wait for) a runner, exit process at runner failure'''
287 status = runner.join()
288 base_runner.Runner.release(runner)
# NOTE(review): the guard testing `status` (original line 289) is
# elided here; sys.exit presumably runs only on a nonzero status --
# confirm against the full source.
290 sys.exit("Runner failed")
def print_invalid_header(source_name, args):
    '''Report an invalid input value on stdout.

    source_name -- label of the offending input (e.g. "task_args")
    args -- the raw value that failed validation, echoed for debugging
    '''
    header = "Invalid %(source)s passed:\n\n %(args)s\n"
    print(header % {"source": source_name, "args": args})
298 def parse_task_args(src_name, args):
# Parse args as YAML (safe_load: plain data types only); a falsy or
# empty input yields an empty dict.  The enclosing try: line is
# elided in this excerpt.
300 kw = args and yaml.safe_load(args)
301 kw = {} if kw is None else kw
302 except yaml.parser.ParserError as e:
303 print_invalid_header(src_name, args)
304 print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
305 % {"source": src_name, "err": e})
# The parsed value must be a mapping; anything else is reported as
# invalid (the raise/return lines following each branch are elided
# in this excerpt).
308 if not isinstance(kw, dict):
309 print_invalid_header(src_name, args)
310 print(("%(src)s had to be dict, actually %(src_type)s\n")
311 % {"src": src_name, "src_type": type(kw)})