3 ##############################################################################
4 # Copyright (c) 2015 Ericsson AB and others.
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
12 """ yardstick - command line tool for managing benchmarks
15 $ yardstick samples/ping-task.yaml
23 from yardstick.benchmark.context.model import Context
24 from yardstick.benchmark.runners import base as base_runner
25 from yardstick.cmdparser import CmdParser
26 from yardstick.orchestrator.heat import HeatStack
29 class TaskParser(object):
30 '''Parser for task config files in yaml format'''
31 def __init__(self, path):
35 '''parses the task file and return an context and scenario instances'''
36 print "Parsing task config:", self.path
38 with open(self.path) as stream:
39 cfg = yaml.load(stream)
40 except IOError as ioerror:
43 if cfg["schema"] != "yardstick:task:0.1":
44 sys.exit("error: file %s has unknown schema %s" % (self.path,
47 context.init(cfg["context"])
49 run_in_parallel = cfg.get("run_in_parallel", False)
51 # TODO we need something better here, a class that represent the file
52 return cfg["scenarios"], run_in_parallel, context
56 '''handler for process termination'''
57 base_runner.Runner.terminate_all()
59 if HeatStack.stacks_exist():
60 print "Deleting all stacks"
61 HeatStack.delete_all()
64 def run_one_scenario(scenario_cfg, context, output_file):
65 '''run one scenario using context'''
66 key_filename = pkg_resources.resource_filename(
67 'yardstick.resources', 'files/yardstick_key')
69 host = context.get_server(scenario_cfg["host"])
71 runner_cfg = scenario_cfg["runner"]
72 runner_cfg['host'] = host.floating_ip["ipaddr"]
73 runner_cfg['user'] = context.user
74 runner_cfg['key_filename'] = key_filename
75 runner_cfg['output_filename'] = output_file
77 target = context.get_server(scenario_cfg["target"])
78 if target.floating_ip:
79 runner_cfg['target'] = target.floating_ip["ipaddr"]
81 # TODO hardcoded name below, a server can be attached to several nets
82 scenario_cfg["ipaddr"] = target.ports["test"]["ipaddr"]
84 runner = base_runner.Runner.get(runner_cfg)
86 print "Starting runner of type '%s'" % runner_cfg["type"]
87 runner.run(scenario_cfg["type"], scenario_cfg)
def runner_join(runner):
    '''join (wait for) a runner, exit process at runner failure'''
    status = runner.join()
    # Release the runner's resources before acting on the status.
    base_runner.Runner.release(runner)
    # Fix: exit only on failure — the fragment called sys.exit
    # unconditionally, which contradicts the docstring and would abort
    # the whole process after every successful runner.
    if status != 0:
        sys.exit("Runner failed")
103 atexit.register(atexit_handler)
105 prog_args = CmdParser().parse_args()
107 parser = TaskParser(prog_args.taskfile[0])
108 scenarios, run_in_parallel, context = parser.parse()
110 if prog_args.parse_only:
117 for scenario in scenarios:
118 runner = run_one_scenario(scenario, context, prog_args.output_file)
119 runners.append(runner)
121 # Wait for runners to finish
122 for runner in runners:
124 print "Runner ended, output in", prog_args.output_file
127 for scenario in scenarios:
128 runner = run_one_scenario(scenario, context, prog_args.output_file)
130 print "Runner ended, output in", prog_args.output_file
132 if prog_args.keep_deploy:
133 # keep deployment, forget about stack (hide it for exit handler)
138 print "Done, exiting"
140 if __name__ == '__main__':