##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import logging

import pkg_resources
from oslo_serialization import jsonutils

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class Unixbench(base.Scenario):
    """Execute Unixbench cpu benchmark in a host

    The Run script takes a number of options which you can use to customise a
    test, and you can specify the names of the tests to run. The full usage
    is:

    Run [ -q | -v ] [-i <n> ] [-c <n> [-c <n> ...]] [test ...]

    -i <count>    Run <count> iterations for each test -- slower tests
                  use <count> / 3, but at least 1. Defaults to 10 (3 for
                  slow tests).
    -c <n>        Run <n> copies of each test in parallel.

    Parameters for setting unixbench
        run_mode - Run in quiet mode or verbose mode
        test_type - The available tests are organised into categories;
            naming a category or an individual test runs only those tests
        iterations - Run <count> iterations for each test -- slower tests
            use <count> / 3, but at least 1. Defaults to 10 (3 for slow tests).
        copies - Run <n> copies of each test in parallel.

    more info https://github.com/kdlucas/byte-unixbench/blob/master/UnixBench
    """
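
    # A minimal sketch of how a scenario's "options" block maps onto the Run
    # flags handled in run() below; the values are hypothetical, not taken
    # from a shipped sample file:
    #
    #   options:
    #     run_mode: verbose    # -> -v ("quiet" -> -q)
    #     iterations: 2        # -> -i 2
    #     copies: 1            # -> -c 1
    #     test_type: dhrystone # appended last as the test/category name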
    __scenario_type__ = "UnixBench"

    TARGET_SCRIPT = "unixbench_benchmark.bash"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False

    def setup(self):
        """scenario setup"""
        self.target_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Unixbench.TARGET_SCRIPT)

        host = self.context_cfg["host"]
        user = host.get("user", "ubuntu")
        ssh_port = host.get("ssh_port", ssh.DEFAULT_PORT)
        ip = host.get("ip", None)
        key_filename = host.get('key_filename', "~/.ssh/id_rsa")

        LOG.info("user:%s, host:%s", user, ip)
        self.client = ssh.SSH(user, ip, key_filename=key_filename,
                              port=ssh_port)
        self.client.wait(timeout=600)

        # copy the benchmark script to the target host
        self.client._put_file_shell(
            self.target_script, '~/unixbench_benchmark.sh')

        self.setup_done = True

    def run(self, result):
        """execute the benchmark"""
        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg["options"]

        run_mode = options.get("run_mode", None)
        LOG.debug("Executing run_mode: %s", run_mode)
        cmd_args = ""
        if run_mode == "quiet":
            cmd_args = "-q"
        elif run_mode == "verbose":
            cmd_args = "-v"

        option_pair_list = [("iterations", "-i"),
                            ("copies", "-c")]
        for option_pair in option_pair_list:
            if option_pair[0] in options:
                cmd_args += " %s %s " % (option_pair[1],
                                         options[option_pair[0]])

        test_type = options.get("test_type", None)
        if test_type is not None:
            cmd_args += " %s " % (test_type)
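
        # For illustration (hypothetical option values): run_mode "verbose",
        # iterations 2 and test_type "dhrystone" build roughly
        #   sudo bash unixbench_benchmark.sh -v -i 2  dhrystone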

        cmd = "sudo bash unixbench_benchmark.sh %s" % (cmd_args)
        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

        result.update(jsonutils.loads(stdout))
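
        # Sketch of the "sla" block this check expects; the metric names are
        # an assumption and must match the keys of the JSON printed by
        # unixbench_benchmark.bash:
        #   sla:
        #     single_score: "100"
        #     parallel_score: "160"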

        if "sla" in self.scenario_cfg:
            sla_error = ""
            for t, score in result.items():
                if t not in self.scenario_cfg['sla']:
                    continue
                sla_score = float(self.scenario_cfg['sla'][t])
                score = float(score)
                if score < sla_score:
                    sla_error += "%s score %f < sla:%s_score(%f); " % \
                        (t, score, t, sla_score)
            assert sla_error == "", sla_error


def _test():  # pragma: no cover
    """internal test function"""
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {
        'host': {
            'ip': '10.229.47.137',
            'key_filename': key_filename
        }
    }

    options = {
        'test_type': 'dhrystone',
        'run_mode': 'verbose'
    }
    args = {'options': options}
    result = {}

    p = Unixbench(args, ctx)
    p.run(result)
    print(result)


if __name__ == '__main__':
    _test()