##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import pkg_resources
import logging
import json

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class Perf(base.Scenario):
    """Execute perf benchmark in a host

    Parameters
        events - perf tool software, hardware or tracepoint events
            type:       [str]
            unit:       na
            default:    ['task-clock']
        load - simulate load on the host by doing IO operations
            type:       bool
            unit:       na
            default:    false

    For more info about perf and perf events see https://perf.wiki.kernel.org
    """
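
    # Illustrative sketch (not part of the original module): the shape of
    # the args dict that run() consumes, reconstructed from the keys read
    # there ('options' and the optional 'sla'). The values below are
    # made-up examples, not defaults:
    #
    #   args = {
    #       'options': {
    #           'events': ['task-clock', 'cycles'],  # perf events to count
    #           'load': True,      # generate IO load with dd
    #           'duration': 10,    # seconds, when set by an arithmetic runner
    #       },
    #       'sla': {
    #           'metric': 'task-clock',
    #           'expected_value': 1000,
    #           # run() keys off the *presence* of this entry, not its value,
    #           # to select the smaller-than comparison
    #           'smaller_than_expected': True,
    #       },
    #   }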

    __scenario_type__ = "Perf"

    TARGET_SCRIPT = 'perf_benchmark.bash'

    def __init__(self, context):
        self.context = context
        self.setup_done = False

    def setup(self):
        """scenario setup"""
        self.target_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.compute', Perf.TARGET_SCRIPT)
        user = self.context.get('user', 'ubuntu')
        host = self.context.get('host', None)
        key_filename = self.context.get('key_filename', '~/.ssh/id_rsa')

        LOG.info("user:%s, host:%s", user, host)
        self.client = ssh.SSH(user, host, key_filename=key_filename)
        self.client.wait(timeout=600)

        # copy script to host
        self.client.run("cat > ~/perf_benchmark.sh",
                        stdin=open(self.target_script, "rb"))

        self.setup_done = True

    def run(self, args):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = args['options']
        events = options.get('events', ['task-clock'])

        events_string = " ".join(events)

        # if run by a duration runner
        duration_time = self.context.get("duration", None)
        # if run by an arithmetic runner
        arithmetic_time = options.get("duration", None)
        if duration_time:
            duration = duration_time
        elif arithmetic_time:
            duration = arithmetic_time
        else:
            duration = 30

        if options.get('load', False):
            load = "dd if=/dev/urandom of=/dev/null"
        else:
            load = "sleep %d" % duration

        cmd = "sudo bash perf_benchmark.sh '%s' %d %s" \
            % (load, duration, events_string)

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)

        if status:
            raise RuntimeError(stderr)

        output = json.loads(stdout)

        if "sla" in args:
            metric = args['sla']['metric']
            exp_val = args['sla']['expected_value']
            smaller_than_exp = 'smaller_than_expected' in args['sla']

            if metric not in output:
                assert False, "Metric (%s) not found." % metric
            elif smaller_than_exp:
                assert output[metric] < exp_val, "%s %s >= %s (sla)" \
                    % (metric, output[metric], exp_val)
            else:
                assert output[metric] >= exp_val, "%s %s < %s (sla)" \
                    % (metric, output[metric], exp_val)
        return output


def _test():
    """internal test function"""
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {'host': '172.16.0.137',
           'user': 'ubuntu',
           'key_filename': key_filename
           }

    logger = logging.getLogger('yardstick')
    logger.setLevel(logging.DEBUG)

    p = Perf(ctx)

    options = {'load': True}
    args = {'options': options}
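
    # Hypothetical SLA example (not in the original test): uncommenting the
    # line below would make run() assert on the measured metric. The
    # threshold is arbitrary; run() selects the smaller-than comparison from
    # the mere presence of a 'smaller_than_expected' key in the sla dict.
    # args['sla'] = {'metric': 'task-clock', 'expected_value': 1000}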

    result = p.run(args)
    print(result)

if __name__ == '__main__':
    _test()