yardstick/benchmark/scenarios/compute/perf.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import pkg_resources
import logging
import json

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)


class Perf(base.Scenario):
    """Execute perf benchmark on a host

    Parameters
        events - perf tool software, hardware or tracepoint events
            type:       [str]
            unit:       na
            default:    ['task-clock']
        load - simulate load on the host by doing IO operations
            type:       bool
            unit:       na
            default:    false

    For more info about perf and perf events see https://perf.wiki.kernel.org
    """

    __scenario_type__ = "Perf"

    TARGET_SCRIPT = 'perf_benchmark.bash'

    def __init__(self, context):
        self.context = context
        self.setup_done = False

    def setup(self):
        """scenario setup"""
        self.target_script = pkg_resources.resource_filename(
            'yardstick.benchmark.scenarios.compute', Perf.TARGET_SCRIPT)
        user = self.context.get('user', 'ubuntu')
        host = self.context.get('host', None)
        key_filename = self.context.get('key_filename', '~/.ssh/id_rsa')

        LOG.debug("user:%s, host:%s", user, host)
        self.client = ssh.SSH(user, host, key_filename=key_filename)
        # wait (up to 10 minutes) for the host's SSH service to come up
        self.client.wait(timeout=600)

        # copy script to host, closing the local file handle when done
        with open(self.target_script, "rb") as script_file:
            self.client.run("cat > ~/perf_benchmark.sh", stdin=script_file)

        self.setup_done = True

    def run(self, args):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = args['options']
        events = options.get('events', ['task-clock'])

        events_string = " ".join(events)

        # if run by a duration runner
        duration_time = self.context.get("duration", None)
        # if run by an arithmetic runner
        arithmetic_time = options.get("duration", None)
        if duration_time:
            duration = duration_time
        elif arithmetic_time:
            duration = arithmetic_time
        else:
            duration = 30

        # 'load' is a boolean option; generate I/O load only when it is true
        if options.get('load', False):
            load = "dd if=/dev/urandom of=/dev/null"
        else:
            load = "sleep %d" % duration

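        # The assembled command line looks like this (illustrative, assuming
        # the defaults above):
        #   sudo bash perf_benchmark.sh 'sleep 30' 30 task-clock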
        cmd = "sudo bash perf_benchmark.sh '%s' %d %s" \
            % (load, duration, events_string)

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)

        if status:
            # a non-zero exit status signals failure; report the error output
            raise RuntimeError(stderr)

        output = json.loads(stdout)

        if "sla" in args:
            metric = args['sla']['metric']
            exp_val = args['sla']['expected_value']
            smaller_than_exp = 'smaller_than_expected' in args['sla']

            assert metric in output, "Metric (%s) not found." % metric
            if smaller_than_exp:
                assert output[metric] < exp_val, "%s %d >= %d (sla)" \
                    % (metric, output[metric], exp_val)
            else:
                assert output[metric] >= exp_val, "%s %d < %d (sla)" \
                    % (metric, output[metric], exp_val)
        return output

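# Example 'args' as a runner might pass them to run() (illustrative; the
# 'sla' section is optional, and 'smaller_than_expected' is detected by key
# presence, not by its value):
#
#   args = {
#       'options': {'events': ['task-clock'], 'load': True},
#       'sla': {'metric': 'task-clock', 'expected_value': 1000},
#   }
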
def _test():
    """internal test function"""
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {'host': '172.16.0.137',
           'user': 'ubuntu',
           'key_filename': key_filename
           }

    logger = logging.getLogger('yardstick')
    logger.setLevel(logging.DEBUG)

    p = Perf(ctx)

    options = {'load': True}
    args = {'options': options}

    result = p.run(args)
    print(result)


if __name__ == '__main__':
    _test()