yardstick/benchmark/scenarios/compute/lmbench.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import logging

import pkg_resources
from oslo_serialization import jsonutils

import yardstick.ssh as ssh
from yardstick.common import utils
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class Lmbench(base.Scenario):
    """Execute the lmbench memory read latency, cache latency or memory
    bandwidth benchmark in a host

    Parameters
        test_type - the test to run: "latency", "bandwidth" or
        "latency_for_cache"
            type:       string
            unit:       na
            default:    "latency"

    Parameters for memory read latency benchmark
        stride - number of locations in memory between starts of array elements
            type:       int
            unit:       bytes
            default:    128
        stop_size - maximum array size to test (minimum value is 0.000512)
            type:       float
            unit:       megabytes
            default:    16.0

        Results are accurate to the ~2-5 nanosecond range.

    Parameters for memory bandwidth benchmark
        size - the amount of memory to test
            type:       int
            unit:       kilobyte
            default:    128
        benchmark - the name of the memory bandwidth benchmark test to execute.
        Valid test names are rd, wr, rdwr, cp, frd, fwr, fcp, bzero, bcopy
            type:       string
            unit:       na
            default:    "rd"
        warmup - the number of repetitions to perform before taking measurements
            type:       int
            unit:       na
            default:    0

    Parameters for cache latency benchmark
        repetition - the number of times to repeat the measurement
            type:       int
            unit:       na
            default:    1
        warmup - the number of repetitions to perform before taking measurements
            type:       int
            unit:       na
            default:    0

    more info http://manpages.ubuntu.com/manpages/trusty/lmbench.8.html
    """
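    # A minimal sketch of how this scenario might be configured in a task
    # file, assuming the usual yardstick "options"/"sla" layout; the values
    # below are illustrative only, not defaults:
    #
    #   options:
    #     test_type: "bandwidth"
    #     size: 500
    #     benchmark: "rdwr"
    #     warmup: 0
    #   sla:
    #     min_bandwidth: 10000
    #     action: monitor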
    __scenario_type__ = "Lmbench"

    LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
    BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
    LATENCY_CACHE_SCRIPT = "lmbench_latency_for_cache.bash"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False

    def setup(self):
        """scenario setup"""
        self.bandwidth_target_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Lmbench.BANDWIDTH_BENCHMARK_SCRIPT)
        self.latency_target_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Lmbench.LATENCY_BENCHMARK_SCRIPT)
        self.latency_for_cache_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Lmbench.LATENCY_CACHE_SCRIPT)
        host = self.context_cfg["host"]

        self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
        self.client.wait(timeout=600)

        # copy scripts to host
        self.client._put_file_shell(
            self.latency_target_script, '~/lmbench_latency.sh')
        self.client._put_file_shell(
            self.bandwidth_target_script, '~/lmbench_bandwidth.sh')
        self.client._put_file_shell(
            self.latency_for_cache_script, '~/lmbench_latency_for_cache.sh')
        self.setup_done = True

    def run(self, result):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg['options']
        test_type = options.get('test_type', 'latency')

        if test_type == 'latency':
            stride = options.get('stride', 128)
            stop_size = options.get('stop_size', 16.0)
            cmd = "sudo bash lmbench_latency.sh %f %d" % (stop_size, stride)
        elif test_type == 'bandwidth':
            size = options.get('size', 128)
            benchmark = options.get('benchmark', 'rd')
            warmup_repetitions = options.get('warmup', 0)
            cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
                  (size, benchmark, warmup_repetitions)
        elif test_type == 'latency_for_cache':
            repetition = options.get('repetition', 1)
            warmup = options.get('warmup', 0)
            cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
                  (repetition, warmup)
        else:
            raise RuntimeError("No such test_type: %s for Lmbench scenario"
                               % test_type)

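        # Each helper script is expected to print its measurements as JSON on
        # stdout; the output is parsed and flattened into the result dict
        # below.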
        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)

        if status:
            raise RuntimeError(stderr)

        lmbench_result = {}
        if test_type == 'latency':
            lmbench_result.update(
                {"latencies": jsonutils.loads(stdout)})
        else:
            lmbench_result.update(jsonutils.loads(stdout))
        result.update(utils.flatten_dict_key(lmbench_result))

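        # Optional SLA validation: compare the parsed results against the
        # thresholds configured in the scenario's "sla" section.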
        if "sla" in self.scenario_cfg:
            sla_error = ""
            if test_type == 'latency':
                sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
                for t_latency in lmbench_result["latencies"]:
                    latency = t_latency['latency']
                    if latency > sla_max_latency:
                        sla_error += "latency %f > sla:max_latency(%f); " \
                            % (latency, sla_max_latency)
            elif test_type == 'bandwidth':
                sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
                bw = lmbench_result["bandwidth(MBps)"]
                if bw < sla_min_bw:
                    sla_error += "bandwidth %f < " \
                                 "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
            elif test_type == 'latency_for_cache':
                sla_latency = float(self.scenario_cfg['sla']['max_latency'])
                cache_latency = float(lmbench_result['L1cache'])
                if sla_latency < cache_latency:
                    sla_error += "latency %f > sla:max_latency(%f); " \
                        % (cache_latency, sla_latency)
            assert sla_error == "", sla_error


def _test():
    """internal test function"""
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {
        'host': {
            'ip': '10.229.47.137',
            'user': 'root',
            'key_filename': key_filename
        }
    }

    logger = logging.getLogger('yardstick')
    logger.setLevel(logging.DEBUG)

    options = {
        'test_type': 'latency',
        'stride': 128,
        'stop_size': 16
    }

    sla = {'max_latency': 35, 'action': 'monitor'}
    args = {'options': options, 'sla': sla}
    result = {}

    p = Lmbench(args, ctx)
    p.run(result)
    print(result)


if __name__ == '__main__':
    _test()