Merge "Adding python package requirement for VNF testing."
[yardstick.git] yardstick/benchmark/scenarios/compute/lmbench.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import logging

import pkg_resources
from oslo_serialization import jsonutils

import yardstick.ssh as ssh
from yardstick.benchmark.scenarios import base

LOG = logging.getLogger(__name__)


class Lmbench(base.Scenario):
    """Execute the lmbench memory read latency, memory bandwidth or cache
    latency benchmark on a host

    Parameters
        test_type - which lmbench test to run: "latency" (memory read
        latency), "bandwidth" (memory bandwidth) or "latency_for_cache"
        (cache latency)
            type:       string
            unit:       na
            default:    "latency"

    Parameters for memory read latency benchmark
        stride - number of locations in memory between starts of array elements
            type:       int
            unit:       bytes
            default:    128
        stop_size - maximum array size to test (minimum value is 0.000512)
            type:       float
            unit:       megabytes
            default:    16.0

        Results are accurate to the ~2-5 nanosecond range.

    Parameters for memory bandwidth benchmark
        size - the amount of memory to test
            type:       int
            unit:       kilobyte
            default:    128
        benchmark - the name of the memory bandwidth benchmark test to execute.
        Valid test names are rd, wr, rdwr, cp, frd, fwr, fcp, bzero, bcopy
            type:       string
            unit:       na
            default:    "rd"
        warmup - the number of repetitions to perform before taking measurements
            type:       int
            unit:       na
            default:    0

    Parameters for cache latency benchmark
        repetition - the number of measurement repetitions to run
            type:       int
            unit:       na
            default:    1
        warmup - the number of repetitions to perform before taking measurements
            type:       int
            unit:       na
            default:    0

    More info: http://manpages.ubuntu.com/manpages/trusty/lmbench.8.html
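
    Example options (illustrative values only, here for the bandwidth test):
        {'test_type': 'bandwidth', 'size': 10240, 'benchmark': 'rd',
         'warmup': 0}
    with an optional SLA block such as {'min_bandwidth': 15000,
    'action': 'monitor'}.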
    """
    __scenario_type__ = "Lmbench"

    LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
    BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
    LATENCY_CACHE_SCRIPT = "lmbench_latency_for_cache.bash"

    def __init__(self, scenario_cfg, context_cfg):
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.setup_done = False

    def setup(self):
        """scenario setup"""
        self.bandwidth_target_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Lmbench.BANDWIDTH_BENCHMARK_SCRIPT)
        self.latency_target_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Lmbench.LATENCY_BENCHMARK_SCRIPT)
        self.latency_for_cache_script = pkg_resources.resource_filename(
            "yardstick.benchmark.scenarios.compute",
            Lmbench.LATENCY_CACHE_SCRIPT)
        host = self.context_cfg["host"]
        user = host.get("user", "ubuntu")
        ssh_port = host.get("ssh_port", ssh.DEFAULT_PORT)
        ip = host.get("ip", None)
        key_filename = host.get('key_filename', "~/.ssh/id_rsa")

        LOG.info("user:%s, host:%s", user, ip)
        self.client = ssh.SSH(user, ip, key_filename=key_filename,
                              port=ssh_port)
        self.client.wait(timeout=600)

        # copy scripts to host
        self.client._put_file_shell(
            self.latency_target_script, '~/lmbench_latency.sh')
        self.client._put_file_shell(
            self.bandwidth_target_script, '~/lmbench_bandwidth.sh')
        self.client._put_file_shell(
            self.latency_for_cache_script, '~/lmbench_latency_for_cache.sh')
        self.setup_done = True

    def run(self, result):
        """execute the benchmark"""

        if not self.setup_done:
            self.setup()

        options = self.scenario_cfg['options']
        test_type = options.get('test_type', 'latency')

        if test_type == 'latency':
            stride = options.get('stride', 128)
            stop_size = options.get('stop_size', 16.0)
            cmd = "sudo bash lmbench_latency.sh %f %d" % (stop_size, stride)
        elif test_type == 'bandwidth':
            size = options.get('size', 128)
            benchmark = options.get('benchmark', 'rd')
            warmup_repetitions = options.get('warmup', 0)
            cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
                  (size, benchmark, warmup_repetitions)
        elif test_type == 'latency_for_cache':
            repetition = options.get('repetition', 1)
            warmup = options.get('warmup', 0)
            cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
                  (repetition, warmup)
        else:
            raise RuntimeError("No such test_type: %s for Lmbench scenario"
                               % test_type)

        LOG.debug("Executing command: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)

        if status:
            raise RuntimeError(stderr)

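        # Each helper script prints its result as JSON on stdout: the latency
        # script emits a list of measurements (each entry has a 'latency'
        # field), while the bandwidth and cache scripts emit a flat object
        # (e.g. 'bandwidth(MBps)' or 'L1cache') used in the SLA checks below.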
        if test_type == 'latency':
            result.update(
                {"latencies": jsonutils.loads(stdout)})
        else:
            result.update(jsonutils.loads(stdout))

        if "sla" in self.scenario_cfg:
            sla_error = ""
            if test_type == 'latency':
                sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
                for t_latency in result["latencies"]:
                    latency = t_latency['latency']
                    if latency > sla_max_latency:
                        sla_error += "latency %f > sla:max_latency(%f); " \
                            % (latency, sla_max_latency)
            elif test_type == 'bandwidth':
                sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
                bw = result["bandwidth(MBps)"]
                if bw < sla_min_bw:
                    sla_error += "bandwidth %f < " \
                                 "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
            elif test_type == 'latency_for_cache':
                sla_latency = float(self.scenario_cfg['sla']['max_latency'])
                cache_latency = float(result['L1cache'])
                if sla_latency < cache_latency:
                    sla_error += "latency %f > sla:max_latency(%f); " \
                        % (cache_latency, sla_latency)
            assert sla_error == "", sla_error


def _test():
    """internal test function"""
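    # Note: the host IP, user and key below are sample values for running
    # this module directly; point them at a reachable host before use.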
    key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                   'files/yardstick_key')
    ctx = {
        'host': {
            'ip': '10.229.47.137',
            'user': 'root',
            'key_filename': key_filename
        }
    }

    logger = logging.getLogger('yardstick')
    logger.setLevel(logging.DEBUG)

    options = {
        'test_type': 'latency',
        'stride': 128,
        'stop_size': 16
    }

    sla = {'max_latency': 35, 'action': 'monitor'}
    args = {'options': options, 'sla': sla}
    result = {}

    p = Lmbench(args, ctx)
    p.run(result)
    print(result)


if __name__ == '__main__':
    _test()