Merge "Cleanup process test cases"
yardstick/benchmark/scenarios/compute/lmbench.py
index e15fe7e..2237e49 100644
@@ -6,11 +6,16 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import pkg_resources
+from __future__ import absolute_import
+from __future__ import print_function
+
 import logging
-import json
+
+import pkg_resources
+from oslo_serialization import jsonutils
 
 import yardstick.ssh as ssh
+from yardstick.common import utils
 from yardstick.benchmark.scenarios import base
 
 LOG = logging.getLogger(__name__)
@@ -57,6 +62,7 @@ class Lmbench(base.Scenario):
 
     LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
     BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
+    LATENCY_CACHE_SCRIPT = "lmbench_latency_for_cache.bash"
 
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
@@ -71,20 +77,21 @@ class Lmbench(base.Scenario):
         self.latency_target_script = pkg_resources.resource_filename(
             "yardstick.benchmark.scenarios.compute",
             Lmbench.LATENCY_BENCHMARK_SCRIPT)
+        self.latency_for_cache_script = pkg_resources.resource_filename(
+            "yardstick.benchmark.scenarios.compute",
+            Lmbench.LATENCY_CACHE_SCRIPT)
         host = self.context_cfg["host"]
-        user = host.get("user", "ubuntu")
-        ip = host.get("ip", None)
-        key_filename = host.get('key_filename', "~/.ssh/id_rsa")
 
-        LOG.info("user:%s, host:%s", user, ip)
-        self.client = ssh.SSH(user, ip, key_filename=key_filename)
+        self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
         self.client.wait(timeout=600)
 
         # copy scripts to host
-        self.client.run("cat > ~/lmbench_latency.sh",
-                        stdin=open(self.latency_target_script, 'rb'))
-        self.client.run("cat > ~/lmbench_bandwidth.sh",
-                        stdin=open(self.bandwidth_target_script, 'rb'))
+        self.client._put_file_shell(
+            self.latency_target_script, '~/lmbench_latency.sh')
+        self.client._put_file_shell(
+            self.bandwidth_target_script, '~/lmbench_bandwidth.sh')
+        self.client._put_file_shell(
+            self.latency_for_cache_script, '~/lmbench_latency_for_cache.sh')
         self.setup_done = True
 
     def run(self, result):
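Note: ssh.SSH.from_node builds the SSH client straight from the host node mapping, replacing the per-key user/ip/key_filename lookups removed above. A minimal sketch of the context_cfg["host"] entry this expects, with placeholder values:

    import yardstick.ssh as ssh  # already imported at the top of this module

    # Illustrative host mapping only; address and key path are placeholders.
    # "user" may be omitted because the call passes defaults={"user": "ubuntu"}.
    host = {
        "ip": "10.0.0.5",
        "user": "ubuntu",
        "key_filename": "~/.ssh/id_rsa",
    }
    client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
    client.wait(timeout=600)
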
@@ -106,9 +113,14 @@ class Lmbench(base.Scenario):
             warmup_repetitions = options.get('warmup', 0)
             cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
                   (size, benchmark, warmup_repetitions)
+        elif test_type == 'latency_for_cache':
+            repetition = options.get('repetition', 1)
+            warmup = options.get('warmup', 0)
+            cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
+                  (repetition, warmup)
         else:
-            raise RuntimeError("No such test_type: %s for Lmbench scenario",
-                               test_type)
+            raise RuntimeError("No such test_type: %s for Lmbench scenario"
+                               % test_type)
 
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
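For reference, a sketch of how the new latency_for_cache branch maps scenario options onto the shell command; the option names and defaults come from the hunk above, the values are made up:

    # Sketch only: mirrors the command construction added above.
    options = {"test_type": "latency_for_cache", "repetition": 10, "warmup": 5}
    repetition = options.get("repetition", 1)  # default: a single run
    warmup = options.get("warmup", 0)          # default: no warm-up pass
    cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % (repetition, warmup)
    # -> "sudo bash lmbench_latency_for_cache.sh 10 5"
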
@@ -116,27 +128,36 @@ class Lmbench(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
+        lmbench_result = {}
         if test_type == 'latency':
-            result.update({"latencies": json.loads(stdout)})
+            lmbench_result.update(
+                {"latencies": jsonutils.loads(stdout)})
         else:
-            result.update(json.loads(stdout))
+            lmbench_result.update(jsonutils.loads(stdout))
+        result.update(utils.flatten_dict_key(lmbench_result))
 
         if "sla" in self.scenario_cfg:
             sla_error = ""
             if test_type == 'latency':
                 sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
-                for t_latency in result["latencies"]:
+                for t_latency in lmbench_result["latencies"]:
                     latency = t_latency['latency']
                     if latency > sla_max_latency:
                         sla_error += "latency %f > sla:max_latency(%f); " \
                             % (latency, sla_max_latency)
-            else:
+            elif test_type == 'bandwidth':
                 sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
-                bw = result["bandwidth(MBps)"]
+                bw = lmbench_result["bandwidth(MBps)"]
                 if bw < sla_min_bw:
                     sla_error += "bandwidth %f < " \
                                  "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
-            assert sla_error == "", sla_error
+            elif test_type == 'latency_for_cache':
+                sla_latency = float(self.scenario_cfg['sla']['max_latency'])
+                cache_latency = float(lmbench_result['L1cache'])
+                if sla_latency < cache_latency:
+                    sla_error += "latency %f > sla:max_latency(%f); " \
+                        % (cache_latency, sla_latency)
+            self.verify_SLA(sla_error == "", sla_error)
 
 
 def _test():
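The parsed lmbench output can be nested, so the scenario now flattens it before merging into result. The exact key format produced by utils.flatten_dict_key is not shown in this change; the stand-in below only illustrates the idea of collapsing nested dicts into single-level keys and is not yardstick's implementation:

    # Hypothetical illustration, NOT yardstick.common.utils.flatten_dict_key:
    # collapses nested dicts into one level with dotted key names.
    def _flatten(data, prefix=""):
        flat = {}
        for key, value in data.items():
            name = "%s.%s" % (prefix, key) if prefix else str(key)
            if isinstance(value, dict):
                flat.update(_flatten(value, name))
            else:
                flat[name] = value
        return flat

    # e.g. {"L1cache": {"latency": 1.2}} -> {"L1cache.latency": 1.2}
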
@@ -166,7 +187,8 @@ def _test():
 
     p = Lmbench(args, ctx)
     p.run(result)
-    print result
+    print(result)
+
 
 if __name__ == '__main__':
     _test()
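
A usage sketch for the new test type, modeled on this module's _test() helper; the host details are placeholders and the SLA threshold is arbitrary:

    # Sketch only: exercises the latency_for_cache branch added in this change.
    ctx = {
        "host": {
            "ip": "10.0.0.5",                    # placeholder address
            "user": "root",
            "key_filename": "/root/.ssh/id_rsa"  # placeholder key path
        }
    }
    args = {
        "options": {"test_type": "latency_for_cache",
                    "repetition": 10, "warmup": 5},
        "sla": {"max_latency": 35}  # checked against the reported L1cache value
    }
    result = {}

    p = Lmbench(args, ctx)
    p.run(result)
    print(result)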