add latency for cache read operations (LMBench) 47/11747/3
authorkubi <jean.gaoliang@huawei.com>
Fri, 1 Apr 2016 03:02:11 +0000 (11:02 +0800)
committerqi liang <liangqi1@huawei.com>
Fri, 8 Apr 2016 06:09:56 +0000 (06:09 +0000)
Use LMBench to measure the latency of the cache.
Two parameters can be configured (repetition and warmup).

Change-Id: I5e4ecca0f9dd9c9ce2cecce3623dd8347ab2b5b1
Signed-off-by: kubi <jean.gaoliang@huawei.com>
samples/lmbench_cache.yaml [new file with mode: 0644]
tests/unit/benchmark/scenarios/compute/test_lmbench.py
yardstick/benchmark/scenarios/compute/lmbench.py
yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash [new file with mode: 0644]

diff --git a/samples/lmbench_cache.yaml b/samples/lmbench_cache.yaml
new file mode 100644 (file)
index 0000000..7a22cf1
--- /dev/null
@@ -0,0 +1,41 @@
+---
+# Sample benchmark task config file
+# measure memory cache latency using lmbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Lmbench
+  options:
+    test_type: "latency_for_cache"
+    line_size: 128
+    repetition: 1
+    warmup: 0
+
+  host: demeter.demo
+
+  runner:
+    type: Iteration
+    iterations: 2
+    interval: 1
+
+  sla:
+    max_latency: 35
+    action: monitor
+
+context:
+  name: demo
+  image: yardstick-trusty-server
+  flavor: yardstick-flavor
+  user: ubuntu
+
+  servers:
+    demeter:
+      floating_ip: true
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+
+
index 1b24258..6be1163 100644 (file)
@@ -159,6 +159,25 @@ class LmbenchTestCase(unittest.TestCase):
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         self.assertRaises(AssertionError, l.run, self.result)
 
+    def test_successful_latency_for_cache_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency_for_cache",
+            "repetition":1,
+            "warmup": 0
+        }
+        args = {
+            "options": options,
+            "sla": {"max_latency": 35}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = "{\"L1cache\": 1.6}"
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(self.result, expected_result)
+
     def test_unsuccessful_script_error(self, mock_ssh):
 
         options = {"test_type": "bandwidth"}
index e15fe7e..d3e802f 100644 (file)
@@ -57,6 +57,7 @@ class Lmbench(base.Scenario):
 
     LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
     BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
+    LATENCY_CACHE_SCRIPT = "lmbench_latency_for_cache.bash"
 
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
@@ -71,6 +72,9 @@ class Lmbench(base.Scenario):
         self.latency_target_script = pkg_resources.resource_filename(
             "yardstick.benchmark.scenarios.compute",
             Lmbench.LATENCY_BENCHMARK_SCRIPT)
+        self.latency_for_cache_script = pkg_resources.resource_filename(
+            "yardstick.benchmark.scenarios.compute",
+            Lmbench.LATENCY_CACHE_SCRIPT)
         host = self.context_cfg["host"]
         user = host.get("user", "ubuntu")
         ip = host.get("ip", None)
@@ -85,6 +89,8 @@ class Lmbench(base.Scenario):
                         stdin=open(self.latency_target_script, 'rb'))
         self.client.run("cat > ~/lmbench_bandwidth.sh",
                         stdin=open(self.bandwidth_target_script, 'rb'))
+        self.client.run("cat > ~/lmbench_latency_for_cache.sh",
+                        stdin=open(self.latency_for_cache_script, 'rb'))
         self.setup_done = True
 
     def run(self, result):
@@ -106,6 +112,11 @@ class Lmbench(base.Scenario):
             warmup_repetitions = options.get('warmup', 0)
             cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
                   (size, benchmark, warmup_repetitions)
+        elif test_type == 'latency_for_cache':
+            repetition = options.get('repetition', 1)
+            warmup = options.get('warmup', 0)
+            cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
+                  (repetition, warmup)
         else:
             raise RuntimeError("No such test_type: %s for Lmbench scenario",
                                test_type)
@@ -130,12 +141,18 @@ class Lmbench(base.Scenario):
                     if latency > sla_max_latency:
                         sla_error += "latency %f > sla:max_latency(%f); " \
                             % (latency, sla_max_latency)
-            else:
+            elif test_type == 'bandwidth':
                 sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
                 bw = result["bandwidth(MBps)"]
                 if bw < sla_min_bw:
                     sla_error += "bandwidth %f < " \
                                  "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
+            elif test_type == 'latency_for_cache':
+                sla_latency = float(self.scenario_cfg['sla']['max_latency'])
+                cache_latency = float(result['L1cache'])
+                if sla_latency < cache_latency:
+                    sla_error += "latency %f > sla:max_latency(%f); " \
+                        % (cache_latency, sla_latency)
             assert sla_error == "", sla_error
 
 
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash b/yardstick/benchmark/scenarios/compute/lmbench_latency_for_cache.bash
new file mode 100644 (file)
index 0000000..2ed1bbe
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run an lmbench cache latency benchmark on a host and
+# output, in JSON format, the array sizes in megabytes and
+# the load latency over all points in that array in nanoseconds
+
+set -e
+
+REPETITON=$1
+WARMUP=$2
+
+# write the result to stdout in json format
+output_json()
+{
+    read DATA
+    echo $DATA | awk '{printf "{\"L1cache\": %s}", $5}'
+}
+
+/usr/lib/lmbench/bin/x86_64-linux-gnu/cache -W $WARMUP -N $REPETITON  2>&1 | output_json
+