add scenario and sample file for Unixbench. 71/4571/6
author kubi <jean.gaoliang@huawei.com>
Mon, 14 Dec 2015 11:51:45 +0000 (19:51 +0800)
committer liang gao <jean.gaoliang@huawei.com>
Sat, 26 Dec 2015 09:59:18 +0000 (09:59 +0000)
JIRA:YARDSTICK-184

Change-Id: Iedd4a3708e08305b1c8fa7a8e1766ceef03ab8bb
Signed-off-by: kubi <jean.gaoliang@huawei.com>
samples/unixbench.yaml [new file with mode: 0644]
tests/unit/benchmark/scenarios/compute/test_unixbench.py [new file with mode: 0644]
tools/ubuntu-server-cloudimg-modify.sh
yardstick/benchmark/scenarios/compute/unixbench.py [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash [new file with mode: 0644]
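
Once a guest image has been rebuilt with the updated ubuntu-server-cloudimg-modify.sh, the new sample can be launched through the Yardstick CLI. The invocation below is only an illustrative sketch; the exact sub-command depends on the installed Yardstick version:

    yardstick task start samples/unixbench.yaml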

diff --git a/samples/unixbench.yaml b/samples/unixbench.yaml
new file mode 100644 (file)
index 0000000..9af032f
--- /dev/null
@@ -0,0 +1,35 @@
+---
+# Sample benchmark task config file
+# measure CPU performance
+# There is one sample scenario for Dhrystone
+# Dhrystone (MIPS) - higher results are better, i.e. better integer performance.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: UnixBench
+  options:
+    run_mode: 'verbose'
+    test_type: 'dhry2reg'
+  host: Chang'e.demo
+
+  runner:
+    type: Iteration
+    iterations: 1
+    interval: 1
+
+context:
+  name: demo
+  image: yardstick-trusty-server
+  flavor: yardstick-flavor
+  user: ec2-user
+
+  servers:
+    Chang'e:
+      floating_ip: true
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+
diff --git a/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/tests/unit/benchmark/scenarios/compute/test_unixbench.py
new file mode 100644 (file)
index 0000000..0935bca
--- /dev/null
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.unixbench.Unixbench
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.compute import unixbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
+class UnixbenchTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            "host": {
+                "ip": "192.168.50.28",
+                "user": "root",
+                "key_filename": "mykey.key"
+            }
+        }
+
+    def test_unixbench_successful_setup(self, mock_ssh):
+
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        u = unixbench.Unixbench({}, self.ctx)
+        u.setup()
+
+        self.assertIsNotNone(u.client)
+        self.assertEqual(u.setup_done, True)
+
+    def test_unixbench_successful_no_sla(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'verbose'
+        }
+        args = {
+            "options": options,
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.client = mock_ssh.SSH()
+
+        sample_output = '{"Score":"4425.4"}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+        u.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+    def test_unixbench_successful_in_quiet_mode(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'quiet',
+            "copies":1
+        }
+        args = {
+            "options": options,
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.client = mock_ssh.SSH()
+
+        sample_output = '{"Score":"4425.4"}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+        u.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+
+    def test_unixbench_successful_sla(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'verbose'
+        }
+        sla = {
+            "single_score": '100',
+            "parallel_score": '500'
+        }
+        args = {
+            "options": options,
+            "sla": sla
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.client = mock_ssh.SSH()
+
+        sample_output = '{"signle_score":"2251.7","parallel_score":"4395.9"}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+
+        u.run(result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(result, expected_result)
+
+    def test_unixbench_unsuccessful_sla_single_score(self, mock_ssh):
+
+        args = {
+            "options": {},
+            "sla": {"single_score": "500"}
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.client = mock_ssh.SSH()
+        sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
+
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, u.run, result)
+
+    def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
+
+        args = {
+            "options": {},
+            "sla": {"parallel_score": "4000"}
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.client = mock_ssh.SSH()
+        sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
+
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, u.run, result)
+
+    def test_unixbench_unsuccessful_script_error(self, mock_ssh):
+
+        options = {
+            "test_type": 'dhry2reg',
+            "run_mode": 'verbose'
+        }
+        sla = {
+            "single_score": '100',
+            "parallel_score": '500'
+        }
+        args = {
+            "options": options,
+            "sla": sla
+        }
+        u = unixbench.Unixbench(args, self.ctx)
+        result = {}
+
+        u.client = mock_ssh.SSH()
+
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, u.run, result)
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
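
The new unit tests can be exercised on their own before running the full suite; a hedged example (the dotted module path assumes the tree is importable from the repository root):

    python -m unittest tests.unit.benchmark.scenarios.compute.test_unixbench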
diff --git a/tools/ubuntu-server-cloudimg-modify.sh b/tools/ubuntu-server-cloudimg-modify.sh
index 58fcd92..11e6051 100755 (executable)
@@ -42,14 +42,21 @@ EOF
 apt-get update
 apt-get install -y \
     fio \
+    git \
+    gcc \
     iperf3 \
     linux-tools-common \
     linux-tools-generic \
     lmbench \
+    make \
     netperf \
+    patch \
+    perl \
     rt-tests \
     stress \
     sysstat
 
+# fetch and build UnixBench so the UnixBench scenario can invoke /opt/tempT/UnixBench/Run
+git clone https://github.com/kdlucas/byte-unixbench.git /opt/tempT
+make --directory /opt/tempT/UnixBench/
 # restore symlink
 ln -sf /run/resolvconf/resolv.conf /etc/resolv.conf
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
new file mode 100644 (file)
index 0000000..e6318b9
--- /dev/null
@@ -0,0 +1,156 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Unixbench(base.Scenario):
+    """Execute Unixbench cpu benchmark in a host
+    The Run script takes a number of options which you can use to customise a
+    test, and you can specify the names of the tests to run.  The full usage
+    is:
+
+    Run [ -q | -v ] [-i <n> ] [-c <n> [-c <n> ...]] [test ...]
+
+    -i <count>    Run <count> iterations for each test -- slower tests
+                use <count> / 3, but at least 1.  Defaults to 10 (3 for
+                slow tests).
+    -c <n>        Run <n> copies of each test in parallel.
+
+    Parameters for setting unixbench
+        run_mode - Run in quiet mode or verbose mode
+            type:       string
+            unit:       None
+            default:    None
+        test_type - name of the test to run; the available tests are
+            organised into categories (e.g. 'dhry2reg' for Dhrystone)
+            type:       string
+            unit:       None
+            default:    None
+        iterations - Run <count> iterations for each test -- slower tests
+            use <count> / 3, but at least 1.  Defaults to 10 (3 for slow tests).
+            type:       int
+            unit:       None
+            default:    None
+        copies - Run <n> copies of each test in parallel.
+            type:       int
+            unit:       None
+            default:    None
+
+    more info https://github.com/kdlucas/byte-unixbench/blob/master/UnixBench
+    """
+    __scenario_type__ = "UnixBench"
+
+    TARGET_SCRIPT = "unixbench_benchmark.bash"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+        self.target_script = pkg_resources.resource_filename(
+            "yardstick.benchmark.scenarios.compute",
+            Unixbench.TARGET_SCRIPT)
+
+        host = self.context_cfg["host"]
+        user = host.get("user", "ubuntu")
+        ip = host.get("ip", None)
+        key_filename = host.get('key_filename', "~/.ssh/id_rsa")
+
+        LOG.info("user:%s, host:%s", user, ip)
+        self.client = ssh.SSH(user, ip, key_filename=key_filename)
+        self.client.wait(timeout=600)
+
+        # copy scripts to host
+        self.client.run("cat > ~/unixbench_benchmark.sh",
+                        stdin=open(self.target_script, 'rb'))
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the benchmark"""
+
+        if not self.setup_done:
+            self.setup()
+
+        options = self.scenario_cfg["options"]
+
+        run_mode = options.get("run_mode", None)
+        LOG.debug("Executing run_mode: %s", run_mode)
+        cmd_args = ""
+        if run_mode == "quiet":
+            cmd_args = "-q"
+        elif run_mode == "verbose":
+            cmd_args = "-v"
+
+        option_pair_list = [("iterations", "-i"),
+                            ("copies", "-c")]
+        for option_pair in option_pair_list:
+            if option_pair[0] in options:
+                cmd_args += " %s %s " % (option_pair[1],
+                                         options[option_pair[0]])
+
+        test_type = options.get("test_type", None)
+        if test_type is not None:
+            cmd_args += " %s " % (test_type)
+
+        cmd = "sudo bash unixbench_benchmark.sh %s" % (cmd_args)
+        LOG.debug("Executing command: %s", cmd)
+        status, stdout, stderr = self.client.execute(cmd)
+        if status:
+            raise RuntimeError(stderr)
+
+        result.update(json.loads(stdout))
+
+        if "sla" in self.scenario_cfg:
+            sla_error = ""
+            for t, score in result.items():
+                if t not in self.scenario_cfg['sla']:
+                    continue
+                sla_score = float(self.scenario_cfg['sla'][t])
+                score = float(score)
+                if score < sla_score:
+                    sla_error += "%s score %f < sla:%s_score(%f); " % \
+                        (t, score, t, sla_score)
+            assert sla_error == "", sla_error
+
+
+def _test():  # pragma: no cover
+    """internal test function"""
+    key_filename = pkg_resources.resource_filename('yardstick.resources',
+                                                   'files/yardstick_key')
+    ctx = {
+        'host': {
+            'ip': '10.229.47.137',
+            'user': 'root',
+            'key_filename': key_filename
+        }
+    }
+
+    options = {
+        'test_type': 'dhrystone',
+        'run_mode': 'verbose'
+    }
+
+    args = {'options': options}
+    result = {}
+
+    p = Unixbench(args, ctx)
+    p.run(result)
+    print result
+
+if __name__ == '__main__':
+    _test()
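
For illustration only (not part of the patch): given options such as {'run_mode': 'quiet', 'copies': 2, 'test_type': 'dhry2reg'}, run() above assembles and executes roughly the following command on the target host:

    sudo bash unixbench_benchmark.sh -q -c 2  dhry2reg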
diff --git a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
new file mode 100644 (file)
index 0000000..5a5dbc3
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+OPTIONS="$@"
+OUTPUT_FILE=/tmp/unixbench-out.log
+
+# run unixbench test
+run_unixbench()
+{
+    cd /opt/tempT/UnixBench/
+    ./Run $OPTIONS > $OUTPUT_FILE
+}
+
+# write the result to stdout in json format
+output_json()
+{
+    # the first line matching "Score" holds the single-copy result,
+    # the last one the parallel (multi-copy) result
+    single_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | head -1 )
+    parallel_score=$(awk '/Score/{print $7}' $OUTPUT_FILE | tail -1 )
+    echo -e "{  \
+        \"single_score\":\"$single_score\", \
+        \"parallel_score\":\"$parallel_score\" \
+    }"
+}
+
+# main entry
+main()
+{
+    # run the test
+    run_unixbench
+
+    # output result
+    output_json
+}
+
+main