Support for netperf
author	kubi <jean.gaoliang@huawei.com>
Tue, 13 Oct 2015 07:22:20 +0000 (15:22 +0800)
committer	kubi <jean.gaoliang@huawei.com>
Thu, 15 Oct 2015 01:15:37 +0000 (09:15 +0800)
As agreed in the IRC meeting, iperf is used for YARDSTICK-112;
support for netperf shall also be included in the framework (YARDSTICK-121).
Both the bulk data test and the request/response test are supported by netperf.
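
For reference (not part of this change), the two modes map to netperf
invocations roughly as follows; the target address is illustrative:

    netperf -H 10.0.1.5 -l 20 -t UDP_STREAM -- -m 1024     # bulk data
    netperf -H 10.0.1.5 -l 20 -t TCP_RR -- -r 32,1024      # request/response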

JIRA:YARDSTICK-121
Change-Id: I0e934dc067802c2792d751a19a187cd00ad8741b
Signed-off-by: kubi <jean.gaoliang@huawei.com>
samples/netperf.yaml [new file with mode: 0755]
tests/unit/benchmark/scenarios/networking/netperf_sample_output.json [new file with mode: 0755]
tests/unit/benchmark/scenarios/networking/test_netperf.py [new file with mode: 0755]
tools/ubuntu-server-cloudimg-modify.sh
yardstick/benchmark/scenarios/networking/netperf.py [new file with mode: 0755]
yardstick/benchmark/scenarios/networking/netperf_benchmark.bash [new file with mode: 0755]

diff --git a/samples/netperf.yaml b/samples/netperf.yaml
new file mode 100755 (executable)
index 0000000..6d1dd8e
--- /dev/null
@@ -0,0 +1,69 @@
+---
+# Sample benchmark task config file
+# Measure network latency and throughput using netperf.
+# There are two sample scenarios: a bulk test and a request/response test.
+# The bulk test can use UDP_STREAM or TCP_STREAM;
+# send_msg_size and recv_msg_size are options of the bulk test.
+# The req/rsp test can use TCP_RR, TCP_CRR or UDP_RR;
+# req_rsp_size is an option of the req/rsp test.
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Netperf
+  options:
+    testname: 'UDP_STREAM'
+    send_msg_size: 1024
+    duration: 20
+
+  host: Chang'e.demo
+  target: Houyi.demo
+
+  runner:
+    type: Iteration
+    iterations: 1
+    interval: 1
+  sla:
+    mean_latency: 100
+    action: monitor
+-
+  type: Netperf
+  options:
+    testname: 'TCP_RR'
+    req_rsp_size: '32,1024'
+
+  host: Chang'e.demo
+  target: Houyi.demo
+
+  runner:
+    type: Duration
+    duration: 20
+    interval: 1
+  sla:
+    mean_latency: 300
+    action: monitor
+
+context:
+  name: demo
+  image: yardstick-trusty-server
+  flavor: yardstick-flavor
+  user: ec2-user
+
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+
+  servers:
+    Chang'e:
+      floating_ip: true
+      placement: "pgrp1"
+    Houyi:
+      floating_ip: true
+      placement: "pgrp1"
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+
+
diff --git a/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json b/tests/unit/benchmark/scenarios/networking/netperf_sample_output.json
new file mode 100755 (executable)
index 0000000..bba76cf
--- /dev/null
@@ -0,0 +1 @@
+{"mean_latency":"9.49","troughput":"823.77","troughput_unit":"10^6bits/s"}
\ No newline at end of file
diff --git a/tests/unit/benchmark/scenarios/networking/test_netperf.py b/tests/unit/benchmark/scenarios/networking/test_netperf.py
new file mode 100755 (executable)
index 0000000..d5c1991
--- /dev/null
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.netperf.Netperf
+
+import mock
+import unittest
+import os
+import json
+
+from yardstick.benchmark.scenarios.networking import netperf
+
+
+@mock.patch('yardstick.benchmark.scenarios.networking.netperf.ssh')
+class NetperfTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': '172.16.0.137',
+            'target': '172.16.0.138',
+            'user': 'cirros',
+            'key_filename': "mykey.key"
+        }
+
+    def test_netperf_successful_setup(self, mock_ssh):
+
+        p = netperf.Netperf(self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+
+        p.setup()
+        self.assertIsNotNone(p.server)
+        self.assertIsNotNone(p.client)
+        self.assertEqual(p.setup_done, True)
+
+    def test_netperf_successful_no_sla(self, mock_ssh):
+
+        p = netperf.Netperf(self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        options = {}
+        args = {'options': options}
+
+        sample_output = self._read_sample_output()
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        expected_result = json.loads(sample_output)
+        result = p.run(args)
+        self.assertEqual(result, expected_result)
+
+    def test_netperf_successful_sla(self, mock_ssh):
+
+        p = netperf.Netperf(self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        options = {}
+        args = {
+            'options': options,
+            'sla': {'mean_latency': 100}
+        }
+
+        sample_output = self._read_sample_output()
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        expected_result = json.loads(sample_output)
+        result = p.run(args)
+        self.assertEqual(result, expected_result)
+
+    def test_netperf_unsuccessful_sla(self, mock_ssh):
+
+        p = netperf.Netperf(self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        options = {}
+        args = {
+            'options': options,
+            'sla': {'mean_latency': 5}
+        }
+
+        sample_output = self._read_sample_output()
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, p.run, args)
+
+    def test_netperf_unsuccessful_script_error(self, mock_ssh):
+
+        p = netperf.Netperf(self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        p.host = mock_ssh.SSH()
+
+        options = {}
+        args = {'options': options}
+
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, p.run, args)
+
+    def _read_sample_output(self):
+        curr_path = os.path.dirname(os.path.abspath(__file__))
+        output = os.path.join(curr_path, 'netperf_sample_output.json')
+        with open(output) as f:
+            sample_output = f.read()
+        return sample_output
+
+
+def main():
+    unittest.main()
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/ubuntu-server-cloudimg-modify.sh b/tools/ubuntu-server-cloudimg-modify.sh
index 6e750e0..93f2d30 100755 (executable)
--- a/tools/ubuntu-server-cloudimg-modify.sh
@@ -39,6 +39,7 @@ apt-get install -y \
     linux-tools-common \
     linux-tools-generic \
     lmbench \
+    netperf \
     stress
 
 # restore symlink
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
new file mode 100755 (executable)
index 0000000..3121fda
--- /dev/null
@@ -0,0 +1,163 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# bulk data test and req/rsp test are supported
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Netperf(base.Scenario):
+    """Execute netperf between two hosts
+
+  Parameters
+    testname - name of the test to perform;
+    valid testnames are TCP_STREAM, TCP_RR, TCP_CRR, UDP_STREAM, UDP_RR
+        type:    string
+        unit:    na
+        default: TCP_STREAM
+    send_msg_size - set the local send size to the given number of bytes.
+        type:    int
+        unit:    bytes
+        default: na
+    recv_msg_size - set the receive size on the remote system.
+        type:    int
+        unit:    bytes
+        default: na
+    req_rsp_size - set the request and/or response sizes based on sizespec.
+        type:    string
+        unit:    na
+        default: na
+    duration - duration of the test
+        type:    int
+        unit:    seconds
+        default: 20
+
+    See the link below for descriptions of further netperf arguments:
+    http://www.netperf.org/netperf/training/Netperf.html
+    """
+    __scenario_type__ = "Netperf"
+
+    TARGET_SCRIPT = 'netperf_benchmark.bash'
+
+    def __init__(self, context):
+        self.context = context
+        self.setup_done = False
+
+    def setup(self):
+        '''scenario setup'''
+        self.target_script = pkg_resources.resource_filename(
+            'yardstick.benchmark.scenarios.networking',
+            Netperf.TARGET_SCRIPT)
+        user = self.context.get('user', 'ubuntu')
+        host = self.context.get('host', None)
+        target = self.context.get('target', None)
+        key_filename = self.context.get('key_filename', '~/.ssh/id_rsa')
+
+        # netserver starts automatically during VM boot
+        LOG.info("user:%s, target:%s", user, target)
+        self.server = ssh.SSH(user, target, key_filename=key_filename)
+        self.server.wait(timeout=600)
+
+        LOG.info("user:%s, host:%s", user, host)
+        self.client = ssh.SSH(user, host, key_filename=key_filename)
+        self.client.wait(timeout=600)
+
+        # copy script to host
+        self.client.run("cat > ~/netperf.sh",
+                        stdin=open(self.target_script, "rb"))
+
+        self.setup_done = True
+
+    def run(self, args):
+        """execute the benchmark"""
+
+        if not self.setup_done:
+            self.setup()
+
+        # get global options
+        ipaddr = args.get("ipaddr", '127.0.0.1')
+        options = args['options']
+        testname = options.get("testname", 'TCP_STREAM')
+        duration_time = self.context.get("duration", None)
+        arithmetic_time = options.get("duration", None)
+        if duration_time:
+            testlen = duration_time
+        elif arithmetic_time:
+            testlen = arithmetic_time
+        else:
+            testlen = 20
+
+        cmd_args = "-H %s -l %s -t %s" % (ipaddr, testlen, testname)
+
+        # get test specific options
+        default_args = "-O 'THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY'"
+        cmd_args += " -- %s" % default_args
+        option_pair_list = [("send_msg_size", "-m"),
+                            ("recv_msg_size", "-M"),
+                            ("req_rsp_size", "-r")]
+        for option_pair in option_pair_list:
+            if option_pair[0] in options:
+                cmd_args += " %s %s" % (option_pair[1],
+                                        options[option_pair[0]])
+
+        cmd = "sudo bash netperf.sh %s" % (cmd_args)
+        LOG.debug("Executing command: %s", cmd)
+        status, stdout, stderr = self.client.execute(cmd)
+
+        if status:
+            raise RuntimeError(stderr)
+
+        data = json.loads(stdout)
+        if data['mean_latency'] == '':
+            raise RuntimeError(stdout)
+
+        # sla check
+        mean_latency = float(data['mean_latency'])
+        if "sla" in args:
+            sla_max_mean_latency = int(args["sla"]["mean_latency"])
+
+            assert mean_latency <= sla_max_mean_latency, \
+                "mean_latency %f > sla_max_mean_latency(%f)" % \
+                (mean_latency, sla_max_mean_latency)
+
+        return data
+
+
+def _test():
+    '''internal test function'''
+    logger = logging.getLogger('yardstick')
+    logger.setLevel(logging.DEBUG)
+
+    key_filename = pkg_resources.resource_filename('yardstick.resources',
+                                                   'files/yardstick_key')
+    runner_cfg = {}
+    runner_cfg['type'] = 'Duration'
+    runner_cfg['duration'] = 5
+    runner_cfg['client'] = '10.0.2.33'
+    runner_cfg['server'] = '10.0.2.53'
+    runner_cfg['user'] = 'ubuntu'
+    runner_cfg['output_filename'] = "/tmp/yardstick.out"
+    runner_cfg['key_filename'] = key_filename
+
+    scenario_args = {}
+    scenario_args['options'] = {"testname": 'TCP_STREAM'}
+
+    from yardstick.benchmark.runners import base as base_runner
+    runner = base_runner.Runner.get(runner_cfg)
+    runner.run("Netperf", scenario_args)
+    runner.join()
+    base_runner.Runner.release(runner)
+
+if __name__ == '__main__':
+    _test()
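
For the first sample scenario above (UDP_STREAM, send_msg_size 1024,
duration 20), run() assembles a command along these lines, with an
illustrative target address, and executes it on the client VM:

    sudo bash netperf.sh -H 10.0.1.5 -l 20 -t UDP_STREAM -- \
        -O 'THROUGHPUT,THROUGHPUT_UNITS,MEAN_LATENCY' -m 1024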
diff --git a/yardstick/benchmark/scenarios/networking/netperf_benchmark.bash b/yardstick/benchmark/scenarios/networking/netperf_benchmark.bash
new file mode 100755 (executable)
index 0000000..a425c5d
--- /dev/null
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+OPTIONS="$@"
+OUTPUT_FILE=/tmp/netperf-out.log
+
+# run netperf test
+run_netperf()
+{
+    netperf $OPTIONS > $OUTPUT_FILE
+}
+
+# write the result to stdout in json format
+output_json()
+{
+    mean=$(awk '/\/s/{print $3}' $OUTPUT_FILE)
+    throughput=$(awk '/\/s/{print $1}' $OUTPUT_FILE)
+    unit=$(awk '/\/s/{print $2}' $OUTPUT_FILE)
+    echo -e "{ \
+        \"mean_latency\":\"$mean\", \
+        \"throughput\":\"$throughput\", \
+        \"throughput_unit\":\"$unit\" \
+    }"
+}
+
+# main entry
+main()
+{
+    # run the test
+    run_netperf
+
+    # output result
+    output_json
+}
+
+main
\ No newline at end of file
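
Note: the awk expressions in output_json key on the single line of netperf's
-O output that contains "/s" (the value line); with the selectors used here
it looks roughly like the following (values illustrative), so fields
$1/$2/$3 map to throughput, its unit and mean latency:

    823.77   10^6bits/s   9.49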