Merge "Add unit test for pktgen"
author Jörgen Karlsson <jorgen.w.karlsson@ericsson.com>
Wed, 19 Aug 2015 15:18:15 +0000 (15:18 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Wed, 19 Aug 2015 15:18:15 +0000 (15:18 +0000)
18 files changed:
README.rst
run_tests.sh
samples/lmbench.yaml [new file with mode: 0644]
samples/perf.yaml [new file with mode: 0644]
samples/ping-iteration.yaml [new file with mode: 0755]
setup.py
tools/ubuntu-server-cloudimg-modify.sh
yardstick/benchmark/runners/arithmetic.py [changed mode: 0644->0755]
yardstick/benchmark/runners/iteration.py [new file with mode: 0755]
yardstick/benchmark/runners/sequence.py
yardstick/benchmark/scenarios/compute/__init__.py [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/lmbench.py [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/perf.py [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/perf_benchmark.bash [new file with mode: 0644]
yardstick/main.py
yardstick/plot/__init__.py [new file with mode: 0644]
yardstick/plot/plotter.py [new file with mode: 0644]

index 049a426..f9276ab 100644 (file)
@@ -79,8 +79,10 @@ Example setup known to work for development and test:
 - Development environment: Ubuntu 14.04, Eclipse, virtual environment
 - Cloud: Mirantis OpenStack 6.0 deployed using VirtualBox
 
+Install dependencies:
+$ sudo apt-get install python-virtualenv python-dev libffi-dev libssl-dev
+
 Create a virtual environment:
-$ sudo apt-get install python-virtualenv
 $ virtualenv ~/yardstick_venv
 $ source ~/yardstick_venv/bin/activate
 $ python setup.py develop
index b2bf807..c8d8e7d 100755 (executable)
 
 # Run yardstick's test suite(s)
 
+getopts ":f" FILE_OPTION
+
 run_flake8() {
-    echo -n "Running flake8 ... "
-    logfile=pep8.log
-    flake8 yardstick > $logfile
+    echo "Running flake8 ... "
+    logfile=test_results.log
+    if [ $FILE_OPTION == "f" ]; then
+        flake8 yardstick > $logfile
+    else
+        flake8 yardstick
+    fi
+
     if [ $? -ne 0 ]; then
-        echo "FAILED, result in $logfile"
+        echo "FAILED"
+        if [ $FILE_OPTION == "f" ]; then
+            echo "Results in $logfile"
+        fi
         exit 1
     else
-        echo "OK, result in $logfile"
+        echo "OK"
     fi
 }
 
 run_tests() {
-    echo -n "Running unittest ... "
-    python -m unittest discover -s tests/unit
+    echo "Running unittest ... "
+    if [ $FILE_OPTION == "f" ]; then
+        python -m unittest discover -v -s tests/unit > $logfile 2>&1
+    else
+        python -m unittest discover -v -s tests/unit
+    fi
+
     if [ $? -ne 0 ]; then
-        echo "FAILED, result in $logfile"
+        if [ $FILE_OPTION == "f" ]; then
+            echo "FAILED, results in $logfile"
+        fi
         exit 1
     else
-        echo "OK, result in $logfile"
+        if [ $FILE_OPTION == "f" ]; then
+            echo "OK, results in $logfile"
+        fi
     fi
 }
 
 run_flake8
-#run_tests
+run_tests
 
diff --git a/samples/lmbench.yaml b/samples/lmbench.yaml
new file mode 100644 (file)
index 0000000..c7526c0
--- /dev/null
@@ -0,0 +1,46 @@
+---
+# Sample benchmark task config file
+# measure memory read latency using lmbench
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Lmbench
+  options:
+    stride: 64
+    stop_size: 32
+
+  host: demeter.demo
+
+  runner:
+    type: Arithmetic
+    name: stride
+    stop: 128
+    step: 64
+
+  sla:
+    max_latency: 35
+    action: monitor
+
+context:
+  name: demo
+  image: yardstick-trusty-server
+  flavor: yardstick-flavor
+  user: ec2-user
+
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+
+  servers:
+    demeter:
+      floating_ip: true
+      placement: "pgrp1"
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      external_network: "net04_ext"
+
+
diff --git a/samples/perf.yaml b/samples/perf.yaml
new file mode 100644 (file)
index 0000000..e7ba2d0
--- /dev/null
@@ -0,0 +1,43 @@
+---
+# Sample benchmark task config file
+# use perf to perform Linux performance measurements
+# this sample demonstrates measurements of various software perf events
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Perf
+  options:
+    load: true
+    events:
+    - task-clock
+    - cpu-clock
+    - context-switches
+    - page-faults
+    - cpu-migrations
+  host: hades.demo
+
+  runner:
+    type: Duration
+    duration: 30
+
+  sla:
+    metric: context-switches
+    smaller_than_expected: true
+    expected_value: 300
+    action: monitor
+
+context:
+  name: demo
+  image: yardstick-trusty-server
+  flavor: yardstick-flavor
+  user: ec2-user
+
+  servers:
+    hades:
+      floating_ip: true
+  networks:
+    test:
+      cidr: "10.0.1.0/24"
+      external_network: "net04_ext"
diff --git a/samples/ping-iteration.yaml b/samples/ping-iteration.yaml
new file mode 100755 (executable)
index 0000000..810530c
--- /dev/null
@@ -0,0 +1,45 @@
+---
+# Sample benchmark task config file
+# measure network latency using ping
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Ping
+  options:
+    packetsize: 200
+  host: athena.demo
+  target: ares.demo
+
+  runner:
+    type: Iteration
+    iterations: 60
+    interval: 1
+
+  sla:
+    max_rtt: 10
+    action: monitor
+
+context:
+  name: demo
+  image: cirros-0.3.3
+  flavor: m1.tiny
+  user: cirros
+
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+
+  servers:
+    athena:
+      floating_ip: true
+      placement: "pgrp1"
+    ares:
+      placement: "pgrp1"
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      external_network: "net04_ext"
+
index fee8f3c..f73094a 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -10,16 +10,17 @@ setup(
     include_package_data=True,
     package_data={
         'yardstick': [
+            'benchmark/scenarios/compute/*.bash',
             'benchmark/scenarios/networking/*.bash',
             'benchmark/scenarios/storage/*.bash',
             'resources/files/*'
-         ]
+        ]
     },
     url="https://www.opnfv.org",
     install_requires=["backport_ipaddress",  # remove with python3
                       "flake8",
                       "PyYAML>=3.10",
-                      "pbr!=0.7,<1.0,>=0.6",
+                      "pbr<2.0,>=1.3",
                       "python-glanceclient>=0.12.0",
                       "python-heatclient>=0.2.12",
                       "python-keystoneclient>=0.11.1",
@@ -29,9 +30,13 @@ setup(
                       "paramiko",
                       "six"
                       ],
+    extras_require={
+        'plot': ["matplotlib>=1.4.2"]
+    },
     entry_points={
         'console_scripts': [
             'yardstick=yardstick.main:main',
+            'yardstick-plot=yardstick.plot.plotter:main [plot]'
         ],
     },
     scripts=['tools/yardstick-img-modify']
index 96447d8..41d654a 100755 (executable)
@@ -29,6 +29,8 @@ apt-get update
 apt-get install -y \
     fio \
     iperf3 \
+    linux-tools-common \
+    linux-tools-generic \
     lmbench \
     stress
 
old mode 100644 (file)
new mode 100755 (executable)
index 9efafff..bae40eb
@@ -48,8 +48,9 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
     sla_action = None
     if "sla" in scenario_args:
         sla_action = scenario_args["sla"].get("action", "assert")
+    margin = 1 if step > 0 else -1
 
-    for value in range(start, stop+step, step):
+    for value in range(start, stop+margin, step):
 
         options[arg_name] = value
 
@@ -81,7 +82,7 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }
 
-        queue.put({'context': record_context, 'sargs:': scenario_args,
+        queue.put({'context': record_context, 'sargs': scenario_args,
                    'benchmark': benchmark_output})
 
         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
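Note on the margin change above (illustration only, not part of the patch): bounding the range at stop+margin instead of stop+step keeps the swept values inside [start, stop] while still including stop when it lies on the step grid, for both increasing and decreasing steps. A minimal Python sketch with made-up start/stop/step values:

    # Old bound could run one step past stop when (stop - start)
    # is not a multiple of step:
    list(range(0, 100 + 64, 64))    # [0, 64, 128] -- 128 overshoots stop=100

    # New bound (margin = 1 for a positive step, -1 for a negative step):
    list(range(0, 100 + 1, 64))     # [0, 64]
    list(range(0, 128 + 1, 64))     # [0, 64, 128] -- stop included when reachable
    list(range(128, 0 - 1, -64))    # [128, 64, 0] -- decreasing sweeps work too
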
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
new file mode 100755 (executable)
index 0000000..03dcfae
--- /dev/null
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+'''A runner that runs a configurable number of times before it returns
+'''
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, context, scenario_args):
+
+    sequence = 1
+
+    interval = context.get("interval", 1)
+    iterations = context.get("iterations", 1)
+    LOG.info("worker START, iterations %d times, class %s", iterations, cls)
+
+    context['runner'] = os.getpid()
+
+    benchmark = cls(context)
+    benchmark.setup()
+    method = getattr(benchmark, method_name)
+
+    record_context = {"runner": context["runner"],
+                      "host": context["host"]}
+
+    sla_action = None
+    if "sla" in scenario_args:
+        sla_action = scenario_args["sla"].get("action", "assert")
+
+    while True:
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        data = {}
+        errors = ""
+
+        try:
+            data = method(scenario_args)
+        except AssertionError as assertion:
+            # SLA validation failed in scenario, determine what to do now
+            if sla_action == "assert":
+                raise
+            elif sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s" % assertion.args)
+                errors = assertion.args
+        except Exception as e:
+            errors = traceback.format_exc()
+            LOG.exception(e)
+
+        time.sleep(interval)
+
+        benchmark_output = {
+            'timestamp': time.time(),
+            'sequence': sequence,
+            'data': data,
+            'errors': errors
+        }
+
+        queue.put({'context': record_context, 'sargs': scenario_args,
+                   'benchmark': benchmark_output})
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        sequence += 1
+
+        if (errors and sla_action is None) or (sequence > iterations):
+            LOG.info("worker END")
+            break
+
+    benchmark.teardown()
+
+
+class IterationRunner(base.Runner):
+    '''Run a scenario a configurable number of times
+
+The scenario is invoked repeatedly, with a configurable interval between
+invocations, until the requested number of iterations has completed.
+
+  Parameters
+    iterations - number of times the scenario will be run
+        type:    int
+        unit:    na
+        default: 1
+    interval - time to wait between each scenario invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    '''
+    __execution_type__ = 'Iteration'
+
+    def _run_benchmark(self, cls, method, scenario_args):
+        self.process = multiprocessing.Process(
+            target=_worker_process,
+            args=(self.result_queue, cls, method, self.config, scenario_args))
+        self.process.start()
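For orientation (illustrative, not part of the patch): the loop above is driven by two keys in the runner configuration, shown here with the values used in samples/ping-iteration.yaml:

    # Illustrative runner config (values mirror samples/ping-iteration.yaml).
    # _worker_process above reads 'iterations' and 'interval' from this dict;
    # 'host' and the runner pid are also expected to be present at runtime.
    runner_config = {
        "type": "Iteration",
        "iterations": 60,   # invoke the scenario 60 times
        "interval": 1,      # wait 1 second between invocations
    }
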
index 52bb69a..25b65b0 100644 (file)
@@ -82,7 +82,7 @@ def _worker_process(queue, cls, method_name, context, scenario_args):
             'errors': errors
         }
 
-        queue.put({'context': record_context, 'sargs:': scenario_args,
+        queue.put({'context': record_context, 'sargs': scenario_args,
                    'benchmark': benchmark_output})
 
         LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
diff --git a/yardstick/benchmark/scenarios/compute/__init__.py b/yardstick/benchmark/scenarios/compute/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
new file mode 100644 (file)
index 0000000..4ce2825
--- /dev/null
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class Lmbench(base.Scenario):
+    """Execute lmbench memory read latency benchmark in a host
+
+    Parameters
+        stride - number of locations in memory between starts of array elements
+            type:       int
+            unit:       bytes
+            default:    128
+        stop_size - maximum array size to test (minimum value is 0.000512)
+            type:       int
+            unit:       megabytes
+            default:    16
+
+    Results are accurate to the ~2-5 nanosecond range.
+    """
+    __scenario_type__ = "Lmbench"
+
+    TARGET_SCRIPT = "lmbench_benchmark.bash"
+
+    def __init__(self, context):
+        self.context = context
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+        self.target_script = pkg_resources.resource_filename(
+            "yardstick.benchmark.scenarios.compute",
+            Lmbench.TARGET_SCRIPT)
+        user = self.context.get("user", "ubuntu")
+        host = self.context.get("host", None)
+        key_filename = self.context.get('key_filename', "~/.ssh/id_rsa")
+
+        LOG.debug("user:%s, host:%s", user, host)
+        self.client = ssh.SSH(user, host, key_filename=key_filename)
+        self.client.wait(timeout=600)
+
+        # copy script to host
+        self.client.run("cat > ~/lmbench.sh",
+                        stdin=open(self.target_script, 'rb'))
+
+        self.setup_done = True
+
+    def run(self, args):
+        """execute the benchmark"""
+
+        if not self.setup_done:
+            self.setup()
+
+        options = args['options']
+        stride = options.get('stride', 128)
+        stop_size = options.get('stop_size', 16)
+
+        cmd = "sudo bash lmbench.sh %d %d" % (stop_size, stride)
+        LOG.debug("Executing command: %s", cmd)
+        status, stdout, stderr = self.client.execute(cmd)
+
+        if status:
+            raise RuntimeError(stderr)
+
+        data = json.loads(stdout)
+
+        if "sla" in args:
+            sla_max_latency = int(args['sla']['max_latency'])
+            for result in data:
+                latency = result['latency']
+                assert latency <= sla_max_latency, "latency %f > " \
+                    "sla:max_latency(%f)" % (latency, sla_max_latency)
+
+        return data
+
+
+def _test():
+    """internal test function"""
+    key_filename = pkg_resources.resource_filename('yardstick.resources',
+                                                   'files/yardstick_key')
+    ctx = {'host': '172.16.0.137',
+           'user': 'ubuntu',
+           'key_filename': key_filename
+           }
+
+    logger = logging.getLogger('yardstick')
+    logger.setLevel(logging.DEBUG)
+
+    p = Lmbench(ctx)
+
+    options = {'stride': 128, 'stop_size': 16}
+
+    args = {'options': options}
+    result = p.run(args)
+    print result
+
+if __name__ == '__main__':
+    _test()
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash
new file mode 100644 (file)
index 0000000..04e3c1a
--- /dev/null
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run the lmbench memory read latency benchmark in a host and
+# output, in JSON format, the array sizes in megabytes and the
+# load latency over all points in that array in nanoseconds
+
+set -e
+
+SIZE=$1
+shift
+STRIDE=$1
+
+# write the result to stdout in json format
+output_json()
+{
+    iter=0
+    echo [
+    while read DATA
+    do
+        if [ $iter -gt 1 ] && [ -n "$DATA" ]; then
+            echo ,
+        fi
+
+        echo -n $DATA | awk '/ /{printf "{\"size\": %s, \"latency\": %s}", $1, $2}'
+
+        iter=$((iter+1))
+    done
+    echo ]
+}
+
+/usr/lib/lmbench/bin/x86_64-linux-gnu/lat_mem_rd $SIZE $STRIDE 2>&1 | output_json
+
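For reference (illustrative values only): output_json above emits a JSON list of size/latency pairs on stdout, which Lmbench.run() in lmbench.py parses and checks against sla:max_latency. A minimal sketch of that contract:

    import json

    # Made-up stdout from lmbench_benchmark.bash; sizes are in megabytes,
    # latencies in nanoseconds.
    stdout = '[{"size": 0.00049, "latency": 1.258}, {"size": 32.0, "latency": 23.4}]'

    for result in json.loads(stdout):
        # Same check as Lmbench.run(): every sampled latency must stay at or
        # below the configured max_latency (35 ns in samples/lmbench.yaml).
        assert result["latency"] <= 35
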
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
new file mode 100644 (file)
index 0000000..62b4297
--- /dev/null
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+
+
+class Perf(base.Scenario):
+    """Execute perf benchmark in a host
+
+  Parameters
+    events - perf tool software, hardware or tracepoint events
+        type:       [str]
+        unit:       na
+        default:    ['task-clock']
+    load - simulate load on the host by doing IO operations
+        type:       bool
+        unit:       na
+        default:    false
+
+    For more info about perf and perf events see https://perf.wiki.kernel.org
+    """
+
+    __scenario_type__ = "Perf"
+
+    TARGET_SCRIPT = 'perf_benchmark.bash'
+
+    def __init__(self, context):
+        self.context = context
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+        self.target_script = pkg_resources.resource_filename(
+            'yardstick.benchmark.scenarios.compute', Perf.TARGET_SCRIPT)
+        user = self.context.get('user', 'ubuntu')
+        host = self.context.get('host', None)
+        key_filename = self.context.get('key_filename', '~/.ssh/id_rsa')
+
+        LOG.debug("user:%s, host:%s", user, host)
+        self.client = ssh.SSH(user, host, key_filename=key_filename)
+        self.client.wait(timeout=600)
+
+        # copy script to host
+        self.client.run("cat > ~/perf_benchmark.sh",
+                        stdin=open(self.target_script, "rb"))
+
+        self.setup_done = True
+
+    def run(self, args):
+        """execute the benchmark"""
+
+        if not self.setup_done:
+            self.setup()
+
+        options = args['options']
+        events = options.get('events', ['task-clock'])
+
+        events_string = ""
+        for event in events:
+            events_string += event + " "
+
+        # if run by a duration runner
+        duration_time = self.context.get("duration", None)
+        # if run by an arithmetic runner
+        arithmetic_time = options.get("duration", None)
+        if duration_time:
+            duration = duration_time
+        elif arithmetic_time:
+            duration = arithmetic_time
+        else:
+            duration = 30
+
+        if 'load' in options:
+            load = "dd if=/dev/urandom of=/dev/null"
+        else:
+            load = "sleep %d" % duration
+
+        cmd = "sudo bash perf_benchmark.sh '%s' %d %s" \
+            % (load, duration, events_string)
+
+        LOG.debug("Executing command: %s", cmd)
+        status, stdout, stderr = self.client.execute(cmd)
+
+        if status:
+            raise RuntimeError(stdout)
+
+        output = json.loads(stdout)
+
+        if "sla" in args:
+            metric = args['sla']['metric']
+            exp_val = args['sla']['expected_value']
+            smaller_than_exp = 'smaller_than_expected' in args['sla']
+
+            if metric not in output:
+                assert False, "Metric (%s) not found." % metric
+            else:
+                if smaller_than_exp:
+                    assert output[metric] < exp_val, "%s %d >= %d (sla)" \
+                        % (metric, output[metric], exp_val)
+                else:
+                    assert output[metric] >= exp_val, "%s %d < %d (sla)" \
+                        % (metric, output[metric], exp_val)
+        return output
+
+
+def _test():
+    """internal test function"""
+    key_filename = pkg_resources.resource_filename('yardstick.resources',
+                                                   'files/yardstick_key')
+    ctx = {'host': '172.16.0.137',
+           'user': 'ubuntu',
+           'key_filename': key_filename
+           }
+
+    logger = logging.getLogger('yardstick')
+    logger.setLevel(logging.DEBUG)
+
+    p = Perf(ctx)
+
+    options = {'load': True}
+    args = {'options': options}
+
+    result = p.run(args)
+    print result
+
+if __name__ == '__main__':
+    _test()
diff --git a/yardstick/benchmark/scenarios/compute/perf_benchmark.bash b/yardstick/benchmark/scenarios/compute/perf_benchmark.bash
new file mode 100644 (file)
index 0000000..5ae107a
--- /dev/null
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -e
+
+# Commandline arguments
+PAYLOAD_OP=$1
+shift
+DURATION=$1
+shift
+EVENTS=("$@")
+OUTPUT_FILE=/tmp/perfout.txt
+
+# run perf test
+run_perf()
+{
+    COMMA_SEP_E=$( IFS=$','; echo "${EVENTS[*]}" )
+
+    if [[ $PAYLOAD_OP == dd* ]]
+    then
+        sudo perf stat -o $OUTPUT_FILE -e ${COMMA_SEP_E[@]} $PAYLOAD_OP &
+        sleep $DURATION
+        sudo killall -q -u root dd
+    else
+        sudo perf stat -o $OUTPUT_FILE -e ${COMMA_SEP_E[@]} $PAYLOAD_OP
+    fi
+}
+
+# write the result to stdout in json format
+output_json()
+{
+    EVENTS+=('time')
+
+    last_pos=$(( ${#EVENTS[*]} - 1 ))
+    last=${EVENTS[$last_pos]}
+
+    echo -n {
+    for EVENT in ${EVENTS[@]}
+    do
+        value=$(cat $OUTPUT_FILE | grep $EVENT | awk 'match($0,/[0-9]+|[0-9]+\.[0-9]*/, a) { print a[0]}')
+
+        if [[ $EVENT != $last ]]
+        then
+            echo -n \"$EVENT\": $value,
+        else
+            echo -n \"$EVENT\": $value
+        fi
+    done
+    echo }
+}
+
+# main entry
+main()
+{
+    run_perf > /dev/null 2>&1
+    sleep 1
+    output_json
+}
+
+main
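Similarly (illustration only, not part of the patch): perf_benchmark.bash prints one flat JSON object keyed by event name plus the elapsed time, which Perf.run() loads and compares against the SLA. A sketch with made-up counter values:

    import json

    # Made-up stdout from perf_benchmark.bash for the events in samples/perf.yaml.
    stdout = '{"context-switches": 260, "task-clock": 30012.48, "time": 30.01}'
    output = json.loads(stdout)

    # samples/perf.yaml sets smaller_than_expected, so the metric must stay
    # below expected_value (300) -- the same comparison as in Perf.run().
    metric, expected = "context-switches", 300
    assert output[metric] < expected, "%s %d >= %d (sla)" % (metric, output[metric], expected)
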
index c16a42e..418e3da 100755 (executable)
 """ yardstick - command line tool for managing benchmarks
 
     Example invocation:
-    $ yardstick samples/ping-task.yaml
+    $ yardstick task start samples/ping.yaml
 
-    Servers are the same as VMs (Nova call them servers in the API)
+    Servers are the same as VMs (Nova calls them servers in the API)
 
     Many tests use a client/server architecture. A test client is configured
    to use a specific test server, e.g. via an IP address. This is true, for
    example, for iperf. In some cases the test server is included in the kernel
     (ping, pktgen) and no additional software is needed on the server. In other
-    cases (iperf) a server process needs to be installed and started
+    cases (iperf) a server process needs to be installed and started.
 
     One server is required to host the test client program (such as ping or
     iperf). In the task file this server is called host.
diff --git a/yardstick/plot/__init__.py b/yardstick/plot/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/yardstick/plot/plotter.py b/yardstick/plot/plotter.py
new file mode 100644 (file)
index 0000000..f3fb75d
--- /dev/null
@@ -0,0 +1,311 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+''' yardstick-plot - a command line tool for visualizing results from the
+    output file of yardstick framework.
+
+    Example invocation:
+    $ yardstick-plot -i /tmp/yardstick.out -o /tmp/plots/
+'''
+
+import argparse
+import json
+import os
+import sys
+import time
+import matplotlib.pyplot as plt
+import matplotlib.lines as mlines
+
+
+class Parser(object):
+    ''' Command-line argument and input file parser for yardstick-plot tool'''
+
+    def __init__(self):
+        self.data = {
+            'ping': [],
+            'pktgen': [],
+            'iperf3': [],
+            'fio': []
+        }
+        self.default_input_loc = "/tmp/yardstick.out"
+
+    def _get_parser(self):
+        '''get a command-line parser'''
+        parser = argparse.ArgumentParser(
+            prog='yardstick-plot',
+            description="A tool for visualizing results from yardstick. "
+                        "Currently supports plotting graphs for output files "
+                        "from tests: " + str(self.data.keys())
+        )
+        parser.add_argument(
+            '-i', '--input',
+            help="The input file name. If left unspecified then "
+                 "it defaults to %s" % self.default_input_loc
+        )
+        parser.add_argument(
+            '-o', '--output-folder',
+            help="The output folder location. If left unspecified then "
+                 "it defaults to <script_directory>/plots/"
+        )
+        return parser
+
+    def _add_record(self, record):
+        '''add record to the relevant scenario'''
+        runner_object = record['sargs']['runner']['object']
+        for test_type in self.data.keys():
+            if test_type in runner_object:
+                self.data[test_type].append(record)
+
+    def parse_args(self):
+        '''parse command-line arguments'''
+        parser = self._get_parser()
+        self.args = parser.parse_args()
+        return self.args
+
+    def parse_input_file(self):
+        '''parse the input test results file'''
+        if self.args.input:
+            input_file = self.args.input
+        else:
+            print("No input file specified, reading from %s"
+                  % self.default_input_loc)
+            input_file = self.default_input_loc
+
+        try:
+            with open(input_file) as f:
+                for line in f:
+                    record = json.loads(line)
+                    self._add_record(record)
+        except IOError as e:
+            print(os.strerror(e.errno))
+            sys.exit(1)
+
+
+class Plotter(object):
+    '''Graph plotter for scenario-specific results from yardstick framework'''
+
+    def __init__(self, data, output_folder):
+        self.data = data
+        self.output_folder = output_folder
+        self.fig_counter = 1
+        self.colors = ['g', 'b', 'c', 'm', 'y']
+
+    def plot(self):
+        '''plot the graph(s)'''
+        for test_type in self.data.keys():
+            if self.data[test_type]:
+                plt.figure(self.fig_counter)
+                self.fig_counter += 1
+
+                plt.title(test_type, loc="left")
+                method_name = "_plot_" + test_type
+                getattr(self, method_name)(self.data[test_type])
+                self._save_plot(test_type)
+
+    def _save_plot(self, test_type):
+        '''save the graph to output folder'''
+        timestr = time.strftime("%Y%m%d-%H%M%S")
+        file_name = test_type + "_" + timestr + ".png"
+        if not self.output_folder:
+            curr_path = os.path.dirname(os.path.abspath(__file__))
+            self.output_folder = os.path.join(curr_path, "plots")
+        if not os.path.isdir(self.output_folder):
+            os.makedirs(self.output_folder)
+        new_file = os.path.join(self.output_folder, file_name)
+        plt.savefig(new_file)
+        print("Saved graph to " + new_file)
+
+    def _plot_ping(self, records):
+        '''ping test result interpretation and visualization on the graph'''
+        rtts = [r['benchmark']['data'] for r in records]
+        seqs = [r['benchmark']['sequence'] for r in records]
+
+        for i in range(0, len(rtts)):
+            # If SLA failed
+            if not rtts[i]:
+                rtts[i] = 0.0
+                plt.axvline(seqs[i], color='r')
+
+        # If there is a single data-point then display a bar-chart
+        if len(rtts) == 1:
+            plt.bar(1, rtts[0], 0.35, color=self.colors[0])
+        else:
+            plt.plot(seqs, rtts, self.colors[0]+'-')
+
+        self._construct_legend(['rtt'])
+        plt.xlabel("sequence number")
+        plt.xticks(seqs, seqs)
+        plt.ylabel("round trip time in milliseconds (rtt)")
+
+    def _plot_pktgen(self, records):
+        '''pktgen test result interpretation and visualization on the graph'''
+        flows = [r['benchmark']['data']['flows'] for r in records]
+        sent = [r['benchmark']['data']['packets_sent'] for r in records]
+        received = [int(r['benchmark']['data']['packets_received'])
+                    for r in records]
+
+        for i in range(0, len(sent)):
+            # If SLA failed
+            if not sent[i] or not received[i]:
+                sent[i] = 0.0
+                received[i] = 0.0
+                plt.axvline(flows[i], color='r')
+
+        ppm = [1000000.0*(i - j)/i for i, j in zip(sent, received)]
+
+        # If there is a single data-point then display a bar-chart
+        if len(ppm) == 1:
+            plt.bar(1, ppm[0], 0.35, color=self.colors[0])
+        else:
+            plt.plot(flows, ppm, self.colors[0]+'-')
+
+        self._construct_legend(['ppm'])
+        plt.xlabel("number of flows")
+        plt.ylabel("lost packets per million packets (ppm)")
+
+    def _plot_iperf3(self, records):
+        '''iperf3 test result interpretation and visualization on the graph'''
+        intervals = []
+        for r in records:
+            #  If did not fail the SLA
+            if r['benchmark']['data']:
+                intervals.append(r['benchmark']['data']['intervals'])
+            else:
+                intervals.append(None)
+
+        kbps = [0]
+        seconds = [0]
+        for i, val in enumerate(intervals):
+            if val:
+                for j, _ in enumerate(val):
+                    kbps.append(val[j]['sum']['bits_per_second']/1000)
+                    seconds.append(seconds[-1] + val[j]['sum']['seconds'])
+            else:
+                kbps.append(0.0)
+                # Don't know how long the failed test took, add 1 second
+                # TODO more accurate solution or replace x-axis from seconds
+                # to measurement nr
+                seconds.append(seconds[-1] + 1)
+                plt.axvline(seconds[-1], color='r')
+
+        self._construct_legend(['bandwidth'])
+        plt.plot(seconds[1:], kbps[1:], self.colors[0]+'-')
+        plt.xlabel("time in seconds")
+        plt.ylabel("bandwidth in Kb/s")
+
+    def _plot_fio(self, records):
+        '''fio test result interpretation and visualization on the graph'''
+        rw_types = [r['sargs']['options']['rw'] for r in records]
+        seqs = [x for x in range(1, len(records) + 1)]
+        data = {}
+
+        for i in range(0, len(records)):
+            is_r_type = rw_types[i] == "read" or rw_types[i] == "randread"
+            is_w_type = rw_types[i] == "write" or rw_types[i] == "randwrite"
+            is_rw_type = rw_types[i] == "rw" or rw_types[i] == "randrw"
+
+            if is_r_type or is_rw_type:
+                # Remove trailing 'usec' and convert to float
+                data['read_lat'] = \
+                    [r['benchmark']['data']['read_lat'][:-4] for r in records]
+                data['read_lat'] = \
+                    [float(i) for i in data['read_lat']]
+                # Remove trailing 'KB/s' and convert to float
+                data['read_bw'] = \
+                    [r['benchmark']['data']['read_bw'][:-4] for r in records]
+                data['read_bw'] =  \
+                    [float(i) for i in data['read_bw']]
+                # Convert to int
+                data['read_iops'] = \
+                    [r['benchmark']['data']['read_iops'] for r in records]
+                data['read_iops'] = \
+                    [int(i) for i in data['read_iops']]
+
+            if is_w_type or is_rw_type:
+                data['write_lat'] = \
+                    [r['benchmark']['data']['write_lat'][:-4] for r in records]
+                data['write_lat'] = \
+                    [float(i) for i in data['write_lat']]
+
+                data['write_bw'] = \
+                    [r['benchmark']['data']['write_bw'][:-4] for r in records]
+                data['write_bw'] = \
+                    [float(i) for i in data['write_bw']]
+
+                data['write_iops'] = \
+                    [r['benchmark']['data']['write_iops'] for r in records]
+                data['write_iops'] = \
+                    [int(i) for i in data['write_iops']]
+
+        # Divide the area into 3 subplots, sharing a common x-axis
+        fig, axl = plt.subplots(3, sharex=True)
+        axl[0].set_title("fio", loc="left")
+
+        self._plot_fio_helper(data, seqs, 'read_bw', self.colors[0], axl[0])
+        self._plot_fio_helper(data, seqs, 'write_bw', self.colors[1], axl[0])
+        axl[0].set_ylabel("Bandwidth in KB/s")
+
+        self._plot_fio_helper(data, seqs, 'read_iops', self.colors[0], axl[1])
+        self._plot_fio_helper(data, seqs, 'write_iops', self.colors[1], axl[1])
+        axl[1].set_ylabel("IOPS")
+
+        self._plot_fio_helper(data, seqs, 'read_lat', self.colors[0], axl[2])
+        self._plot_fio_helper(data, seqs, 'write_lat', self.colors[1], axl[2])
+        axl[2].set_ylabel("Latency in " + u"\u00B5s")
+
+        self._construct_legend(['read', 'write'], obj=axl[0])
+        plt.xlabel("Sequence number")
+        plt.xticks(seqs, seqs)
+
+    def _plot_fio_helper(self, data, seqs, key, bar_color, axl):
+        '''check if measurements exist for a key and then plot the
+           data to a given subplot'''
+        if key in data:
+            if len(data[key]) == 1:
+                axl.bar(0.1, data[key], 0.35, color=bar_color)
+            else:
+                line_style = bar_color + '-'
+                axl.plot(seqs, data[key], line_style)
+
+    def _construct_legend(self, legend_texts, obj=plt):
+        '''construct legend for the plot or subplot'''
+        ci = 0
+        lines = []
+
+        for text in legend_texts:
+            line = mlines.Line2D([], [], color=self.colors[ci], label=text)
+            lines.append(line)
+            ci += 1
+
+        lines.append(mlines.Line2D([], [], color='r', label="SLA failed"))
+
+        getattr(obj, "legend")(
+            bbox_to_anchor=(0.25, 1.02, 0.75, .102),
+            loc=3,
+            borderaxespad=0.0,
+            ncol=len(lines),
+            mode="expand",
+            handles=lines
+        )
+
+
+def main():
+    parser = Parser()
+    args = parser.parse_args()
+    print("Parsing input file")
+    parser.parse_input_file()
+    print("Initializing plotter")
+    plotter = Plotter(parser.data, args.output_folder)
+    print("Plotting graph(s)")
+    plotter.plot()
+
+if __name__ == '__main__':
+    main()
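
To close the loop (structure inferred from the key accesses in this file; values are made up): yardstick-plot reads /tmp/yardstick.out line by line, where each line is one JSON record as queued by the runners. The fields the plotter touches look roughly like this:

    # Approximate shape of one input line; only the keys actually read by
    # Parser/Plotter are shown, and the values are illustrative.
    record = {
        "sargs": {
            "runner": {"object": "...ping..."},   # matched against 'ping', 'pktgen', 'iperf3', 'fio'
            "options": {"rw": "randrw"},          # consulted by _plot_fio only
        },
        "benchmark": {"sequence": 1, "data": 0.75, "errors": ""},  # 'data' is the rtt for ping
    }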