Merge "part 2 :add coverage report in Jenkins for Yardstick"
author    Hou Jingwen <houjingwen@huawei.com>
          Thu, 5 Nov 2015 09:03:09 +0000 (09:03 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
          Thu, 5 Nov 2015 09:03:09 +0000 (09:03 +0000)
20 files changed:
ci/docker/yardstick-ci/Dockerfile
ci/docker/yardstick-ci/run_benchmarks
ci/yardstick-verify
docs/source/vTC/README.rst [new file with mode: 0644]
docs/source/vTC/abbreviations.rst [new file with mode: 0644]
etc/yardstick/nodes/pod.yaml.sample [new file with mode: 0644]
samples/lmbench.yaml
samples/ping-node-context.yaml [new file with mode: 0644]
tests/functional/test_cli_scenario.py
tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml [new file with mode: 0644]
tests/unit/benchmark/contexts/nodes_sample.yaml [new file with mode: 0644]
tests/unit/benchmark/contexts/test_node.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/compute/test_lmbench.py [new file with mode: 0644]
vTC/build.sh [new file with mode: 0644]
yardstick/benchmark/contexts/node.py [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/lmbench.py
yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/compute/lmbench_latency_benchmark.bash [moved from yardstick/benchmark/scenarios/compute/lmbench_benchmark.bash with 100% similarity]
yardstick/benchmark/scenarios/networking/ping.py
yardstick/dispatcher/http.py

index 15b0f62..9a1e832 100644 (file)
@@ -11,7 +11,11 @@ FROM ubuntu:14.04
 
 LABEL image=opnfv/yardstick-ci
 
-ENV YARDSTICK_REPO_DIR /home/yardstick
+# GIT repo directory
+ENV REPOS_DIR /home/opnfv/repos
+
+# Yardstick repo
+ENV YARDSTICK_REPO_DIR ${REPOS_DIR}/yardstick
 
 RUN apt-get update && apt-get install -y \
     wget \
@@ -29,6 +33,9 @@ RUN apt-get update && apt-get install -y \
 RUN apt-get -y autoremove && \
     apt-get clean
 
+RUN mkdir -p ${REPOS_DIR}
+
+RUN git config --global http.sslVerify false
 RUN git clone https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
 
 COPY ./run_benchmarks /usr/local/bin/
index 391ee63..501b661 100755 (executable)
 set -e
 
 : ${YARDSTICK_REPO:='https://gerrit.opnfv.org/gerrit/yardstick'}
-: ${YARDSTICK_REPO_DIR:='/home/yardstick'}
-: ${YARDSTICK_BRANCH:='master'}
+: ${YARDSTICK_REPO_DIR:='/home/opnfv/repos/yardstick'}
+: ${YARDSTICK_BRANCH:='master'} # branch, tag, sha1 or refspec
 
 : ${RELENG_REPO:='https://gerrit.opnfv.org/gerrit/releng'}
-: ${RELENG_REPO_DIR:='/home/releng'}
-: ${RELENG_BRANCH:='master'}
+: ${RELENG_REPO_DIR:='/home/opnfv/repos/releng'}
+: ${RELENG_BRANCH:='master'} # branch, tag, sha1 or refspec
 
 : ${INSTALLER_TYPE:='fuel'}
 : ${INSTALLER_IP:='10.20.0.2'}
 
-: ${EXTERNAL_NET_ID:='net04_ext'}
+: ${POD_NAME:='opnfv-jump-2'}
+: ${EXTERNAL_NET:='net04_ext'}
 
-# clone releng
+git_checkout()
+{
+    if git cat-file -e $1^{commit} 2>/dev/null; then
+        # branch, tag or sha1 object
+        git checkout $1
+    else
+        # refspec / changeset
+        git fetch --tags --progress $2 $1
+        git checkout FETCH_HEAD
+    fi
+}
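+# Usage: git_checkout <ref> <repo-url>. A branch/tag/sha1 such as "master"
+# is checked out directly; a Gerrit refspec (hypothetical example:
+# "refs/changes/57/1457/1") is fetched first and checked out via FETCH_HEAD.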
+
+echo
+echo "INFO: Updating releng -> $RELENG_BRANCH"
 if [ ! -d $RELENG_REPO_DIR ]; then
     git clone $RELENG_REPO $RELENG_REPO_DIR
 fi
 cd $RELENG_REPO_DIR
-git fetch --tags --progress $RELENG_REPO $RELENG_BRANCH
-git checkout FETCH_HEAD
+git checkout master && git pull
+git_checkout $RELENG_BRANCH $RELENG_REPO
 
-# clone yardstick
+echo
+echo "INFO: Updating yardstick -> $YARDSTICK_BRANCH"
 if [ ! -d $YARDSTICK_REPO_DIR ]; then
     git clone $YARDSTICK_REPO $YARDSTICK_REPO_DIR
 fi
 cd $YARDSTICK_REPO_DIR
-git fetch --tags --progress $YARDSTICK_REPO $YARDSTICK_BRANCH
-git checkout FETCH_HEAD
+git checkout master && git pull
+git_checkout $YARDSTICK_BRANCH $YARDSTICK_REPO
+
+echo
+echo "INFO: Creating openstack credentials .."
 
 # Create openstack credentials
 $RELENG_REPO_DIR/utils/fetch_os_creds.sh \
@@ -51,10 +69,11 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
     ssh_opts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
     if sshpass -p r00tme ssh 2>/dev/null $ssh_opts root@${INSTALLER_IP} \
         fuel environment --env 1 | grep opnfv-virt; then
+        echo "INFO: applying OPNFV playground hack"
         export OS_ENDPOINT_TYPE='publicURL'
     fi
 fi
 
-export EXTERNAL_NET_ID
+export EXTERNAL_NET INSTALLER_TYPE POD_NAME
 
-$YARDSTICK_REPO_DIR/ci/yardstick-verify
+$YARDSTICK_REPO_DIR/ci/yardstick-verify "$@"
index 15ea022..beb2170 100755 (executable)
@@ -8,10 +8,60 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-YARDSTICK_IMAGE_ID=
-CIRROS_IMAGE_ID=
+#
+# Set up the environment and run yardstick test suites.
+#
+# Example invocation: yardstick-verify -r 10.4.4.4 suite1.yaml suite2.yaml
+#
+# OpenStack credentials must be set and the script must be run from its
+# original location in the yardstick repo.
+#
+# This script is intended to be used by the CI pipeline but it may also
+# be invoked manually.
+#
 
-QCOW_IMAGE="/tmp/workspace/yardstick/yardstick-trusty-server.img"
+SCRIPT=$0
+SCRIPT_ARGS=$@
+
+usage()
+{
+    cat << EOF
+usage: $0 [OPTIONS] [TEST_SUITE ...]
+
+If no test suites are given, ping.yaml is run.
+
+OPTIONS:
+   -h      Show this message
+   -r      IP address of the Result API.
+           If this option is not present, results are stored in the
+           file $DISPATCHER_FILE_NAME.
+
+EOF
+}
+
+DISPATCHER_TYPE=file
+DISPATCHER_FILE_NAME="/tmp/yardstick.out"
+DISPATCHER_HTTP_TARGET=
+
+while getopts "r:h" OPTION; do
+    case $OPTION in
+        h)
+            usage
+            exit 0
+            ;;
+        r)
+            DISPATCHER_TYPE=http
+            DISPATCHER_HTTP_TARGET=http://${OPTARG}/results
+            ;;
+        *)
+            echo "${OPTION} is not a valid argument"
+            exit 1
+            ;;
+    esac
+done
+
+shift $((OPTIND - 1))
+TEST_SUITES=$@
 
 cleanup()
 {
@@ -124,13 +174,64 @@ load_yardstick_image()
 run_test()
 {
     echo
-    echo "========== Running yardstick test suite =========="
+    echo "========== Running yardstick test suites =========="
+
+    mkdir -p /etc/yardstick
+
+    cat << EOF > /etc/yardstick/yardstick.conf
+[DEFAULT]
+debug = True
+dispatcher = ${DISPATCHER_TYPE}
+
+[dispatcher_file]
+file_name = ${DISPATCHER_FILE_NAME}
+
+[dispatcher_http]
+timeout = 5
+target = ${DISPATCHER_HTTP_TARGET}
+EOF
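+
+    # With -r, the http dispatcher POSTs results to http://<ip>/results;
+    # otherwise the file dispatcher writes them to $DISPATCHER_FILE_NAME.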
+
+    local failed=0
+
+    if [ ${#SUITE_FILES[@]} -gt 0 ]; then
+
+        for suite in ${SUITE_FILES[*]}; do
+
+            echo "---------------------------"
+            echo "Running test suite: $suite"
+            echo "---------------------------"
+
+            if ! yardstick task start --suite $suite; then
+                echo "test suite $suite FAILED"
+
+                # Mark the test suite failed but continue
+                # running the remaining test suites.
+                (( failed++ ))
+            fi
+
+        done
+
+        if [ $failed -gt 0 ]; then
+
+            echo "---------------------------"
+            echo "$failed out of ${#SUITE_FILES[@]} test suites FAILED"
+            echo "---------------------------"
+            exit 1
+        fi
+
+    else
+
+        echo "---------------------------"
+        echo "Running samples/ping.yaml  "
+        echo "---------------------------"
+
+        if ! yardstick task start samples/ping.yaml; then
+            echo "Yardstick test FAILED"
+            exit 1
+        fi
 
-    # Just run sample ping for now.
-    if ! yardstick -d task start samples/ping.yaml; then
-        echo "Yardstick test FAILED"
-        exit 1
     fi
+
 }
 
 main()
@@ -139,6 +240,34 @@ main()
 
     cd $GITROOT
 
+    export YARDSTICK_VERSION=$(git rev-parse HEAD)
+
+    SUITE_FILES=()
+
+    # find the test suite files
+    for suite in $TEST_SUITES; do
+        if [ -f $suite ]; then
+            SUITE_FILES+=($suite)
+        else
+            tsdir=$GITROOT/tests/opnfv/test_suites
+            if [ ! -f $tsdir/$suite ]; then
+                echo "Test suite \"$suite\" does not exist"
+                exit 1
+            fi
+            SUITE_FILES+=($tsdir/$suite)
+        fi
+    done
+
+    echo
+    echo "========== Running Yardstick CI with following parameters =========="
+    echo "Script options: ${SCRIPT} $SCRIPT_ARGS"
+    echo "Result API: ${DISPATCHER_HTTP_TARGET:-$DISPATCHER_FILE_NAME}"
+    echo "YARDSTICK_VERSION: ${YARDSTICK_VERSION}"
+    echo "Number of test suites: ${#SUITE_FILES[@]}"
+    for suite in ${SUITE_FILES[*]}; do
+        echo "     $suite"
+    done
+
     # install yardstick
     install_yardstick
 
@@ -148,22 +277,6 @@ main()
         exit 1
     fi
 
-    # extract auth ip
-    ip=$(echo $OS_AUTH_URL | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+')
-
-    ## FIXME: temporarily disabling this because
-    ## of timeout errors on LF-POD2.
-    ## Maybe we need a longer timeout ??
-    # check if the auth port is open
-    # echo "Checking if tcp port $ip:5000 is open..."
-    # nc -zv -w 10 $ip 5000; rc=$?;
-    # if [ $rc -eq 0 ]; then
-    #     echo "$ip:5000 is open for tcp connections"
-    # else
-    #     echo "$ip:5000 is closed"
-    #     exit 1
-    # fi
-
     # check if the api is up
     echo "Checking if OS API is working..."
     if ! glance image-list > /dev/null; then
@@ -175,6 +288,8 @@ main()
 
     trap "error_exit" EXIT SIGTERM
 
+    QCOW_IMAGE="/tmp/workspace/yardstick/yardstick-trusty-server.img"
+
     build_yardstick_image
     load_yardstick_image
     load_cirros_image
diff --git a/docs/source/vTC/README.rst b/docs/source/vTC/README.rst
new file mode 100644 (file)
index 0000000..0185735
--- /dev/null
@@ -0,0 +1,96 @@
+=========
+Yardstick
+=========
+
+Overview of the virtual Traffic Classifier
+==========================================
+The virtual Traffic Classifier (vTC) VNF [1] comprises, in its current
+version, a single VNFC [2]. The VNFC contains both the Traffic Inspection
+module and the Traffic Forwarding module needed to run the VNF. The use of
+DPI methods for traffic classification is built around two basic assumptions:
+(i) third parties unaffiliated with either source or recipient are able to
+inspect each IP packet’s payload, and
+(ii) the classifier knows the relevant syntax of each application’s packet
+payloads (protocol signatures, data patterns, etc.).
+The proposed DPI-based approach uses only an indicative, small number of the
+initial packets from each flow to identify the content, rather than
+inspecting every packet.
+In this respect it follows the Packet Based per Flow State (PBFS) method,
+which uses a table, keyed on the 5-tuple
+(source address, destination address, source port, destination port,
+transport protocol), to track each flow.
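+
+A minimal illustration of PBFS-style tracking (Python sketch for exposition
+only; names such as dpi_match are hypothetical, not part of the VNF)::
+
+    flows = {}  # 5-tuple -> {"count": packets seen, "app": classification}
+
+    def classify(pkt, max_inspect=10):
+        key = (pkt.src, pkt.dst, pkt.sport, pkt.dport, pkt.proto)
+        state = flows.setdefault(key, {"count": 0, "app": None})
+        if state["app"] is None and state["count"] < max_inspect:
+            state["count"] += 1
+            state["app"] = dpi_match(pkt.payload)  # None until identified
+        return state["app"]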
+
+Concepts
+========
+Traffic Inspection: The process of packet analysis and application
+identification of network traffic that passes through the vTC.
+
+Traffic Forwarding: The process of packet forwarding from an incoming
+network interface to a pre-defined outgoing network interface.
+
+Traffic Rule Application: The process of packet tagging, based on a
+predefined set of rules. Packet tagging may include e.g. ToS field modification.
+
+Architecture
+============
+
+The Traffic Inspection module is the most computationally intensive component
+of the VNF. It implements filtering and packet matching algorithms in order to
+support the enhanced traffic forwarding capability of the VNF. The component
+supports a flow table (exploiting hashing algorithms for fast indexing of flows)
+and an inspection engine for traffic classification. The implementation used for
+these experiments exploits the nDPI library. The packet capturing mechanism is
+implemented using libpcap. When the DPI engine identifies a new flow, the flow
+register is updated with the appropriate information and transmitted across the
+Traffic Forwarding module, which then applies any required policy updates.
+The Traffic Forwarding module is responsible for routing and packet forwarding.
+It accepts incoming network traffic, consults the flow table for classification
+information for each incoming flow and then applies pre-defined policies, e.g.
+Type of Service/Differentiated Services Code Point (ToS/DSCP) marking of
+multimedia traffic, for QoS enablement on the forwarded traffic. It is assumed
+that traffic is forwarded using the default policy until it is identified and
+new policies are enforced. The expected response delay is considered
+negligible, as only a small number of packets are required to identify each flow.
+
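+A sketch of the consult-and-mark step described above (illustrative Python;
+five_tuple, MULTIMEDIA_APPS, DSCP_EF and transmit are hypothetical helpers)::
+
+    def forward(pkt, flow_table):
+        app = flow_table.get(five_tuple(pkt))  # classification, if known yet
+        if app in MULTIMEDIA_APPS:
+            pkt.tos = DSCP_EF  # pre-defined QoS marking policy
+        transmit(pkt)  # default policy applies until the flow is identified
+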
+Graphical Overview
+==================
+
++----------------------------+
+|                            |
+| Virtual Traffic Classifier |
+|                            |
+|     Analysing/Forwarding   |
+|         +-------->         |
+|     ethA          ethB     |
++------+--------------+------+
+       |              ^
+       |              |
+       |              |
+       |              |
+       v              |
++------+--------------+------+
+|                            |
+|     Virtual Switch         |
+|                            |
++----------------------------+
+
+
+Install
+=======
+
+Run build.sh with root privileges.
+
+Run
+===
+
+sudo ./pfbridge -a eth1 -b eth2
+
+Custom Image
+============
+
+TBD
+
+Development Environment
+=======================
+
+A VM running Ubuntu 14.04 or later.
diff --git a/docs/source/vTC/abbreviations.rst b/docs/source/vTC/abbreviations.rst
new file mode 100644 (file)
index 0000000..6147541
--- /dev/null
@@ -0,0 +1,6 @@
+Abbreviations for the virtual Traffic Classifier
+================================================
+
+[1] VNF - Virtual Network Function
+[2] VNFC - Virtual Network Function Component
+
diff --git a/etc/yardstick/nodes/pod.yaml.sample b/etc/yardstick/nodes/pod.yaml.sample
new file mode 100644 (file)
index 0000000..a374596
--- /dev/null
@@ -0,0 +1,24 @@
+---
+# Sample config file about the POD information, including the
+# name/IP/user/ssh key of Bare Metal and Controllers/Computes
+#
+# The options of this config file include:
+# name: the name of this node
+# role: the node's role; supported roles: Master/Controller/Compute/Baremetal
+# ip: the node's IP address
+# user: the username for login
+# key_filename: the path of the private key file for login
+
+nodes:
+-
+    name: athena
+    role: Controller
+    ip: 10.229.47.137
+    user: root
+    key_filename: /root/yardstick/yardstick/resources/files/yardstick_key
+-
+    name: ares
+    role: Controller
+    ip: 10.229.47.138
+    user: root
+    key_filename: /root/yardstick/yardstick/resources/files/yardstick_key
index 256d8c6..2b8e990 100644 (file)
@@ -1,6 +1,6 @@
 ---
 # Sample benchmark task config file
-# measure memory read latency using lmbench
+# measure memory read latency and memory bandwidth using lmbench
 
 schema: "yardstick:task:0.1"
 
@@ -8,6 +8,7 @@ scenarios:
 -
   type: Lmbench
   options:
+    test_type: "latency"
     stride: 64
     stop_size: 32
 
@@ -22,6 +23,24 @@ scenarios:
   sla:
     max_latency: 35
     action: monitor
+-
+  type: Lmbench
+  options:
+    test_type: "bandwidth"
+    size: 500
+    benchmark: "wr"
+
+  host: demeter.demo
+
+  runner:
+    type: Arithmetic
+    name: size
+    stop: 2000
+    step: 500
+
+  sla:
+    min_bandwidth: 10000
+    action: monitor
 
 context:
   name: demo
diff --git a/samples/ping-node-context.yaml b/samples/ping-node-context.yaml
new file mode 100644 (file)
index 0000000..2edc05e
--- /dev/null
@@ -0,0 +1,29 @@
+---
+# Sample benchmark task config file
+# measure network latency using ping
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Ping
+  options:
+    packetsize: 200
+  host: athena.LF
+  target: ares.LF
+
+  runner:
+    type: Duration
+    duration: 60
+    interval: 1
+
+  sla:
+    max_rtt: 10
+    action: monitor
+
+
+context:
+  type: Node
+  name: LF
+  file: /etc/yardstick/nodes/pod.yaml
+
index aad4759..8779737 100755 (executable)
@@ -31,7 +31,8 @@ class ScenarioTestCase(unittest.TestCase):
 
     def test_scenario_show_Lmbench(self):
         res = self.yardstick("scenario show Lmbench")
-        lmbench = "Execute lmbench memory read latency benchmark in a host" in res
+        lmbench = "Execute lmbench memory read latency"
+        "or memory bandwidth benchmark in a host" in res
         self.assertTrue(lmbench)
 
     def test_scenario_show_Perf(self):
diff --git a/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml b/tests/unit/benchmark/contexts/nodes_duplicate_sample.yaml
new file mode 100644 (file)
index 0000000..cdb5138
--- /dev/null
@@ -0,0 +1,13 @@
+nodes:
+-
+    name: node1
+    role: Controller
+    ip: 10.229.47.137
+    user: root
+    key_filename: /root/.yardstick_key
+-
+    name: node1
+    role: Controller
+    ip: 10.229.47.138
+    user: root
+    key_filename: /root/.yardstick_key
diff --git a/tests/unit/benchmark/contexts/nodes_sample.yaml b/tests/unit/benchmark/contexts/nodes_sample.yaml
new file mode 100644 (file)
index 0000000..59b5bb9
--- /dev/null
@@ -0,0 +1,25 @@
+nodes:
+-
+    name: node1
+    role: Controller
+    ip: 10.229.47.137
+    user: root
+    key_filename: /root/.yardstick_key
+-
+    name: node2
+    role: Controller
+    ip: 10.229.47.138
+    user: root
+    key_filename: /root/.yardstick_key
+-
+    name: node3
+    role: Compute
+    ip: 10.229.47.139
+    user: root
+    key_filename: /root/.yardstick_key
+-
+    name: node4
+    role: Baremetal
+    ip: 10.229.47.140
+    user: root
+    key_filename: /root/.yardstick_key
diff --git a/tests/unit/benchmark/contexts/test_node.py b/tests/unit/benchmark/contexts/test_node.py
new file mode 100644 (file)
index 0000000..6939b85
--- /dev/null
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.contexts.node
+
+import os
+import unittest
+
+from yardstick.benchmark.contexts import node
+
+
+class NodeContextTestCase(unittest.TestCase):
+
+    NODES_SAMPLE = "nodes_sample.yaml"
+    NODES_DUPLICATE_SAMPLE = "nodes_duplicate_sample.yaml"
+
+    def setUp(self):
+        self.test_context = node.NodeContext()
+
+    def test_construct(self):
+
+        self.assertIsNone(self.test_context.name)
+        self.assertIsNone(self.test_context.file_path)
+        self.assertEqual(self.test_context.nodes, [])
+        self.assertEqual(self.test_context.controllers, [])
+        self.assertEqual(self.test_context.computes, [])
+        self.assertEqual(self.test_context.baremetals, [])
+
+    def test_unsuccessful_init(self):
+
+        attrs = {
+            'name': 'foo',
+            'file': self._get_file_abspath("error_file")
+        }
+
+        self.assertRaises(SystemExit, self.test_context.init, attrs)
+
+    def test_successful_init(self):
+
+        attrs = {
+            'name': 'foo',
+            'file': self._get_file_abspath(self.NODES_SAMPLE)
+        }
+
+        self.test_context.init(attrs)
+
+        self.assertEqual(self.test_context.name, "foo")
+        self.assertEqual(len(self.test_context.nodes), 4)
+        self.assertEqual(len(self.test_context.controllers), 2)
+        self.assertEqual(len(self.test_context.computes), 1)
+        self.assertEqual(self.test_context.computes[0]["name"], "node3")
+        self.assertEqual(len(self.test_context.baremetals), 1)
+        self.assertEqual(self.test_context.baremetals[0]["name"], "node4")
+
+    def test__get_server_with_dic_attr_name(self):
+
+        attrs = {
+            'name': 'foo',
+            'file': self._get_file_abspath(self.NODES_SAMPLE)
+        }
+
+        self.test_context.init(attrs)
+
+        attr_name = {'name': 'foo.bar'}
+        result = self.test_context._get_server(attr_name)
+
+        self.assertEqual(result, None)
+
+    def test__get_server_not_found(self):
+
+        attrs = {
+            'name': 'foo',
+            'file': self._get_file_abspath(self.NODES_SAMPLE)
+        }
+
+        self.test_context.init(attrs)
+
+        attr_name = 'bar.foo'
+        result = self.test_context._get_server(attr_name)
+
+        self.assertEqual(result, None)
+
+    def test__get_server_duplicate(self):
+
+        attrs = {
+            'name': 'foo',
+            'file': self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
+        }
+
+        self.test_context.init(attrs)
+
+        attr_name = 'node1.foo'
+
+        self.assertRaises(SystemExit, self.test_context._get_server, attr_name)
+
+    def test__get_server_found(self):
+
+        attrs = {
+            'name': 'foo',
+            'file': self._get_file_abspath(self.NODES_SAMPLE)
+        }
+
+        self.test_context.init(attrs)
+
+        attr_name = 'node1.foo'
+        result = self.test_context._get_server(attr_name)
+
+        self.assertEqual(result['ip'], '10.229.47.137')
+        self.assertEqual(result['name'], 'node1.foo')
+        self.assertEqual(result['user'], 'root')
+        self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+
+    def _get_file_abspath(self, filename):
+        curr_path = os.path.dirname(os.path.abspath(__file__))
+        file_path = os.path.join(curr_path, filename)
+        return file_path
diff --git a/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/tests/unit/benchmark/scenarios/compute/test_lmbench.py
new file mode 100644 (file)
index 0000000..1b24258
--- /dev/null
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.compute.lmbench.Lmbench
+
+import mock
+import unittest
+import json
+
+from yardstick.benchmark.scenarios.compute import lmbench
+
+
+@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
+class LmbenchTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': {
+                'ip': '172.16.0.137',
+                'user': 'cirros',
+                'key_filename': "mykey.key"
+            }
+        }
+
+        self.result = {}
+
+    def test_successful_setup(self, mock_ssh):
+
+        l = lmbench.Lmbench({}, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+
+        l.setup()
+        self.assertIsNotNone(l.client)
+        self.assertTrue(l.setup_done)
+
+    def test_unsuccessful_unknown_type_run(self, mock_ssh):
+
+        options = {
+            "test_type": "foo"
+        }
+        args = {'options': options}
+
+        l = lmbench.Lmbench(args, self.ctx)
+
+        self.assertRaises(RuntimeError, l.run, self.result)
+
+    def test_successful_latency_run_no_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency",
+            "stride": 64,
+            "stop_size": 16
+        }
+        args = {'options': options}
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        self.assertEqual(self.result, expected_result)
+
+    def test_successful_bandwidth_run_no_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "bandwidth",
+            "size": 500,
+            "benchmark": "rd",
+            "warmup": 0
+        }
+        args = {"options": options}
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(self.result, expected_result)
+
+    def test_successful_latency_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency",
+            "stride": 64,
+            "stop_size": 16
+        }
+        args = {
+            "options": options,
+            "sla": {"max_latency": 35}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '[{"latency": 4.944, "size": 0.00049}]'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads('{"latencies": ' + sample_output + "}")
+        self.assertEqual(self.result, expected_result)
+
+    def test_successful_bandwidth_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "bandwidth",
+            "size": 500,
+            "benchmark": "rd",
+            "warmup": 0
+        }
+        args = {
+            "options": options,
+            "sla": {"min_bandwidth": 10000}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 11025.5}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        l.run(self.result)
+        expected_result = json.loads(sample_output)
+        self.assertEqual(self.result, expected_result)
+
+    def test_unsuccessful_latency_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "latency",
+            "stride": 64,
+            "stop_size": 16
+        }
+        args = {
+            "options": options,
+            "sla": {"max_latency": 35}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '[{"latency": 37.5, "size": 0.00049}]'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, l.run, self.result)
+
+    def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
+
+        options = {
+            "test_type": "bandwidth",
+            "size": 500,
+            "benchmark": "rd",
+            "warmup": 0
+        }
+        args = {
+            "options": options,
+            "sla": {"min_bandwidth": 10000}
+        }
+        l = lmbench.Lmbench(args, self.ctx)
+
+        sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
+        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
+        self.assertRaises(AssertionError, l.run, self.result)
+
+    def test_unsuccessful_script_error(self, mock_ssh):
+
+        options = {"test_type": "bandwidth"}
+        args = {"options": options}
+        l = lmbench.Lmbench(args, self.ctx)
+
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, l.run, self.result)
diff --git a/vTC/build.sh b/vTC/build.sh
new file mode 100644 (file)
index 0000000..aa4e463
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+# Jira No.137
+
+# download and install required libraries
+apt-get update
+apt-get install -y git build-essential gcc libnuma-dev bison flex byacc \
+    libjson0-dev libcurl4-gnutls-dev jq dh-autoreconf libpcap-dev \
+    libpulse-dev libtool pkg-config
+
+# Setup for PF_RING and bridge between interfaces
+
+# Get the source code from the bitbucket repository with OAuth2 authentication
+rm -f resp.json
+curl -X POST -u "mPkgwvJPsTFS8hYmHk:SDczcrK4cvnkMRWSEchB3ANcWbqFXqPx" \
+    https://bitbucket.org/site/oauth2/access_token \
+    -d grant_type=refresh_token -d refresh_token=38uFQuhEdPvCTbhc7k >> resp.json
+access_token=$(jq -r '.access_token' resp.json)
+git clone https://x-token-auth:${access_token}@bitbucket.org/akiskourtis/vtc.git
+cd vtc
+git checkout -b stable
+# Build the nDPI library
+cd nDPI
+NDPI_DIR=$(pwd)
+echo $NDPI_DIR
+NDPI_INCLUDE=$(pwd)/src/include
+echo $NDPI_INCLUDE
+./autogen.sh
+./configure
+make
+make install
+
+# Build the PF_RING library
+cd ../PF_RING
+make
+# Build PF_RING examples, including the modified pfbridge with nDPI integrated.
+cd userland/examples/
+sed -i 's#EXTRA_LIBS =#EXTRA_LIBS='"${NDPI_DIR}"'/src/lib/.libs/libndpi.a -ljson-c#' ./Makefile
+sed -i 's# -Ithird-party# -Ithird-party/ -I'"$NDPI_INCLUDE"' -I'"$NDPI_DIR"'#' ./Makefile
+echo $NDPI_DIR
+make
+cd ../..
+cd ..
+cd ..
+#sudo rmmod pf_ring
+insmod ./vtc/PF_RING/kernel/pf_ring.ko min_num_slots=16384 enable_debug=1 quick_mode=1 enable_tx_capture=0
+#./vtc/PF_RING/userland/examples/pfbridge -a eth1 -b eth2
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
new file mode 100644 (file)
index 0000000..04c8e7c
--- /dev/null
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import sys
+import yaml
+import logging
+
+from yardstick.benchmark.contexts.base import Context
+
+LOG = logging.getLogger(__name__)
+
+
+class NodeContext(Context):
+    '''Class that handles nodes info'''
+
+    __context_type__ = "Node"
+
+    def __init__(self):
+        self.name = None
+        self.file_path = None
+        self.nodes = []
+        self.controllers = []
+        self.computes = []
+        self.baremetals = []
+        super(NodeContext, self).__init__()
+
+    def init(self, attrs):
+        '''initializes itself from the supplied arguments'''
+        self.name = attrs["name"]
+        self.file_path = attrs.get("file", "/etc/yardstick/nodes/pod.yaml")
+
+        LOG.info("Parsing pod file: %s", self.file_path)
+
+        try:
+            with open(self.file_path) as stream:
+                cfg = yaml.safe_load(stream)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self.nodes.extend(cfg["nodes"])
+        self.controllers.extend([node for node in cfg["nodes"]
+                                if node["role"] == "Controller"])
+        self.computes.extend([node for node in cfg["nodes"]
+                             if node["role"] == "Compute"])
+        self.baremetals.extend([node for node in cfg["nodes"]
+                               if node["role"] == "Baremetal"])
+        LOG.debug("Nodes: %r", self.nodes)
+        LOG.debug("Controllers: %r", self.controllers)
+        LOG.debug("Computes: %r", self.computes)
+        LOG.debug("BareMetals: %r", self.baremetals)
+
+    def deploy(self):
+        '''don't need to deploy'''
+        pass
+
+    def undeploy(self):
+        '''don't need to undeploy'''
+        pass
+
+    def _get_server(self, attr_name):
+        '''lookup server info by name from context
+        attr_name: a name for a server listed in nodes config file
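+
+        Example (from the unit tests): attr_name "node1.foo" in a context
+        named "foo" returns the server entry for node "node1".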
+        '''
+        if isinstance(attr_name, dict):
+            return None
+
+        if self.name != attr_name.split(".")[1]:
+            return None
+        node_name = attr_name.split(".")[0]
+        nodes = [n for n in self.nodes
+                 if n["name"] == node_name]
+        if len(nodes) == 0:
+            return None
+        elif len(nodes) > 1:
+            LOG.error("Duplicate nodes!!!")
+            LOG.error("Nodes: %r" % nodes)
+            sys.exit(-1)
+
+        node = nodes[0]
+
+        server = {
+            "name": attr_name,
+            "ip": node["ip"],
+            "user": node["user"],
+            "key_filename": node["key_filename"]
+        }
+
+        return server
index 03caff5..b9adf50 100644 (file)
@@ -17,9 +17,15 @@ LOG = logging.getLogger(__name__)
 
 
 class Lmbench(base.Scenario):
-    """Execute lmbench memory read latency benchmark in a host
+    """Execute lmbench memory read latency or memory bandwidth benchmark in a host
 
     Parameters
+        test_type - specifies whether to measure memory latency or bandwidth
+            type:       string
+            unit:       na
+            default:    "latency"
+
+    Parameters for memory read latency benchmark
         stride - number of locations in memory between starts of array elements
             type:       int
             unit:       bytes
@@ -29,11 +35,28 @@ class Lmbench(base.Scenario):
             unit:       megabytes
             default:    16
 
-    Results are accurate to the ~2-5 nanosecond range.
+        Results are accurate to the ~2-5 nanosecond range.
+
+    Parameters for memory bandwidth benchmark
+        size - the amount of memory to test
+            type:       int
+            unit:       kilobyte
+            default:    128
+        benchmark - the name of the memory bandwidth benchmark test to execute;
+            valid test names are rd, wr, rdwr, cp, frd, fwr, fcp, bzero, bcopy
+            type:       string
+            unit:       na
+            default:    "rd"
+        warmup - the number of repetitions to perform before taking measurements
+            type:       int
+            unit:       na
+            default:    0
+
+    More info: http://manpages.ubuntu.com/manpages/trusty/lmbench.8.html
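+
+    A hypothetical scenario_cfg fragment for the bandwidth case (values
+    based on samples/lmbench.yaml):
+
+        {'options': {'test_type': 'bandwidth', 'size': 500,
+                     'benchmark': 'wr', 'warmup': 0},
+         'sla': {'min_bandwidth': 10000}}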
     """
     __scenario_type__ = "Lmbench"
 
-    TARGET_SCRIPT = "lmbench_benchmark.bash"
+    LATENCY_BENCHMARK_SCRIPT = "lmbench_latency_benchmark.bash"
+    BANDWIDTH_BENCHMARK_SCRIPT = "lmbench_bandwidth_benchmark.bash"
 
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
@@ -42,9 +65,12 @@ class Lmbench(base.Scenario):
 
     def setup(self):
         """scenario setup"""
-        self.target_script = pkg_resources.resource_filename(
+        self.bandwidth_target_script = pkg_resources.resource_filename(
             "yardstick.benchmark.scenarios.compute",
-            Lmbench.TARGET_SCRIPT)
+            Lmbench.BANDWIDTH_BENCHMARK_SCRIPT)
+        self.latency_target_script = pkg_resources.resource_filename(
+            "yardstick.benchmark.scenarios.compute",
+            Lmbench.LATENCY_BENCHMARK_SCRIPT)
         host = self.context_cfg["host"]
         user = host.get("user", "ubuntu")
         ip = host.get("ip", None)
@@ -54,10 +80,11 @@ class Lmbench(base.Scenario):
         self.client = ssh.SSH(user, ip, key_filename=key_filename)
         self.client.wait(timeout=600)
 
-        # copy script to host
-        self.client.run("cat > ~/lmbench.sh",
-                        stdin=open(self.target_script, 'rb'))
-
+        # copy scripts to host
+        self.client.run("cat > ~/lmbench_latency.sh",
+                        stdin=open(self.latency_target_script, 'rb'))
+        self.client.run("cat > ~/lmbench_bandwidth.sh",
+                        stdin=open(self.bandwidth_target_script, 'rb'))
         self.setup_done = True
 
     def run(self, result):
@@ -67,25 +94,48 @@ class Lmbench(base.Scenario):
             self.setup()
 
         options = self.scenario_cfg['options']
-        stride = options.get('stride', 128)
-        stop_size = options.get('stop_size', 16)
+        test_type = options.get('test_type', 'latency')
+
+        if test_type == 'latency':
+            stride = options.get('stride', 128)
+            stop_size = options.get('stop_size', 16)
+            cmd = "sudo bash lmbench_latency.sh %d %d" % (stop_size, stride)
+        elif test_type == 'bandwidth':
+            size = options.get('size', 128)
+            benchmark = options.get('benchmark', 'rd')
+            warmup_repetitions = options.get('warmup', 0)
+            cmd = "sudo bash lmbench_bandwidth.sh %d %s %d" % \
+                  (size, benchmark, warmup_repetitions)
+        else:
+            raise RuntimeError("No such test_type: %s for Lmbench scenario"
+                               % test_type)
 
-        cmd = "sudo bash lmbench.sh %d %d" % (stop_size, stride)
         LOG.debug("Executing command: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
 
         if status:
             raise RuntimeError(stderr)
 
-        result.update({"latencies": json.loads(stdout)})
+        if test_type == 'latency':
+            result.update({"latencies": json.loads(stdout)})
+        else:
+            result.update(json.loads(stdout))
+
         if "sla" in self.scenario_cfg:
             sla_error = ""
-            sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
-            for t_latency in result:
-                latency = t_latency['latency']
-                if latency > sla_max_latency:
-                    sla_error += "latency %f > sla:max_latency(%f); " \
-                        % (latency, sla_max_latency)
+            if test_type == 'latency':
+                sla_max_latency = int(self.scenario_cfg['sla']['max_latency'])
+                for t_latency in result["latencies"]:
+                    latency = t_latency['latency']
+                    if latency > sla_max_latency:
+                        sla_error += "latency %f > sla:max_latency(%f); " \
+                            % (latency, sla_max_latency)
+            else:
+                sla_min_bw = int(self.scenario_cfg['sla']['min_bandwidth'])
+                bw = result["bandwidth(MBps)"]
+                if bw < sla_min_bw:
+                    sla_error += "bandwidth %f < " \
+                                 "sla:min_bandwidth(%f)" % (bw, sla_min_bw)
             assert sla_error == "", sla_error
 
 
@@ -104,8 +154,14 @@ def _test():
     logger = logging.getLogger('yardstick')
     logger.setLevel(logging.DEBUG)
 
-    options = {'stride': 128, 'stop_size': 16}
-    args = {'options': options}
+    options = {
+        'test_type': 'latency',
+        'stride': 128,
+        'stop_size': 16
+    }
+
+    sla = {'max_latency': 35, 'action': 'monitor'}
+    args = {'options': options, 'sla': sla}
     result = {}
 
     p = Lmbench(args, ctx)
diff --git a/yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash b/yardstick/benchmark/scenarios/compute/lmbench_bandwidth_benchmark.bash
new file mode 100644 (file)
index 0000000..09993a0
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Run a lmbench memory bandwidth benchmark in a host and
+# output in json format the memory size in megabytes and
+# memory bandwidth in megabytes per second
+
+set -e
+
+SIZE=$1
+TEST_NAME=$2
+WARMUP=$3
+
+# write the result to stdout in json format
+output_json()
+{
+    read DATA
+    echo $DATA | awk '/ /{printf "{\"size(MB)\": %s, \"bandwidth(MBps)\": %s}", $1, $2}'
+}
+
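+# bw_mem prints the memory size in MB and the bandwidth in MB/s on one
+# line (exact formatting may vary between lmbench versions); output_json
+# converts that to e.g. {"size(MB)": 0.51, "bandwidth(MBps)": 11025.5}.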
+/usr/lib/lmbench/bin/x86_64-linux-gnu/bw_mem -W $WARMUP ${SIZE}k $TEST_NAME 2>&1 | output_json
\ No newline at end of file
index 34278b9..c62c79e 100644 (file)
@@ -67,12 +67,15 @@ class Ping(base.Scenario):
         if exit_status != 0:
             raise RuntimeError(stderr)
 
-        result["rtt"] = float(stdout)
+        if stdout:
+            result["rtt"] = float(stdout)
 
-        if "sla" in self.scenario_cfg:
-            sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
-            assert result["rtt"] <= sla_max_rtt, "rtt %f > sla:max_rtt(%f); " % \
-                (result["rtt"], sla_max_rtt)
+            if "sla" in self.scenario_cfg:
+                sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
+                assert result["rtt"] <= sla_max_rtt, "rtt %f > sla:max_rtt(%f); " % \
+                    (result["rtt"], sla_max_rtt)
+        else:
+            LOG.error("ping '%s' '%s' timeout", options, destination)
 
 
 def _test():
index af9ace4..de29588 100644 (file)
@@ -7,6 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import os
 import json
 import logging
 import requests
@@ -45,13 +46,12 @@ class HttpDispatcher(DispatchBase):
         self.timeout = CONF.dispatcher_http.timeout
         self.target = CONF.dispatcher_http.target
         self.raw_result = []
-        # TODO set pod_name, installer, version based on pod info
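+        # POD_NAME and INSTALLER_TYPE are exported by run_benchmarks and
+        # YARDSTICK_VERSION by ci/yardstick-verify (git rev-parse HEAD).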
         self.result = {
             "project_name": "yardstick",
             "description": "yardstick test cases result",
-            "pod_name": "opnfv-jump-2",
-            "installer": "compass",
-            "version": "Brahmaputra-dev"
+            "pod_name": os.environ.get('POD_NAME', 'unknown'),
+            "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
+            "version": os.environ.get('YARDSTICK_VERSION', 'unknown')
         }
 
     def record_result_data(self, data):