Merge "Yardstick Preliminary Documentation"
author Kubi <jean.gaoliang@huawei.com>
Thu, 9 Mar 2017 01:28:07 +0000 (01:28 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Thu, 9 Mar 2017 01:28:07 +0000 (01:28 +0000)
19 files changed:
api/resources/testsuites_action.py
nsb_setup.sh
tests/ci/load_images.sh
tests/unit/dispatcher/test_influxdb.py
tools/ubuntu-server-cloudimg-modify.sh
tools/yardstick-img-modify
yardstick/benchmark/contexts/base.py
yardstick/benchmark/contexts/dummy.py
yardstick/benchmark/contexts/heat.py
yardstick/benchmark/contexts/node.py
yardstick/benchmark/contexts/standalone.py
yardstick/benchmark/core/task.py
yardstick/benchmark/runners/base.py
yardstick/common/constants.py
yardstick/common/utils.py
yardstick/dispatcher/base.py
yardstick/dispatcher/file.py
yardstick/dispatcher/http.py
yardstick/dispatcher/influxdb.py

diff --git a/api/resources/testsuites_action.py b/api/resources/testsuites_action.py
index f833dc2..a385290 100644 (file)
@@ -13,9 +13,11 @@ from __future__ import absolute_import
 import uuid
 import os
 import logging
+import yaml
 
-from api import conf
 from api.utils import common as common_utils
+from yardstick.common import constants as consts
+from yardstick.common.task_template import TaskTemplate
 
 logger = logging.getLogger(__name__)
 
@@ -30,8 +32,7 @@ def runTestSuite(args):
     if 'suite' not in opts:
         opts['suite'] = 'true'
 
-    testsuite = os.path.join(conf.TEST_SUITE_PATH,
-                             conf.TEST_SUITE_PRE + testsuite + '.yaml')
+    testsuite = os.path.join(consts.TESTSUITE_DIR, '{}.yaml'.format(testsuite))
 
     task_id = str(uuid.uuid4())
 
@@ -40,6 +41,22 @@ def runTestSuite(args):
     logger.debug('The command_list is: %s', command_list)
 
     logger.debug('Start to execute command list')
-    common_utils.exec_command_task(command_list, task_id)
+    task_dic = {
+        'task_id': task_id,
+        'details': _get_cases_from_suite_file(testsuite)
+    }
+    common_utils.exec_command_task(command_list, task_dic)
 
     return common_utils.result_handler('success', task_id)
+
+
+def _get_cases_from_suite_file(testsuite):
+    def get_name(full_name):
+        return os.path.splitext(full_name)[0]
+
+    with open(testsuite) as f:
+        contents = TaskTemplate.render(f.read())
+
+    suite_dic = yaml.safe_load(contents)
+    testcases = (get_name(c['file_name']) for c in suite_dic['test_cases'])
+    return ','.join(testcases)
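
For context, the `details` string carried in `task_dic` is just a comma-separated list of test case names. A standalone sketch of the same extraction logic, using a made-up suite document (real suites live under tests/opnfv/test_suites/; TaskTemplate rendering is skipped here):

import os
import yaml

suite_yaml = """
test_cases:
- file_name: opnfv_yardstick_tc002.yaml
- file_name: opnfv_yardstick_tc005.yaml
"""

suite_dic = yaml.safe_load(suite_yaml)
testcases = (os.path.splitext(case['file_name'])[0]
             for case in suite_dic['test_cases'])
print(','.join(testcases))
# -> opnfv_yardstick_tc002,opnfv_yardstick_tc005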

diff --git a/nsb_setup.sh b/nsb_setup.sh
index 02258a5..e77ffbe 100755 (executable)
@@ -28,7 +28,7 @@ if [ "$(whoami)" != "root" ]; then
 fi
 
 INSTALL_BIN_PATH="/opt/nsb_bin"
-TREX_DOWNLOAD="https://trex-tgn.cisco.com/trex/release/v2.05.tar.gz"
+TREX_DOWNLOAD="https://trex-tgn.cisco.com/trex/release/v2.20.tar.gz"
 DPDK_DOWNLOAD="http://dpdk.org/browse/dpdk/snapshot/dpdk-16.07.zip"
 VIRTUAL_VENV="$INSTALL_BIN_PATH/yardstick_venv"
 

diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 6f950ec..e5a7ae0 100755 (executable)
@@ -15,6 +15,12 @@ set -e
 YARD_IMG_ARCH=amd64
 export YARD_IMG_ARCH
 
+HW_FW_TYPE=""
+if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
+    HW_FW_TYPE=uefi
+fi
+export HW_FW_TYPE
+
 if ! grep -q "Defaults env_keep += \"YARD_IMG_ARCH\"" "/etc/sudoers"; then
     sudo echo "Defaults env_keep += \"YARD_IMG_ARCH YARDSTICK_REPO_DIR\"" >> /etc/sudoers
 fi
@@ -25,10 +31,10 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
 fi
 
 UCA_HOST="cloud-images.ubuntu.com"
-if [ $YARD_IMG_ARCH = "arm64" ]; then
-    export VIVID_IMG_URL="http://${UCA_HOST}/vivid/current/vivid-server-cloudimg-arm64.tar.gz"
-    if ! grep -q "Defaults env_keep += \"VIVID_IMG_URL\"" "/etc/sudoers"; then
-        sudo echo "Defaults env_keep += \"VIVID_IMG_URL\"" >> /etc/sudoers
+if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
+    export CLOUD_IMG_URL="http://${UCA_HOST}/${release}/current/${release}-server-cloudimg-${YARD_IMG_ARCH}.tar.gz"
+    if ! grep -q "Defaults env_keep += \"CLOUD_IMG_URL\"" "/etc/sudoers"; then
+        sudo echo "Defaults env_keep += \"CLOUD_IMG_URL\"" >> /etc/sudoers
     fi
 fi
 
@@ -65,24 +71,24 @@ load_yardstick_image()
     echo
     echo "========== Loading yardstick cloud image =========="
     EXTRA_PARAMS=""
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
-        VIVID_IMAGE="/tmp/vivid-server-cloudimg-arm64.tar.gz"
-        VIVID_KERNEL="/tmp/vivid-server-cloudimg-arm64-vmlinuz-generic"
+    if [[ "${YARD_IMG_ARCH}" = "arm64" && "${YARD_IMG_AKI}" = "true" ]]; then
+        CLOUD_IMAGE="/tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}.tar.gz"
+        CLOUD_KERNEL="/tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}-vmlinuz-generic"
         cd /tmp
-        if [ ! -f $VIVID_IMAGE ]; then
-            wget $VIVID_IMG_URL
+        if [ ! -f "${CLOUD_IMAGE}" ]; then
+            wget $CLOUD_IMG_URL
         fi
-        if [ ! -f $VIVID_KERNEL ]; then
-            tar zxf $VIVID_IMAGE $(basename $VIVID_KERNEL)
+        if [ ! -f "${CLOUD_KERNEL}" ]; then
+            tar zxf $CLOUD_IMAGE $(basename $CLOUD_KERNEL)
         fi
-        create_vivid_kernel=$(openstack image create \
+        create_kernel=$(openstack image create \
                 --public \
                 --disk-format qcow2 \
                 --container-format bare \
-                --file $VIVID_KERNEL \
-                yardstick-vivid-kernel)
+                --file $CLOUD_KERNEL \
+                yardstick-${release}-kernel)
 
-        GLANCE_KERNEL_ID=$(echo "$create_vivid_kernel" | grep " id " | awk '{print $(NF-1)}')
+        GLANCE_KERNEL_ID=$(echo "$create_kernel" | grep " id " | awk '{print $(NF-1)}')
         if [ -z "$GLANCE_KERNEL_ID" ]; then
            echo 'Failed uploading kernel to cloud.'
             exit 1
@@ -92,7 +98,7 @@ load_yardstick_image()
 
         EXTRA_PARAMS="--property kernel_id=$GLANCE_KERNEL_ID --property os_command_line=\"$command_line\""
 
-        rm -f $VIVID_KERNEL $VIVID_IMAGE
+        rm -f $CLOUD_KERNEL $CLOUD_IMAGE
         cd $YARDSTICK_REPO_DIR
     fi
 
@@ -101,6 +107,10 @@ load_yardstick_image()
         EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
     fi
 
+    if [[ -n "${HW_FW_TYPE}" ]]; then
+        EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_firmware_type=${HW_FW_TYPE}"
+    fi
+
     if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
         output=$(eval openstack image create \
             --public \
@@ -223,7 +233,7 @@ main()
 
     build_yardstick_image
     load_yardstick_image
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
+    if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
         sed -i 's/image: cirros-0.3.3/image: TestVM/g' tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml \
         samples/ping.yaml
         #We have overlapping IP with the real network

diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
index b84389e..0c7b581 100644 (file)
@@ -90,19 +90,21 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
             }
         }
 
+        self.yardstick_conf = {'yardstick': {}}
+
     def test_record_result_data_no_target(self):
-        influxdb = InfluxdbDispatcher(None)
+        influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
         influxdb.target = ''
         self.assertEqual(influxdb.record_result_data(self.data1), -1)
 
     def test_record_result_data_no_case_name(self):
-        influxdb = InfluxdbDispatcher(None)
+        influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
         self.assertEqual(influxdb.record_result_data(self.data2), -1)
 
     @mock.patch('yardstick.dispatcher.influxdb.requests')
     def test_record_result_data(self, mock_requests):
         type(mock_requests.post.return_value).status_code = 204
-        influxdb = InfluxdbDispatcher(None)
+        influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
         self.assertEqual(influxdb.record_result_data(self.data1), 0)
         self.assertEqual(influxdb.record_result_data(self.data2), 0)
         self.assertEqual(influxdb.flush_result_data(), 0)
@@ -112,7 +114,7 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
                'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
         # need to sort for assert to work
         line = ",".join(sorted(line.split(',')))
-        influxdb = InfluxdbDispatcher(None)
+        influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
         flattened_data = influxdb._dict_key_flatten(
             self.data3['benchmark']['data'])
         result = ",".join(
@@ -120,7 +122,7 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
         self.assertEqual(result, line)
 
     def test__get_nano_timestamp(self):
-        influxdb = InfluxdbDispatcher(None)
+        influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
         results = {'benchmark': {'timestamp': '1451461248.925574'}}
         self.assertEqual(influxdb._get_nano_timestamp(results),
                          '1451461248925574144')
@@ -128,7 +130,7 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
     @mock.patch('yardstick.dispatcher.influxdb.time')
     def test__get_nano_timestamp_except(self, mock_time):
         results = {}
-        influxdb = InfluxdbDispatcher(None)
+        influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
         mock_time.time.return_value = 1451461248.925574
         self.assertEqual(influxdb._get_nano_timestamp(results),
                          '1451461248925574144')

diff --git a/tools/ubuntu-server-cloudimg-modify.sh b/tools/ubuntu-server-cloudimg-modify.sh
index c0ae774..ce320a2 100755 (executable)
@@ -25,8 +25,8 @@ fi
 
 # iperf3 only available for trusty in backports
 if grep -q trusty /etc/apt/sources.list; then
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
-        echo "deb [arch=arm64] http://ports.ubuntu.com/ trusty-backports main restricted universe multiverse" >> /etc/apt/sources.list
+    if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
+        echo "deb [arch=${YARD_IMG_ARCH}] http://ports.ubuntu.com/ trusty-backports main restricted universe multiverse" >> /etc/apt/sources.list
     else
         echo "deb http://archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse" >> /etc/apt/sources.list
     fi
@@ -46,11 +46,11 @@ chpasswd: { expire: False }
 ssh_pwauth: True
 EOF
 apt-get update
-if [ $YARD_IMG_ARCH = "arm64" ]; then
-apt-get install -y \
-    linux-headers-$(echo $VIVID_KERNEL_VERSION | cut -d'-' -f3,4,5) \
-    unzip
-#resize root parition (/dev/vdb1) It is supposed to be default but the image is booted differently for arm64
+if [[ "${YARD_IMG_ARCH}" = "arm64" && "$release" = "vivid" ]]; then
+    apt-get install -y \
+        linux-headers-$(echo $CLOUD_KERNEL_VERSION | cut -d'-' -f3,4,5) \
+        unzip
+    #resize root partition (/dev/vdb1) It is supposed to be default but the image is booted differently for arm64
 cat <<EOF >/etc/cloud/cloud.cfg.d/15_growpart.cfg
 #cloud-config
 bootcmd:
@@ -76,7 +76,7 @@ apt-get install -y \
     stress \
     sysstat
 
-if [ $YARD_IMG_ARCH = "arm64" ]; then
+if [[ "${YARD_IMG_ARCH}" = "arm64" && "$release" = "vivid" ]]; then
     wget https://github.com/kdlucas/byte-unixbench/archive/master.zip
     unzip master.zip && rm master.zip
     mkdir /opt/tempT
@@ -88,7 +88,7 @@ else
 fi
 make --directory /opt/tempT/UnixBench/
 
-if [ $YARD_IMG_ARCH = "arm64" ]; then
+if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
     wget https://github.com/beefyamoeba5/ramspeed/archive/master.zip
     unzip master.zip && rm master.zip
     mkdir /opt/tempT/RAMspeed
@@ -100,7 +100,7 @@ cd /opt/tempT/RAMspeed/ramspeed-2.6.0
 mkdir temp
 bash build.sh
 
-if [ $YARD_IMG_ARCH = "arm64" ]; then
+if [[ "${YARD_IMG_ARCH}" = "arm64" && "$release" = "vivid" ]]; then
     wget https://github.com/beefyamoeba5/cachestat/archive/master.zip
     unzip master.zip && rm master.zip
     mv cachestat-master/cachestat /opt/tempT

diff --git a/tools/yardstick-img-modify b/tools/yardstick-img-modify
index 68ce6e2..da8e1c9 100755 (executable)
@@ -23,7 +23,6 @@
 #
 # TODO: image resize is needed if the base image is too small
 #
-
 set -e
 set -x
 
@@ -42,15 +41,23 @@ mountdir="/mnt/yardstick"
 workspace=${WORKSPACE:-"/tmp/workspace/yardstick"}
 host=${HOST:-"cloud-images.ubuntu.com"}
 release=${RELEASE:-"xenial"}
-image_path="${release}/current/${release}-server-cloudimg-${YARD_IMG_ARCH}-disk1.img"
+boot_mode="disk1"
+if [[ "${YARD_IMG_ARCH}" = "arm64" ]]; then
+    boot_mode="uefi1"
+fi
+
+image_path="${release}/current/${release}-server-cloudimg-${YARD_IMG_ARCH}-${boot_mode}.img"
 image_url=${IMAGE_URL:-"https://${host}/${image_path}"}
 md5sums_path="${release}/current/MD5SUMS"
 md5sums_url=${MD5SUMS_URL:-"https://${host}/${md5sums_path}"}
 
 imgfile="${workspace}/yardstick-image.img"
-raw_imgfile="${workspace}/yardstick-${release}-server.raw"
+raw_imgfile_basename="yardstick-${release}-server.raw"
+raw_imgfile="${workspace}/${raw_imgfile_basename}"
 filename=$(basename $image_url)
 
+apt-get install -y parted
+
 # download and checksum base image, conditionally if local copy is outdated
 download() {
     test -d $workspace || mkdir -p $workspace
@@ -69,24 +76,25 @@ download() {
         [ -a /dev/loop$i ] || mknod -m 660 /dev/loop$i b 7 $i
     done
 
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
+    if [[ "${YARD_IMG_ARCH}" = "arm64"  && "$release" = "vivid" ]]; then
         cd /tmp
-        if [ ! -f /tmp/vivid-server-cloudimg-arm64-kernel-info.txt ]; then
-            wget http://cloud-images.ubuntu.com/vivid/current/vivid-server-cloudimg-arm64-kernel-info.txt
+        if [ ! -f /tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}-kernel-info.txt ]; then
+            wget http://${host}/${release}/current/${release}-server-cloudimg-${YARD_IMG_ARCH}-kernel-info.txt
         fi
-        export VIVID_KERNEL_VERSION=$(cut -d$'\t' -f4 vivid-server-cloudimg-arm64-kernel-info.txt)
-        mkdir -p /tmp/vivid-modules
-        if [ ! -f "/tmp/vivid-server-cloudimg-arm64.tar.gz" ]; then
-            wget $VIVID_IMG_URL
+        export CLOUD_KERNEL_VERSION=$(cut -d$'\t' -f4 ${release}-server-cloudimg-${YARD_IMG_ARCH}-kernel-info.txt)
+
+        mkdir -p /tmp/${release}-modules
+        if [ ! -f "/tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}.tar.gz" ]; then
+            wget $CLOUD_IMG_URL
         fi
-        if [ ! -f "/tmp/vivid-server-cloudimg-arm64.img" ]; then
-            tar zxvf vivid-server-cloudimg-arm64.tar.gz vivid-server-cloudimg-arm64.img
+        if [ ! -f "/tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}.img" ]; then
+            tar zxvf ${release}-server-cloudimg-${YARD_IMG_ARCH}.tar.gz ${release}-server-cloudimg-${YARD_IMG_ARCH}.img
         fi
-        mkdir -p /mnt/vivid
-        mount /tmp/vivid-server-cloudimg-arm64.img /mnt/vivid
-        cp -r /mnt/vivid/lib/modules/$(echo $VIVID_KERNEL_VERSION | cut -d'-' -f3,4,5) /tmp/vivid-modules
-        umount /mnt/vivid
-        rm /tmp/vivid-server-cloudimg-arm64.img
+        mkdir -p /mnt/${release}
+        mount /tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}.img /mnt/${release}
+        cp -r /mnt/${release}/lib/modules/$(echo $CLOUD_KERNEL_VERSION) /tmp/${release}-modules
+        umount /mnt/${release}
+        rm /tmp/${release}-server-cloudimg-${YARD_IMG_ARCH}.img
         cd $workspace
     fi
     qemu-img convert $filename $raw_imgfile
@@ -96,27 +104,32 @@ download() {
 # mount image
 setup() {
     # qemu-img resize $raw_imgfile +5GB
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
-        echo -e "d\nn\np\n1\n\n\nw" | fdisk $raw_imgfile
+    if [ "${YARD_IMG_ARCH}" = "arm64" ]; then
+        echo -e "d\nn\np\n1\n\n\nw" | parted -l $raw_imgfile
     fi
     mkdir -p $mountdir
 
-    loopdevice=$(kpartx -l $raw_imgfile | head -1 | cut -f1 -d ' ')
+    #kpartx fails with image paths longer than 63 characters
+    #try shortest relative path to image as temporary workaround
+    cd ${workspace}
+    loopdevice=$(kpartx -l $raw_imgfile_basename | head -1 | cut -f1 -d ' ')
+
+    kpartx -av $raw_imgfile_basename
+    cd -
 
-    kpartx -av $raw_imgfile
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
+    if [[ "${YARD_IMG_ARCH}" = "arm64" && "$release" = "vivid" ]]; then
         e2fsck -p -f /dev/mapper/$loopdevice
         resize2fs /dev/mapper/$loopdevice
     fi
     # for trouble shooting
     sleep 2
     dmsetup ls
-    fdisk -l /dev/${loopdevice:0:5} || true
+    parted -l /dev/${loopdevice:0:5} || true
     mount /dev/mapper/$loopdevice $mountdir
     mount -t proc none $mountdir/proc
 
-    if [ $YARD_IMG_ARCH = "arm64" ]; then
-        cp -r /tmp/vivid-modules/$(echo $VIVID_KERNEL_VERSION | cut -d'-' -f3,4,5) "$mountdir/lib/modules"
+    if [[ "${YARD_IMG_ARCH}" = "arm64" && "$release" = "vivid" ]]; then
+        cp -r /tmp/${release}-modules/$(echo $CLOUD_KERNEL_VERSION) "$mountdir/lib/modules"
         cp $(which "qemu-aarch64-static") "$mountdir/usr/bin"
     fi
     cp $cmd $mountdir/$(basename $cmd)
@@ -151,9 +164,9 @@ cleanup() {
     # designed to be idempotent
     mount | grep $mountdir/proc && umount $mountdir/proc
     mount | grep $mountdir && umount $mountdir
-    mount | grep "/mnt/vivid" && umount "/mnt/vivid"
+    mount | grep "/mnt/${release}" && umount "/mnt/${release}"
 
-    if [ -f $raw_imgfile ]; then
+    if [ -f "${raw_imgfile}" ]; then
        #kpartx -dv $raw_imgfile sometimes fails; we should check it again.
         #if [ -z "$(kpartx -l $raw_imgfile | grep 'loop deleted')" ]; then
         #    kpartx -dv $raw_imgfile

diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index 9f2b215..0be2eee 100644 (file)
@@ -46,6 +46,10 @@ class Context(object):
     @abc.abstractmethod
     def undeploy(self):
         """Undeploy context."""
+        self._delete_context()
+
+    def _delete_context(self):
+        Context.list.remove(self)
 
     @abc.abstractmethod
     def _get_server(self, attr_name):
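
The new `_delete_context()` hook means that any context which calls `super().undeploy()` removes itself from the class-level `Context.list`. A minimal stand-in (not the real yardstick classes) illustrating the pattern:

class Context(object):
    list = []

    def __init__(self):
        Context.list.append(self)

    def undeploy(self):
        """Undeploy context."""
        self._delete_context()

    def _delete_context(self):
        Context.list.remove(self)


class DummyContext(Context):
    def undeploy(self):
        # context-specific teardown would go here
        super(DummyContext, self).undeploy()


ctx = DummyContext()
assert ctx in Context.list
ctx.undeploy()
assert ctx not in Context.list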

diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index 0edc250..c658d32 100644 (file)
@@ -33,7 +33,7 @@ class DummyContext(Context):
 
     def undeploy(self):
         """don't need to undeploy"""
-        pass
+        super(DummyContext, self).undeploy()
 
     def _get_server(self, attr_name):
         return None

diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 479548b..571a769 100644 (file)
@@ -58,6 +58,16 @@ class HeatContext(Context):
              get_short_key_uuid(self.key_uuid)])
         super(HeatContext, self).__init__()
 
+    def assign_external_network(self, networks):
+        sorted_networks = sorted(networks.items())
+        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
+        have_external_network = [(name, net)
+                                 for name, net in sorted_networks if
+                                 net.get("external_network")]
+        # no external net defined, assign it to the first network using os.environ
+        if sorted_networks and not have_external_network:
+            sorted_networks[0][1]["external_network"] = external_network
+
     def init(self, attrs):     # pragma: no cover
         """initializes itself from the supplied arguments"""
         self.name = attrs["name"]
@@ -84,9 +94,10 @@ class HeatContext(Context):
                               for name, sgattrs in attrs.get(
                               "server_groups", {}).items()]
 
-        for name, netattrs in attrs["networks"].items():
-            network = Network(name, self, netattrs)
-            self.networks.append(network)
+        self.assign_external_network(attrs["networks"])
+
+        self.networks = [Network(name, self, netattrs) for name, netattrs in
+                         sorted(attrs["networks"].items())]
 
         for name, serverattrs in attrs["servers"].items():
             server = Server(name, self, serverattrs)
@@ -257,6 +268,8 @@ class HeatContext(Context):
             except OSError:
                 LOG.exception("Key filename %s", self.key_filename)
 
+        super(HeatContext, self).undeploy()
+
     def _get_server(self, attr_name):
         """lookup server info by name from context
         attr_name: either a name for a server created by yardstick or a dict
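
The new `assign_external_network()` replaces the external-network bookkeeping that used to live in `TaskParser.parse_task()`: networks are sorted by name and, if none of them already declares `external_network`, the first one gets it from `$EXTERNAL_NETWORK` (default net04_ext). A standalone sketch with hypothetical network attributes:

import os

def assign_external_network(networks):
    sorted_networks = sorted(networks.items())
    external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
    have_external_network = [(name, net) for name, net in sorted_networks
                             if net.get("external_network")]
    # no external network defined, assign it to the first network
    if sorted_networks and not have_external_network:
        sorted_networks[0][1]["external_network"] = external_network

networks = {"test2": {"cidr": "10.0.2.0/24"}, "test": {"cidr": "10.0.1.0/24"}}
assign_external_network(networks)
print(networks["test"])   # {'cidr': '10.0.1.0/24', 'external_network': 'net04_ext'}
print(networks["test2"])  # {'cidr': '10.0.2.0/24'}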

diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index 6fa9aa9..f8c38cb 100644 (file)
@@ -89,6 +89,8 @@ class NodeContext(Context):
             for host, info in teardown.items():
                 self._execute_script(host, info)
 
+        super(NodeContext, self).undeploy()
+
     def _get_server(self, attr_name):
         """lookup server info by name from context
         attr_name: a name for a server listed in nodes config file

diff --git a/yardstick/benchmark/contexts/standalone.py b/yardstick/benchmark/contexts/standalone.py
index eff7009..674e57f 100644 (file)
@@ -79,7 +79,7 @@ class StandaloneContext(Context):
         """don't need to undeploy"""
 
         # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
-        pass
+        super(StandaloneContext, self).undeploy()
 
     def _get_server(self, attr_name):
         """lookup server info by name from context

diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index aecf5bf..2794d84 100644 (file)
@@ -26,9 +26,11 @@ from yardstick.benchmark.contexts.base import Context
 from yardstick.benchmark.runners import base as base_runner
 from yardstick.common.task_template import TaskTemplate
 from yardstick.common.utils import source_env
+from yardstick.common import utils
 from yardstick.common import constants
 
 output_file_default = "/tmp/yardstick.out"
+config_file = '/etc/yardstick/yardstick.conf'
 test_cases_dir_default = "tests/opnfv/test_cases/"
 LOG = logging.getLogger(__name__)
 
@@ -39,15 +41,21 @@ class Task(object):     # pragma: no cover
        Set of commands to manage benchmark tasks.
     """
 
+    def __init__(self):
+        self.config = {}
+        self.contexts = []
+
     def start(self, args, **kwargs):
         """Start a benchmark scenario."""
 
-        atexit.register(atexit_handler)
+        atexit.register(self.atexit_handler)
 
         self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
 
         check_environment()
 
+        self.config['yardstick'] = utils.parse_ini_file(config_file)
+
         total_start_time = time.time()
         parser = TaskParser(args.inputfile[0])
 
@@ -70,8 +78,11 @@ class Task(object):     # pragma: no cover
         for i in range(0, len(task_files)):
             one_task_start_time = time.time()
             parser.path = task_files[i]
-            scenarios, run_in_parallel, meet_precondition = parser.parse_task(
-                self.task_id, task_args[i], task_args_fnames[i])
+            scenarios, run_in_parallel, meet_precondition, contexts = \
+                parser.parse_task(self.task_id, task_args[i],
+                                  task_args_fnames[i])
+
+            self.contexts.extend(contexts)
 
             if not meet_precondition:
                 LOG.info("meet_precondition is %s, please check environment",
@@ -83,11 +94,11 @@ class Task(object):     # pragma: no cover
             if args.keep_deploy:
                 # keep deployment, forget about stack
                 # (hide it for exit handler)
-                Context.list = []
+                self.contexts = []
             else:
-                for context in Context.list[::-1]:
+                for context in self.contexts[::-1]:
                     context.undeploy()
-                Context.list = []
+                self.contexts = []
             one_task_end_time = time.time()
             LOG.info("task %s finished in %d secs", task_files[i],
                      one_task_end_time - one_task_start_time)
@@ -100,7 +111,7 @@ class Task(object):     # pragma: no cover
 
     def _run(self, scenarios, run_in_parallel, output_file):
         """Deploys context and calls runners"""
-        for context in Context.list:
+        for context in self.contexts:
             context.deploy()
 
         background_runners = []
@@ -108,14 +119,14 @@ class Task(object):     # pragma: no cover
         # Start all background scenarios
         for scenario in filter(_is_background_scenario, scenarios):
             scenario["runner"] = dict(type="Duration", duration=1000000000)
-            runner = run_one_scenario(scenario, output_file)
+            runner = self.run_one_scenario(scenario, output_file)
             background_runners.append(runner)
 
         runners = []
         if run_in_parallel:
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
-                    runner = run_one_scenario(scenario, output_file)
+                    runner = self.run_one_scenario(scenario, output_file)
                     runners.append(runner)
 
             # Wait for runners to finish
@@ -126,7 +137,7 @@ class Task(object):     # pragma: no cover
             # run serially
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
-                    runner = run_one_scenario(scenario, output_file)
+                    runner = self.run_one_scenario(scenario, output_file)
                     runner_join(runner)
                     print("Runner ended, output in", output_file)
 
@@ -144,8 +155,91 @@ class Task(object):     # pragma: no cover
                 base_runner.Runner.release(runner)
             print("Background task ended")
 
+    def atexit_handler(self):
+        """handler for process termination"""
+        base_runner.Runner.terminate_all()
+
+        if self.contexts:
+            print("Undeploying all contexts")
+            for context in self.contexts[::-1]:
+                context.undeploy()
+
+    def run_one_scenario(self, scenario_cfg, output_file):
+        """run one scenario using context"""
+        runner_cfg = scenario_cfg["runner"]
+        runner_cfg['output_filename'] = output_file
+
+        # TODO support get multi hosts/vms info
+        context_cfg = {}
+        if "host" in scenario_cfg:
+            context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+
+        if "target" in scenario_cfg:
+            if is_ip_addr(scenario_cfg["target"]):
+                context_cfg['target'] = {}
+                context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+            else:
+                context_cfg['target'] = Context.get_server(
+                    scenario_cfg["target"])
+                if self._is_same_heat_context(scenario_cfg["host"],
+                                              scenario_cfg["target"]):
+                    context_cfg["target"]["ipaddr"] = \
+                        context_cfg["target"]["private_ip"]
+                else:
+                    context_cfg["target"]["ipaddr"] = \
+                        context_cfg["target"]["ip"]
+
+        if "targets" in scenario_cfg:
+            ip_list = []
+            for target in scenario_cfg["targets"]:
+                if is_ip_addr(target):
+                    ip_list.append(target)
+                    context_cfg['target'] = {}
+                else:
+                    context_cfg['target'] = Context.get_server(target)
+                    if self._is_same_heat_context(scenario_cfg["host"],
+                                                  target):
+                        ip_list.append(context_cfg["target"]["private_ip"])
+                    else:
+                        ip_list.append(context_cfg["target"]["ip"])
+            context_cfg['target']['ipaddr'] = ','.join(ip_list)
+
+        if "nodes" in scenario_cfg:
+            context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+        runner = base_runner.Runner.get(runner_cfg, self.config)
+
+        print("Starting runner of type '%s'" % runner_cfg["type"])
+        runner.run(scenario_cfg, context_cfg)
+
+        return runner
+
+    def _is_same_heat_context(self, host_attr, target_attr):
+        """check if two servers are in the same heat context
+        host_attr: either a name for a server created by yardstick or a dict
+        with attribute name mapping when using external heat templates
+        target_attr: either a name for a server created by yardstick or a dict
+        with attribute name mapping when using external heat templates
+        """
+        host = None
+        target = None
+        for context in self.contexts:
+            if context.__context_type__ != "Heat":
+                continue
+
+            host = context._get_server(host_attr)
+            if host is None:
+                continue
+
+            target = context._get_server(target_attr)
+            if target is None:
+                return False
 
-# TODO: Move stuff below into TaskCommands class !?
+            # Both host and target are not None, so they are in the
+            # same heat context.
+            return True
+
+        return False
 
 
 class TaskParser(object):       # pragma: no cover
@@ -265,6 +359,7 @@ class TaskParser(object):       # pragma: no cover
         else:
             context_cfgs = [{"type": "Dummy"}]
 
+        contexts = []
         name_suffix = '-{}'.format(task_id[:8])
         for cfg_attrs in context_cfgs:
             try:
@@ -272,20 +367,11 @@ class TaskParser(object):       # pragma: no cover
                                                   name_suffix)
             except KeyError:
                 pass
+            # default to Heat context because we are testing OpenStack
             context_type = cfg_attrs.get("type", "Heat")
-            if "Heat" == context_type and "networks" in cfg_attrs:
-                # bugfix: if there are more than one network,
-                # only add "external_network" on first one.
-                # the name of netwrok should follow this rule:
-                # test, test2, test3 ...
-                # sort network with the length of network's name
-                sorted_networks = sorted(cfg_attrs["networks"])
-                # config external_network based on env var
-                cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
-                    = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
-
             context = Context.get(context_type)
             context.init(cfg_attrs)
+            contexts.append(context)
 
         run_in_parallel = cfg.get("run_in_parallel", False)
 
@@ -304,7 +390,7 @@ class TaskParser(object):       # pragma: no cover
                 pass
 
         # TODO we need something better here, a class that represent the file
-        return cfg["scenarios"], run_in_parallel, meet_precondition
+        return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
 
     def _check_schema(self, cfg_schema, schema_type):
         """Check if config file is using the correct schema type"""
@@ -346,16 +432,6 @@ class TaskParser(object):       # pragma: no cover
         return True
 
 
-def atexit_handler():
-    """handler for process termination"""
-    base_runner.Runner.terminate_all()
-
-    if len(Context.list) > 0:
-        print("Undeploying all contexts")
-        for context in Context.list[::-1]:
-            context.undeploy()
-
-
 def is_ip_addr(addr):
     """check if string addr is an IP address"""
     try:
@@ -371,34 +447,6 @@ def is_ip_addr(addr):
         return True
 
 
-def _is_same_heat_context(host_attr, target_attr):
-    """check if two servers are in the same heat context
-    host_attr: either a name for a server created by yardstick or a dict
-    with attribute name mapping when using external heat templates
-    target_attr: either a name for a server created by yardstick or a dict
-    with attribute name mapping when using external heat templates
-    """
-    host = None
-    target = None
-    for context in Context.list:
-        if context.__context_type__ != "Heat":
-            continue
-
-        host = context._get_server(host_attr)
-        if host is None:
-            continue
-
-        target = context._get_server(target_attr)
-        if target is None:
-            return False
-
-        # Both host and target is not None, then they are in the
-        # same heat context.
-        return True
-
-    return False
-
-
 def _is_background_scenario(scenario):
     if "run_in_background" in scenario:
         return scenario["run_in_background"]
@@ -406,54 +454,6 @@ def _is_background_scenario(scenario):
         return False
 
 
-def run_one_scenario(scenario_cfg, output_file):
-    """run one scenario using context"""
-    runner_cfg = scenario_cfg["runner"]
-    runner_cfg['output_filename'] = output_file
-
-    # TODO support get multi hosts/vms info
-    context_cfg = {}
-    if "host" in scenario_cfg:
-        context_cfg['host'] = Context.get_server(scenario_cfg["host"])
-
-    if "target" in scenario_cfg:
-        if is_ip_addr(scenario_cfg["target"]):
-            context_cfg['target'] = {}
-            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
-        else:
-            context_cfg['target'] = Context.get_server(scenario_cfg["target"])
-            if _is_same_heat_context(scenario_cfg["host"],
-                                     scenario_cfg["target"]):
-                context_cfg["target"]["ipaddr"] = \
-                    context_cfg["target"]["private_ip"]
-            else:
-                context_cfg["target"]["ipaddr"] = \
-                    context_cfg["target"]["ip"]
-
-    if "targets" in scenario_cfg:
-        ip_list = []
-        for target in scenario_cfg["targets"]:
-            if is_ip_addr(target):
-                ip_list.append(target)
-                context_cfg['target'] = {}
-            else:
-                context_cfg['target'] = Context.get_server(target)
-                if _is_same_heat_context(scenario_cfg["host"], target):
-                    ip_list.append(context_cfg["target"]["private_ip"])
-                else:
-                    ip_list.append(context_cfg["target"]["ip"])
-        context_cfg['target']['ipaddr'] = ','.join(ip_list)
-
-    if "nodes" in scenario_cfg:
-        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
-    runner = base_runner.Runner.get(runner_cfg)
-
-    print("Starting runner of type '%s'" % runner_cfg["type"])
-    runner.run(scenario_cfg, context_cfg)
-
-    return runner
-
-
 def parse_nodes_with_context(scenario_cfg):
     """parse the 'nodes' fields in scenario"""
     nodes = scenario_cfg["nodes"]
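
Taken together, the dispatcher settings no longer come from oslo.config options registered at import time: Task.start() reads /etc/yardstick/yardstick.conf through parse_ini_file() into self.config and hands it down via Runner.get(runner_cfg, self.config) to the dispatcher. Roughly, the dict that travels down looks like this (a hypothetical configuration; when keys are missing, the runner falls back to the 'file' dispatcher and InfluxdbDispatcher falls back to the values shown under dispatcher_influxdb):

config = {
    'yardstick': {
        'DEFAULT': {'dispatcher': 'influxdb'},
        'dispatcher_influxdb': {
            'target': 'http://127.0.0.1:8086',
            'db_name': 'yardstick',
            'username': 'root',
            'password': 'root',
            'timeout': '5',
        },
    }
}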

diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index 5b90815..7c76e42 100755 (executable)
@@ -35,15 +35,18 @@ log = logging.getLogger(__name__)
 CONF = cfg.CONF
 
 
-def _output_serializer_main(filename, queue):
+def _output_serializer_main(filename, queue, config):
     """entrypoint for the singleton subprocess writing to outfile
     Use of this process enables multiple instances of a scenario without
     messing up the output file.
     """
-    config = {}
-    config["type"] = CONF.dispatcher.capitalize()
-    config["file_path"] = filename
-    dispatcher = DispatcherBase.get(config)
+    out_type = config['yardstick'].get('DEFAULT', {}).get('dispatcher', 'file')
+    conf = {
+        'type': out_type.capitalize(),
+        'file_path': filename
+    }
+
+    dispatcher = DispatcherBase.get(conf, config)
 
     while True:
         # blocks until data becomes available
@@ -123,21 +126,21 @@ class Runner(object):
         return types
 
     @staticmethod
-    def get(config):
+    def get(runner_cfg, config):
         """Returns instance of a scenario runner for execution type.
         """
         # if there is no runner, start the output serializer subprocess
         if not Runner.runners:
             log.debug("Starting dump process file '%s'",
-                      config["output_filename"])
+                      runner_cfg["output_filename"])
             Runner.queue = multiprocessing.Queue()
             Runner.dump_process = multiprocessing.Process(
                 target=_output_serializer_main,
                 name="Dumper",
-                args=(config["output_filename"], Runner.queue))
+                args=(runner_cfg["output_filename"], Runner.queue, config))
             Runner.dump_process.start()
 
-        return Runner.get_cls(config["type"])(config, Runner.queue)
+        return Runner.get_cls(runner_cfg["type"])(runner_cfg, Runner.queue)
 
     @staticmethod
     def release_dump_process():

diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index e068c0b..54ddf33 100644 (file)
@@ -33,6 +33,8 @@ YARDSTICK_ROOT_PATH = dirname(dirname(dirname(abspath(__file__)))) + sep
 
 TESTCASE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_cases/')
 
+TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/')
+
 YARDSTICK_REPOS_DIR = '/home/opnfv/repos/yardstick'
 
 YARDSTICK_LOG_DIR = '/tmp/yardstick/'

diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 3c5895f..d2be800 100644 (file)
@@ -26,6 +26,7 @@ import sys
 from functools import reduce
 
 import yaml
+from six.moves import configparser
 from oslo_utils import importutils
 from oslo_serialization import jsonutils
 
@@ -133,7 +134,9 @@ def source_env(env_file):
 
 def read_json_from_file(path):
     with open(path, 'r') as f:
-        return jsonutils.load(f)
+        j = f.read()
+    # don't use jsonutils.load() it conflicts with already decoded input
+    return jsonutils.loads(j)
 
 
 def write_json_to_file(path, data, mode='w'):
@@ -144,3 +147,19 @@ def write_json_to_file(path, data, mode='w'):
 def write_file(path, data, mode='w'):
     with open(path, mode) as f:
         f.write(data)
+
+
+def parse_ini_file(path):
+    parser = configparser.ConfigParser()
+    parser.read(path)
+
+    try:
+        default = {k: v for k, v in parser.items('DEFAULT')}
+    except configparser.NoSectionError:
+        default = {}
+
+    config = dict(DEFAULT=default,
+                  **{s: {k: v for k, v in parser.items(
+                      s)} for s in parser.sections()})
+
+    return config
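
A quick sketch of what parse_ini_file() returns for a hypothetical yardstick.conf, written to a temporary file purely for illustration. Note that configparser merges DEFAULT keys into every section it reports, so they reappear under dispatcher_influxdb as well:

import tempfile

from yardstick.common.utils import parse_ini_file

ini = """[DEFAULT]
dispatcher = influxdb

[dispatcher_influxdb]
target = http://127.0.0.1:8086
db_name = yardstick
"""

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
    f.write(ini)

print(parse_ini_file(f.name))
# {'DEFAULT': {'dispatcher': 'influxdb'},
#  'dispatcher_influxdb': {'dispatcher': 'influxdb',
#                          'target': 'http://127.0.0.1:8086',
#                          'db_name': 'yardstick'}}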

diff --git a/yardstick/dispatcher/base.py b/yardstick/dispatcher/base.py
index 09ce8d1..a1c8582 100644 (file)
@@ -38,10 +38,10 @@ class Base(object):
         raise RuntimeError("No such dispatcher_type %s" % dispatcher_type)
 
     @staticmethod
-    def get(config):
+    def get(conf, config):
         """Returns instance of a dispatcher for dispatcher type.
         """
-        return Base.get_cls(config["type"])(config)
+        return Base.get_cls(conf["type"])(conf, config)
 
     @abc.abstractmethod
     def record_result_data(self, data):

diff --git a/yardstick/dispatcher/file.py b/yardstick/dispatcher/file.py
index 6fc81d4..8acd5df 100644 (file)
@@ -29,7 +29,7 @@ class FileDispatcher(DispatchBase):
 
     __dispatcher_type__ = "File"
 
-    def __init__(self, conf):
+    def __init__(self, conf, config):
         super(FileDispatcher, self).__init__(conf)
         self.result = []
 

diff --git a/yardstick/dispatcher/http.py b/yardstick/dispatcher/http.py
index 7900861..e3bcbc8 100644 (file)
@@ -51,7 +51,7 @@ class HttpDispatcher(DispatchBase):
 
     __dispatcher_type__ = "Http"
 
-    def __init__(self, conf):
+    def __init__(self, conf, config):
         super(HttpDispatcher, self).__init__(conf)
         self.headers = {'Content-type': 'application/json'}
         self.timeout = CONF.dispatcher_http.timeout

diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index d388d28..53af79c 100644 (file)
@@ -13,9 +13,9 @@ import logging
 import os
 import time
 
+import collections
 import requests
 import six
-from oslo_config import cfg
 from oslo_serialization import jsonutils
 
 from third_party.influxdb.influxdb_line_protocol import make_lines
@@ -23,30 +23,6 @@ from yardstick.dispatcher.base import Base as DispatchBase
 
 LOG = logging.getLogger(__name__)
 
-CONF = cfg.CONF
-influx_dispatcher_opts = [
-    cfg.StrOpt('target',
-               default='http://127.0.0.1:8086',
-               help='The target where the http request will be sent. '
-                    'If this is not set, no data will be posted. For '
-                    'example: target = http://hostname:1234/path'),
-    cfg.StrOpt('db_name',
-               default='yardstick',
-               help='The database name to store test results.'),
-    cfg.StrOpt('username',
-               default='root',
-               help='The user name to access database.'),
-    cfg.StrOpt('password',
-               default='root',
-               help='The user password to access database.'),
-    cfg.IntOpt('timeout',
-               default=5,
-               help='The max time in seconds to wait for a request to '
-                    'timeout.'),
-]
-
-CONF.register_opts(influx_dispatcher_opts, group="dispatcher_influxdb")
-
 
 class InfluxdbDispatcher(DispatchBase):
     """Dispatcher class for posting data into an influxdb target.
@@ -54,13 +30,14 @@ class InfluxdbDispatcher(DispatchBase):
 
     __dispatcher_type__ = "Influxdb"
 
-    def __init__(self, conf):
+    def __init__(self, conf, config):
         super(InfluxdbDispatcher, self).__init__(conf)
-        self.timeout = CONF.dispatcher_influxdb.timeout
-        self.target = CONF.dispatcher_influxdb.target
-        self.db_name = CONF.dispatcher_influxdb.db_name
-        self.username = CONF.dispatcher_influxdb.username
-        self.password = CONF.dispatcher_influxdb.password
+        db_conf = config['yardstick'].get('dispatcher_influxdb', {})
+        self.timeout = int(db_conf.get('timeout', 5))
+        self.target = db_conf.get('target', 'http://127.0.0.1:8086')
+        self.db_name = db_conf.get('db_name', 'yardstick')
+        self.username = db_conf.get('username', 'root')
+        self.password = db_conf.get('password', 'root')
         self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name)
         self.raw_result = []
         self.case_name = ""
@@ -79,15 +56,17 @@ class InfluxdbDispatcher(DispatchBase):
     def _dict_key_flatten(self, data):
         next_data = {}
 
-        if not [v for v in data.values()
-                if type(v) == dict or type(v) == list]:
+        # use list, because iterable is too generic
+        if not [v for v in data.values() if
+                isinstance(v, (collections.Mapping, list))]:
             return data
 
         for k, v in six.iteritems(data):
-            if type(v) == dict:
+            if isinstance(v, collections.Mapping):
                 for n_k, n_v in six.iteritems(v):
                     next_data["%s.%s" % (k, n_k)] = n_v
-            elif type(v) == list:
+            # use list because iterable is too generic
+            elif isinstance(v, list):
                 for index, item in enumerate(v):
                     next_data["%s%d" % (k, index)] = item
             else:
@@ -119,11 +98,12 @@ class InfluxdbDispatcher(DispatchBase):
 
     def _data_to_line_protocol(self, data):
         msg = {}
-        point = {}
-        point["measurement"] = self.tc
-        point["fields"] = self._dict_key_flatten(data["benchmark"]["data"])
-        point["time"] = self._get_nano_timestamp(data)
-        point["tags"] = self._get_extended_tags(data)
+        point = {
+            "measurement": self.tc,
+            "fields": self._dict_key_flatten(data["benchmark"]["data"]),
+            "time": self._get_nano_timestamp(data),
+            "tags": self._get_extended_tags(data),
+        }
         msg["points"] = [point]
         msg["tags"] = self.static_tags
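
Switching the type checks to isinstance(v, collections.Mapping) keeps the flattening behaviour while accepting any mapping, not just plain dicts. A standalone sketch (not the dispatcher itself) that roughly mirrors what _dict_key_flatten() does to nested benchmark data before it becomes InfluxDB fields; the sample data is made up:

try:                                     # Python 3.3+
    from collections.abc import Mapping
except ImportError:                      # Python 2
    from collections import Mapping


def dict_key_flatten(data):
    """flatten nested dicts into 'parent.child' keys and lists into 'key0', 'key1', ..."""
    next_data = {}
    if not [v for v in data.values() if isinstance(v, (Mapping, list))]:
        return data
    for k, v in data.items():
        if isinstance(v, Mapping):
            for n_k, n_v in v.items():
                next_data["%s.%s" % (k, n_k)] = n_v
        elif isinstance(v, list):
            for index, item in enumerate(v):
                next_data["%s%d" % (k, index)] = item
        else:
            next_data[k] = v
    return dict_key_flatten(next_data)


print(dict_key_flatten({'rtt': {'ares': 1.125}, 'mpstat': {'cpu0': {'%idle': 99.0}}}))
# {'rtt.ares': 1.125, 'mpstat.cpu0.%idle': 99.0}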