Merge "TC042 bug fix"
author    Ross Brattain <ross.b.brattain@intel.com>
Fri, 23 Mar 2018 21:51:18 +0000 (21:51 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Fri, 23 Mar 2018 21:51:18 +0000 (21:51 +0000)
35 files changed:
ansible/build_yardstick_image.yml
api/resources/v1/testsuites.py
docs/testing/user/userguide/13-nsb_operation.rst
docs/testing/user/userguide/opnfv_yardstick_tc090.rst [new file with mode: 0644]
docs/testing/user/userguide/opnfv_yardstick_tc091.rst [new file with mode: 0644]
install.sh
requirements.txt
samples/dummy-no-context.yaml
samples/parser.yaml
samples/ping_bottlenecks.yaml
samples/storage_bottlenecks.yaml
samples/storperf.yaml
samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_bottlenecks_scale_out.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc090.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-ha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
yardstick/benchmark/core/task.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/scenarios/base.py
yardstick/benchmark/scenarios/lib/create_router.py
yardstick/benchmark/scenarios/networking/vnf_generic.py
yardstick/common/exceptions.py
yardstick/common/openstack_utils.py
yardstick/network_services/vnf_generic/vnf/sample_vnf.py
yardstick/tests/unit/apiserver/resources/v1/__init__.py [new file with mode: 0644]
yardstick/tests/unit/apiserver/resources/v1/test_testsuites.py [new file with mode: 0644]
yardstick/tests/unit/benchmark/scenarios/lib/test_create_router.py
yardstick/tests/unit/benchmark/scenarios/test_base.py
yardstick/tests/unit/common/test_openstack_utils.py

index c9b6e74..4ad6e97 100644 (file)
@@ -59,7 +59,7 @@
     - set_fact:
         raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
 
-  # cleanup non-lxd
+    # cleanup non-lxd
     - name: unmount all old mount points
       mount:
         name: "{{ item }}"
       command: kpartx -dv "{{ raw_imgfile }}"
       ignore_errors: true
 
+    - name: delete loop devices for image file
+      # use this because kpartx -dv will fail if raw_imgfile was deleted
+      # but in theory we could have deleted file still attached to loopback device?
+      # use grep because of // and awk
+      shell: losetup -O NAME,BACK-FILE | grep "{{ raw_imgfile }}" | awk '{ print $1 }' | xargs -l1 losetup -d
+      ignore_errors: true
+
     - name: delete {{ raw_imgfile }}
       file:
         path: "{{ raw_imgfile }}"
       with_sequence: start=0 end=9
       tags: mknod_devices
 
+    - name: Debug dump loop devices
+      command: losetup
+      register: losetup_output
+
+    - debug:
+        var: losetup_output
+        verbosity: 2
+
     - name: find first partition device
-#      command: kpartx -l "{{ loop_device }}"
       command: kpartx -l "{{ raw_imgfile }}"
       register: kpartx_res
 
index 5f72c2e..3e14670 100644 (file)
@@ -20,6 +20,7 @@ from yardstick.common.utils import result_handler
 from yardstick.benchmark.core import Param
 from yardstick.benchmark.core.task import Task
 from api.swagger import models
+from api.database.v1.handlers import TasksHandler
 
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)
@@ -58,7 +59,7 @@ class V1Testsuite(ApiResource):
         task_args.update(args.get('opts', {}))
 
         param = Param(task_args)
-        task_thread = TaskThread(Task().start, param)
+        task_thread = TaskThread(Task().start, param, TasksHandler())
         task_thread.start()
 
         return result_handler(consts.API_SUCCESS, {'task_id': task_id})
index 8c477fa..e791b04 100644 (file)
@@ -126,7 +126,7 @@ To collectd KPIs from the NFVi compute nodes:
 
 
 Scale-Up
-------------------
+--------
 
 VNFs performance data with scale-up
 
@@ -137,21 +137,59 @@ VNFs performance data with scale-up
 Heat
 ^^^^
 
-For VNF scale-up tests we increase the number for VNF worker threads.  In the case of VNFs
+For VNF scale-up tests we increase the number for VNF worker threads and ports.  In the case of VNFs
 we also need to increase the number of VCPUs and memory allocated to the VNF.
 
 An example scale-up Heat testcase is:
 
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale-up.yaml
+   :language: yaml
+
+This testcase template requires specifying the number of VCPUs, Memory and Ports.
+We set the VCPUs and memory using the ``--task-args`` options
+
 .. code-block:: console
 
-  <repo>/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
+  yardstick task start --task-args='{"mem": 10480, "vcpus": 4, "ports": 2}' \
+  samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale-up.yaml
 
-This testcase template requires specifying the number of VCPUs and Memory.
-We set the VCPUs and memory using the --task-args options
+In order to support ports scale-up, traffic and topology templates need to be used in testcase.
 
-.. code-block:: console
+An example topology template is:
+
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/nsut/vfw/vfw-tg-topology-scale-up.yaml
+   :language: yaml
+
+This template has ``vports`` as an argument. To pass this argument it needs to
+be configured in ``extra_args`` scenario definition. Please note that more
+arguments can be defined in that section. All of them will be passed to topology
+and traffic profile templates.
+
+For example:
+
+.. code-block:: yaml
 
-  yardstick --debug task start --task-args='{"mem": 20480, "vcpus": 10}'   samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
+   schema: yardstick:task:0.1
+   scenarios:
+   - type: NSPerf
+     traffic_profile: ../../traffic_profiles/ipv4_throughput-scale-up.yaml
+     extra_args:
+       vports: {{ vports }}
+     topology: vfw-tg-topology-scale-up.yaml
+
+An example traffic profile template is:
+
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml
+   :language: yaml
+
+There is an option to provide predefined config for SampleVNFs. Path to config
+file may be specified in ``vnf_config`` scenario section.
+
+.. code-block:: yaml
+
+   vnf__0:
+      rules: acl_1rule.yaml
+      vnf_config: {lb_config: 'SW', file: vfw_vnf_pipeline_cores_4_ports_2_lb_1_sw.conf }
 
 
 Baremetal
@@ -266,5 +304,3 @@ To enable multiple queue set the queues_per_port value in the TG VNF options sec
       options:
         tg_0:
           queues_per_port: 2
-
-
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc090.rst b/docs/testing/user/userguide/opnfv_yardstick_tc090.rst
new file mode 100644 (file)
index 0000000..1f8747b
--- /dev/null
@@ -0,0 +1,151 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC090
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node OpenStack Service High Availability - Database Instances        |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC090: Control node OpenStack service down - |
+|              | database instances                                           |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of the      |
+|              | database instances used by OpenStack (mysql) on control      |
+|              | node.                                                        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of database service on a  |
+|              | selected control node, then checks whether the request of    |
+|              | the related OpenStack command is OK and the killed processes |
+|              | are recovered.                                               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the specified  |
+|              | OpenStack service. If there are multiple processes use the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | In this case, this parameter should always be set to the name|
+|              | of the database service of OpenStack.                        |
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "mysql"                                       |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "openstack-cmd" monitor constantly request a specific |
+|              | Openstack command, which needs two parameters:               |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request. |
+|              | In this case, the command name should be neutron related     |
+|              | commands.                                                    |
+|              |                                                              |
+|              | 2. the "process" monitor check whether a process is running  |
+|              | on a specific node, which needs three parameters:            |
+|              | 1) monitor_type: which used for finding the monitor class and|
+|              | related scripts. It should be always set to "process"        |
+|              | for this monitor.                                            |
+|              | 2) process_name: which is the process name for monitor       |
+|              | 3) host: which is the name of the node running the process   |
+|              |                                                              |
+|              | The examples of monitors show as follows, there are four     |
+|              | instances of the "openstack-cmd" monitor, in order to check  |
+|              | the database connection of different OpenStack components.   |
+|              |                                                              |
+|              | monitor1:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack image list"                            |
+|              | monitor2:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack router list"                           |
+|              | monitor3:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack stack list"                            |
+|              | monitor4:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack volume list"                           |
+|              | monitor5:                                                    |
+|              | -monitor_type: "process"                                     |
+|              | -process_name: "mysql"                                       |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified OpenStack command request.   |
+|              | 2)process_recover_time: which indicates the maximum time     |
+|              | (seconds) from the process being killed to recovered         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc090.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to stopping the monitors                        |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should record on pod.yaml first.       |
+|              | the "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check the |
+|              | status of the specified process on the host, and restart the |
+|              | process if it is not running for next test cases             |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc091.rst b/docs/testing/user/userguide/opnfv_yardstick_tc091.rst
new file mode 100644 (file)
index 0000000..8e89b64
--- /dev/null
@@ -0,0 +1,138 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC091
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node Openstack Service High Availability - Heat Api                  |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC091: Control node OpenStack service down - |
+|              | heat api                                                     |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of the      |
+|              | orchestration service provided by OpenStack (heat-api) on    |
+|              | control node.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of heat-api service on a  |
+|              | selected control node, then checks whether the request of    |
+|              | the related OpenStack command is OK and the killed processes |
+|              | are recovered.                                               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the specified  |
+|              | OpenStack service. If there are multiple processes use the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | In this case. This parameter should always set to "heat-api".|
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "heat-api"                                    |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "openstack-cmd" monitor constantly request a specific |
+|              | OpenStack command, which needs two parameters:               |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request. |
+|              | In this case, the command name should be neutron related     |
+|              | commands.                                                    |
+|              |                                                              |
+|              | 2. the "process" monitor check whether a process is running  |
+|              | on a specific node, which needs three parameters:            |
+|              | 1) monitor_type: which used for finding the monitor class and|
+|              | related scripts. It should be always set to "process"        |
+|              | for this monitor.                                            |
+|              | 2) process_name: which is the process name for monitor       |
+|              | 3) host: which is the name of the node running the process   |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | monitor1:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "heat stack list"                             |
+|              | monitor2:                                                    |
+|              | -monitor_type: "process"                                     |
+|              | -process_name: "heat-api"                                    |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified OpenStack command request.   |
+|              | 2)process_recover_time: which indicates the maximum time     |
+|              | (seconds) from the process being killed to recovered         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc091.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to the monitor stopped                          |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should record on pod.yaml first.       |
+|              | the "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check the |
+|              | status of the specified process on the host, and restart the |
+|              | process if it is not running for next test cases             |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
index 5cd8367..1dbf64d 100755 (executable)
@@ -96,7 +96,7 @@ git config --global http.sslVerify false
 
 
 # install yardstick + dependencies
-easy_install -U pip
+easy_install -U pip==9.0.1
 pip install -r requirements.txt
 pip install -e .
 
index d45e4b1..02545de 100644 (file)
@@ -43,7 +43,7 @@ oslo.utils==3.28.0      # OSI Approved  Apache Software License
 paramiko==2.2.1         # LGPL; OSI Approved  GNU Library or Lesser General Public License (LGPL)
 pbr==3.1.1              # OSI Approved  Apache Software License; Apache License, Version 2.0
 pika==0.10.0            # BSD; OSI Approved  BSD License
-pip==9.0.1;python_version=='2.7'        # MIT
+pip==9.0.1              # MIT
 positional==1.1.2       # OSI Approved  Apache Software License
 pycrypto==2.6.1         # Public Domain
 pyparsing==2.2.0        # MIT License; OSI Approved  MIT License
@@ -55,7 +55,7 @@ python-keystoneclient==3.13.0   # OSI Approved  Apache Software License
 python-neutronclient==6.5.0     # OSI Approved  Apache Software License
 python-novaclient==9.1.1        # OSI Approved  Apache Software License
 pyzmq==16.0.2           # LGPL+BSD; OSI Approved  GNU Library or Lesser General Public License (LGPL); OSI Approved  BSD License
-requests==2.18.2        # Apache 2.0; OSI Approved  Apache Software License
+requests==2.11.1        # Apache 2.0; OSI Approved  Apache Software License
 requestsexceptions==1.3.0   # OSI Approved  Apache Software License
 scp==0.10.2             # LGPL
 shade==1.22.2           # OSI Approved  Apache Software License
index 7667e5a..e4ace44 100644 (file)
@@ -14,7 +14,7 @@ schema: "yardstick:task:0.1"
 scenarios:
 -
   type: Dummy
-
+  name: Dummy
   runner:
     type: Duration
     duration: 5
index e2e4b66..682c113 100644 (file)
@@ -27,3 +27,4 @@ scenarios:
 
 context:
   type: Dummy
+  name: Dummy
index e6fef42..625d450 100644 (file)
@@ -15,6 +15,10 @@ description: >
     measure VMs latency using ping;
 run_in_parallel: true
 {% set stack_num = stack_num or 1 %}
+{% set image_name = image_name or "yardstick-image" %}
+{% set cpu_num = cpu_num or 1 %}
+{% set ram_num = ram_num or 512 %}
+{% set disk_num = disk_num or 7 %}
 
 scenarios:
 {% for num in range(stack_num) %}
@@ -34,8 +38,11 @@ contexts:
 {% for num in range(stack_num) %}
 -
   name: demo{{num}}
-  image: yardstick-image
-  flavor: yardstick-flavor
+  image: {{image_name}}
+  flavor:
+    vcpus: {{cpu_num}}
+    ram: {{ram_num}}
+    disk: {{disk_num}}
   user: ubuntu
 
   placement_groups:
index 1aa0d7e..971a307 100644 (file)
@@ -31,6 +31,11 @@ run_in_parallel: true
 {% set numjobs = numjobs or "1" %}
 {% set direct = direct or "1" %}
 {% set volume_size = volume_size or 50 %}
+{% set image_name = image_name or "yardstick-image" %}
+{% set cpu_num = cpu_num or 1 %}
+{% set ram_num = ram_num or 512 %}
+{% set disk_num = disk_num or 7 %}
+{% set run_time = run_time or 3000 %}
 
 scenarios:
 {% for num in range(stack_num) %}
@@ -51,7 +56,7 @@ scenarios:
 
   runner:
     type: Duration
-    duration: 60
+    duration: {{ run_time }}
     interval: 1
 {% endfor %}
 
@@ -59,8 +64,11 @@ contexts:
 {% for context_num in range(stack_num) %}
 -
   name: storage_bottlenecks-{{context_num}}-{{volume_num}}
-  image: yardstick-image
-  flavor: yardstick-flavor
+  image: {{image_name}}
+  flavor:
+    vcpus: {{cpu_num}}
+    ram: {{ram_num}}
+    disk: {{disk_num}}
   user: ubuntu
 
   servers:
@@ -74,4 +82,4 @@ contexts:
     test:
       cidr: "10.0.1.0/24"
       port_security_enabled: true
-{% endfor %}
\ No newline at end of file
+{% endfor %}
index 2ea0221..00f74c1 100644 (file)
@@ -38,3 +38,4 @@ scenarios:
 
 context:
   type: Dummy
+  name: Dummy
diff --git a/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_bottlenecks_scale_out.yaml b/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_bottlenecks_scale_out.yaml
new file mode 100644 (file)
index 0000000..fe3595b
--- /dev/null
@@ -0,0 +1,123 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+
+{% set num_vnfs = num_vnfs or 2 %}
+{% set image_name = image_name or "yardstick-samplevnfs" %}
+{% set cpu_num = cpu_num or 10 %}
+{% set ram_num = ram_num or 10240 %}
+{% set disk_num = disk_num or 7 %}
+
+scenarios:
+  - type: NSPerf
+    traffic_profile: ../../traffic_profiles/ipv4_throughput-{{ num_vnfs }}.yaml
+    topology: acl-tg-topology-3node-{{ num_vnfs }}.yaml
+    nodes:
+      tg__0: tg_0.yardstick
+      tg__1: tg_1.yardstick
+{% for vnf_num in range(num_vnfs|int) %}
+      vnf__{{ vnf_num }}: vnf_{{ vnf_num }}.yardstick
+{% endfor %}
+    options:
+      framesize:
+        uplink: {64B: 100}
+        downlink: {64B: 100}
+      flow:
+        src_ip:
+{% for vnf_num in range(num_vnfs|int) %}
+          - {'tg__0': 'xe{{ vnf_num }}'}
+{% endfor %}
+        dst_ip:
+{% for vnf_num in range(num_vnfs|int) %}
+          - {'tg__1': 'xe{{ vnf_num }}'}
+{% endfor %}
+        count: 1
+      traffic_type: 4
+      rfc2544:
+        allowed_drop_rate: 0.0001 - 0.0001
+        correlated_traffic: true
+{% for vnf_num in range(num_vnfs|int) %}
+      vnf__{{ vnf_num }}:
+        rules: acl_1rule.yaml
+        vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+{% endfor %}
+    runner:
+      type: Iteration
+      iterations: 10
+      interval: 35
+context:
+  name: yardstick
+  image: {{ image_name }}
+  flavor:
+    vcpus: {{ cpu_num }}
+    ram: {{ ram_num }}
+    disk: {{ disk_num }}
+    extra_specs:
+      hw:cpu_sockets: 1
+      hw:cpu_cores: {{ cpu_num }}
+      hw:cpu_threads: 1
+  user: ubuntu
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+  servers:
+    tg_0:
+      floating_ip: true
+      placement: "pgrp1"
+      network_ports:
+        mgmt:
+          - mgmt
+{% for vnf_num in range(num_vnfs|int) %}
+        uplink_{{ vnf_num }}:
+          - xe{{ vnf_num }}
+{% endfor %}
+    tg_1:
+      floating_ip: true
+      placement: "pgrp1"
+      network_ports:
+        mgmt:
+          - mgmt
+{% for vnf_num in range(num_vnfs|int) %}
+        downlink_{{ vnf_num }}:
+          - xe{{ vnf_num }}
+{% endfor %}
+{% for vnf_num in range(num_vnfs|int) %}
+    vnf_{{ vnf_num }}:
+      floating_ip: true
+      placement: "pgrp1"
+      network_ports:
+        mgmt:
+          - mgmt
+        uplink_{{ vnf_num }}:
+          - xe0
+        downlink_{{ vnf_num }}:
+          - xe1
+{% endfor %}
+  networks:
+    mgmt:
+      cidr: '10.0.1.0/24'
+{% for vnf_num in range(num_vnfs|int) %}
+    uplink_{{ vnf_num }}:
+      cidr: '10.{{ vnf_num + 1 }}.0.0/24'
+      gateway_ip: 'null'
+      port_security_enabled: False
+      enable_dhcp: 'false'
+    downlink_{{ vnf_num }}:
+      cidr: '10.{{ vnf_num + 1 }}.1.0/24'
+      gateway_ip: 'null'
+      port_security_enabled: False
+      enable_dhcp: 'false'
+{% endfor %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc090.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc090.yaml
new file mode 100644 (file)
index 0000000..4137204
--- /dev/null
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC090 config file;
+    HA test case: Control node Openstack service down - database instance.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or "node1" %}
+{% set attack_process = attack_process or "mysql" %}
+{% set monitor_time = monitor_time or 30 %}
+
+scenarios:
+-
+  type: ServiceHA
+  options:
+    attackers:
+    - fault_type: "kill-process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+
+    monitors:
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack image list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack router list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack stack list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack volume list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_recover_time: 30
+
+  nodes:
+    {{attack_host}}: {{attack_host}}.LF
+
+  runner:
+    type: Duration
+    duration: 1
+  sla:
+    outage_time: 5
+    action: monitor
+
+
+context:
+  type: Node
+  name: LF
+  file: {{file}}
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml
new file mode 100644 (file)
index 0000000..d952464
--- /dev/null
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC091 config file;
+    HA test case: Control node Openstack service down - heat-api.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or "node1" %}
+{% set attack_process = attack_process or "heat-api" %}
+
+scenarios:
+-
+  type: ServiceHA
+  options:
+    attackers:
+    - fault_type: "kill-process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+
+    monitors:
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack stack list"
+      monitor_time: 10
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+      monitor_time: 30
+      monitor_number: 3
+      sla:
+        max_recover_time: 30
+
+  nodes:
+    {{attack_host}}: {{attack_host}}.LF
+
+  runner:
+    type: Duration
+    duration: 1
+  sla:
+    outage_time: 5
+    action: monitor
+
+
+context:
+  type: Node
+  name: LF
+  file: {{file}}
+
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-ha_daily.yaml
new file mode 100644 (file)
index 0000000..cb2b131
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn stor4nfv ha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-stor4nfv-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-noha_daily.yaml
new file mode 100644 (file)
index 0000000..961b8da
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn stor4nfv noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-stor4nfv-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
index fa3c789..7c213e2 100644 (file)
@@ -93,12 +93,10 @@ test_cases:
     file_name: opnfv_yardstick_tc027.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod1,lf-pod2,ericsson-pod3,ericsson-pod4
+        pod: huawei-pod1,lf-pod2
     task_args:
         huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
         lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
-        ericsson-pod3: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
-        ericsson-pod4: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
 -
     file_name: opnfv_yardstick_tc074.yaml
     constraint:
@@ -115,72 +113,82 @@ test_cases:
     file_name: opnfv_yardstick_tc045.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc046.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc047.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc048.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc049.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc050.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc051.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc052.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc053.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc019.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc025.yaml
     constraint:
index c7d2abc..26bd1da 100644 (file)
@@ -533,10 +533,12 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
     @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
     @mock.patch.object(utils, 'read_meminfo',
                        return_value={'Hugepagesize': '2048'})
-    def test__setup_hugepages(self, mock_meminfo, *args):
+    def test__setup_hugepages_no_hugepages_defined(self, mock_meminfo, *args):
         ssh_helper = mock.Mock()
+        scenario_helper = mock.Mock()
+        scenario_helper.all_options = {}
         dpdk_setup_helper = DpdkVnfSetupEnvHelper(
-            mock.ANY, ssh_helper, mock.ANY)
+            mock.ANY, ssh_helper, scenario_helper)
         with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
             dpdk_setup_helper._setup_hugepages()
             mock_info.assert_called_once_with(
@@ -544,6 +546,22 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
                 '%s', 2048, 8192, 100)
         mock_meminfo.assert_called_once_with(ssh_helper)
 
+    @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
+    @mock.patch.object(utils, 'read_meminfo',
+                       return_value={'Hugepagesize': '1048576'})
+    def test__setup_hugepages_8gb_hugepages_defined(self, mock_meminfo, *args):
+        ssh_helper = mock.Mock()
+        scenario_helper = mock.Mock()
+        scenario_helper.all_options = {'hugepages_gb': 8}
+        dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+            mock.ANY, ssh_helper, scenario_helper)
+        with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
+            dpdk_setup_helper._setup_hugepages()
+            mock_info.assert_called_once_with(
+                'Hugepages size (kB): %s, number claimed: %s, number set: '
+                '%s', 1048576, 8, 100)
+        mock_meminfo.assert_called_once_with(ssh_helper)
+
     @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
     @mock.patch.object(utils, 'find_relative_file')
     @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
index 4272a6d..955b8ca 100644 (file)
@@ -614,15 +614,25 @@ class TaskParser(object):       # pragma: no cover
             vnf__0: vnf_0.yardstick
         """
         def qualified_name(name):
-            node_name, context_name = name.split('.')
+            try:
+                # for openstack
+                node_name, context_name = name.split('.')
+                sep = '.'
+            except ValueError:
+                # for kubernetes, some kubernetes resources don't support
+                # name format like 'xxx.xxx', so we use '-' instead
+                # this needs to be unified later
+                node_name, context_name = name.split('-')
+                sep = '-'
+
             try:
                 ctx = next((context for context in contexts
-                       if context.assigned_name == context_name))
+                            if context.assigned_name == context_name))
             except StopIteration:
                 raise y_exc.ScenarioConfigContextNameNotFound(
                     context_name=context_name)
 
-            return '{}.{}'.format(node_name, ctx.name)
+            return '{}{}{}'.format(node_name, sep, ctx.name)
 
         if 'host' in scenario:
             scenario['host'] = qualified_name(scenario['host'])
index fbf72a7..60b0348 100644 (file)
@@ -66,6 +66,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         data = {}
         errors = ""
 
+        benchmark.pre_run_wait_time(interval)
+
         try:
             result = method(data)
         except AssertionError as assertion:
@@ -77,7 +79,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 errors = assertion.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems  https://bugs.python.org/issue9400
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception("")
         else:
@@ -86,7 +88,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 # if we do timeout we don't care about dropping individual KPIs
                 output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-        time.sleep(interval)
+        benchmark.post_run_wait_time(interval)
 
         benchmark_output = {
             'timestamp': time.time(),
index cb04243..20d6da0 100644 (file)
@@ -71,6 +71,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             data = {}
             errors = ""
 
+            benchmark.pre_run_wait_time(interval)
+
             try:
                 result = method(data)
             except AssertionError as assertion:
@@ -90,7 +92,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                     scenario_cfg['options']['rate'] -= delta
                     sequence = 1
                     continue
-            except Exception:
+            except Exception:  # pylint: disable=broad-except
                 errors = traceback.format_exc()
                 LOG.exception("")
             else:
@@ -99,7 +101,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                     # if we do timeout we don't care about dropping individual KPIs
                     output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-            time.sleep(interval)
+            benchmark.post_run_wait_time(interval)
 
             benchmark_output = {
                 'timestamp': time.time(),
index 10a7288..58a0280 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# yardstick comment: this is a modified copy of
-# rally/rally/benchmark/scenarios/base.py
+import abc
+import time
 
+import six
 from stevedore import extension
 
 import yardstick.common.utils as utils
@@ -37,20 +38,29 @@ def _iter_scenario_classes(scenario_type=None):
             yield scenario
 
 
+@six.add_metaclass(abc.ABCMeta)
 class Scenario(object):
 
     def setup(self):
-        """ default impl for scenario setup """
+        """Default setup implementation for Scenario classes"""
         pass
 
+    @abc.abstractmethod
     def run(self, *args):
-        """ catcher for not implemented run methods in subclasses """
-        raise RuntimeError("run method not implemented")
+        """Entry point for scenario classes, called from runner worker"""
 
     def teardown(self):
-        """ default impl for scenario teardown """
+        """Default teardown implementation for Scenario classes"""
         pass
 
+    def pre_run_wait_time(self, time_seconds):
+        """Time waited before executing the run method"""
+        pass
+
+    def post_run_wait_time(self, time_seconds):
+        """Time waited after executing the run method"""
+        time.sleep(time_seconds)
+
     @staticmethod
     def get_types():
         """return a list of known runner type (class) names"""
@@ -88,10 +98,14 @@ class Scenario(object):
         """
         return cls.__doc__.splitlines()[0] if cls.__doc__ else str(None)
 
-    def _push_to_outputs(self, keys, values):
+    @staticmethod
+    def _push_to_outputs(keys, values):
+        """Return a dictionary given the keys and the values"""
         return dict(zip(keys, values))
 
-    def _change_obj_to_dict(self, obj):
+    @staticmethod
+    def _change_obj_to_dict(obj):
+        """Return a dictionary from the __dict__ attribute of an object"""
         dic = {}
         for k, v in vars(obj).items():
             try:
index 9aa57eb..34252f6 100644 (file)
@@ -7,13 +7,11 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -28,9 +26,14 @@ class CreateRouter(base.Scenario):
         self.context_cfg = context_cfg
         self.options = self.scenario_cfg['options']
 
-        self.openstack = self.options.get("openstack_paras", None)
+        self.name = self.options.get('name')
+        self.admin_state_up = self.options.get('admin_state_up', True)
+        self.ext_gateway_net_id = self.options.get('ext_gateway_net_id')
+        self.enable_snat = self.options.get('enable_snat')
+        self.ext_fixed_ips = self.options.get('ext_fixed_ips')
+        self.project_id = self.options.get('project_id')
 
-        self.neutron_client = op_utils.get_neutron_client()
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -45,22 +48,19 @@ class CreateRouter(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        openstack_paras = {'router': self.openstack}
-        router_id = op_utils.create_neutron_router(self.neutron_client,
-                                                   openstack_paras)
-        if router_id:
-            result.update({"network_create": 1})
-            LOG.info("Create router successful!")
-        else:
-            result.update({"network_create": 0})
+        router_id = openstack_utils.create_neutron_router(
+            self.shade_client, name=self.name,
+            admin_state_up=self.admin_state_up,
+            ext_gateway_net_id=self.ext_gateway_net_id,
+            enable_snat=self.enable_snat, ext_fixed_ips=self.ext_fixed_ips,
+            project_id=self.project_id)
+        if not router_id:
+            result.update({"router_create": 0})
             LOG.error("Create router failed!")
+            raise exceptions.ScenarioCreateRouterError
 
-        check_result = router_id
-
-        try:
-            keys = self.scenario_cfg.get('output', '').split()
-        except KeyError:
-            pass
-        else:
-            values = [check_result]
-            return self._push_to_outputs(keys, values)
+        result.update({"router_create": 1})
+        LOG.info("Create router successful!")
+        keys = self.scenario_cfg.get('output', '').split()
+        values = [router_id]
+        return self._push_to_outputs(keys, values)
index 0e47852..be2fa3f 100644 (file)
@@ -14,6 +14,7 @@
 
 import copy
 import logging
+import time
 
 import ipaddress
 from itertools import chain
@@ -484,3 +485,11 @@ class NetworkServiceTestCase(scenario_base.Scenario):
             # https://bugs.python.org/issue9400
             LOG.exception("")
             raise RuntimeError("Error in teardown")
+
+    def pre_run_wait_time(self, time_seconds):
+        """Time waited before executing the run method"""
+        time.sleep(time_seconds)
+
+    def post_run_wait_time(self, time_seconds):
+        """Time waited after executing the run method"""
+        pass
index 633b36f..8160c5b 100644 (file)
@@ -120,3 +120,7 @@ class MissingPodInfoError(YardstickException):
 
 class UnsupportedPodFormatError(YardstickException):
     message = 'Failed to load pod info, unsupported format'
+
+
+class ScenarioCreateRouterError(YardstickException):
+    message = 'Create Neutron Router Scenario failed'
index 84bfbbb..a4fd4e5 100644 (file)
@@ -519,13 +519,29 @@ def create_neutron_subnet(shade_client, network_name_or_id, cidr=None,
         return None
 
 
-def create_neutron_router(neutron_client, json_body):      # pragma: no cover
+def create_neutron_router(shade_client, name=None, admin_state_up=True,
+                          ext_gateway_net_id=None, enable_snat=None,
+                          ext_fixed_ips=None, project_id=None):
+    """Create a logical router.
+
+    :param name:(string) the router name.
+    :param admin_state_up:(bool) the administrative state of the router.
+    :param ext_gateway_net_id:(string) network ID for the external gateway.
+    :param enable_snat:(bool) enable Source NAT (SNAT) attribute.
+    :param ext_fixed_ips: List of dictionaries of desired IP and/or subnet
+                          on the external network.
+    :param project_id:(string) project ID for the router.
+
+    :returns:(string) the router id.
+    """
     try:
-        router = neutron_client.create_router(json_body)
-        return router['router']['id']
-    except Exception:  # pylint: disable=broad-except
-        log.error("Error [create_neutron_router(neutron_client)]")
-        raise Exception("operation error")
+        router = shade_client.create_router(
+            name, admin_state_up, ext_gateway_net_id, enable_snat,
+            ext_fixed_ips, project_id)
+        return router['id']
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [create_neutron_router(shade_client)]. "
+                  "Exception message: %s", o_exc.orig_message)
 
 
 def delete_neutron_router(shade_client, router_id):
index addbd9a..77488c4 100644 (file)
@@ -79,7 +79,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
     APP_NAME = 'DpdkVnf'
     FIND_NET_CMD = "find /sys/class/net -lname '*{}*' -printf '%f'"
     NR_HUGEPAGES_PATH = '/proc/sys/vm/nr_hugepages'
-    HUGEPAGES_KB = 1024 * 1024 * 16
 
     @staticmethod
     def _update_packet_type(ip_pipeline_cfg, traffic_options):
@@ -118,7 +117,8 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
     def _setup_hugepages(self):
         meminfo = utils.read_meminfo(self.ssh_helper)
         hp_size_kb = int(meminfo['Hugepagesize'])
-        nr_hugepages = int(abs(self.HUGEPAGES_KB / hp_size_kb))
+        hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16)
+        nr_hugepages = int(abs(hugepages_gb * 1024 * 1024 / hp_size_kb))
         self.ssh_helper.execute('echo %s | sudo tee %s' %
                                 (nr_hugepages, self.NR_HUGEPAGES_PATH))
         hp = six.BytesIO()
diff --git a/yardstick/tests/unit/apiserver/resources/v1/__init__.py b/yardstick/tests/unit/apiserver/resources/v1/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/yardstick/tests/unit/apiserver/resources/v1/test_testsuites.py b/yardstick/tests/unit/apiserver/resources/v1/test_testsuites.py
new file mode 100644 (file)
index 0000000..85c045f
--- /dev/null
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import mock
+
+import unittest
+
+from yardstick.tests.unit.apiserver import APITestCase
+from api.utils.thread import TaskThread
+
+
+class TestsuiteTestCase(APITestCase):
+
+    def test_run_test_suite(self):
+        if self.app is None:
+            unittest.skip('host config error')
+            return
+
+        TaskThread.start = mock.MagicMock()
+
+        url = 'yardstick/testsuites/action'
+        data = {
+            'action': 'run_test_suite',
+            'args': {
+                'opts': {},
+                'testsuite': 'opnfv_smoke'
+            }
+        }
+        resp = self._post(url, data)
+        self.assertEqual(resp.get('status'), 1)
index 3469a2a..8d6f119 100644 (file)
@@ -6,25 +6,52 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.create_router import CreateRouter
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_router
 
 
 class CreateRouterTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
-    @mock.patch('yardstick.common.openstack_utils.create_neutron_router')
-    def test_create_router(self, mock_get_neutron_client, mock_create_neutron_router):
-        options = {
-            'openstack_paras': {
-                'admin_state_up': 'True',
-                'name': 'yardstick_router'
-            }
-        }
-        args = {"options": options}
-        obj = CreateRouter(args, {})
-        obj.run({})
-        mock_get_neutron_client.assert_called_once()
-        mock_create_neutron_router.assert_called_once()
+    def setUp(self):
+
+        self._mock_create_neutron_router = mock.patch.object(
+            openstack_utils, 'create_neutron_router')
+        self.mock_create_neutron_router = (
+            self._mock_create_neutron_router.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(create_router, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'name': 'yardstick_net'}}
+        self.result = {}
+
+        self.crouter_obj = create_router.CreateRouter(self.args, mock.ANY)
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_create_neutron_router.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        _uuid = uuidutils.generate_uuid()
+        self.crouter_obj.scenario_cfg = {'output': 'id'}
+        self.mock_create_neutron_router.return_value = _uuid
+        output = self.crouter_obj.run(self.result)
+        self.assertEqual({"router_create": 1}, self.result)
+        self.assertEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with('Create router successful!')
+
+    def test_run_fail(self):
+        self.mock_create_neutron_router.return_value = None
+        with self.assertRaises(exceptions.ScenarioCreateRouterError):
+            self.crouter_obj.run(self.result)
+        self.assertEqual({"router_create": 0}, self.result)
+        self.mock_log.error.assert_called_once_with('Create router failed!')
index 9853385..284a71c 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
+import mock
+
 from yardstick.benchmark.scenarios import base
 from yardstick.tests.unit import base as ut_base
 
 
+class _TestScenario(base.Scenario):
+    __scenario_type__ = 'Test Scenario'
+
+    def run(self):
+        pass
+
+
 class ScenarioTestCase(ut_base.BaseUnitTestCase):
 
     def test_get_scenario_type(self):
@@ -85,6 +96,25 @@ class ScenarioTestCase(ut_base.BaseUnitTestCase):
         self.assertEqual('No such scenario type %s' % wrong_scenario_name,
                          str(exc.exception))
 
+    def test_scenario_abstract_class(self):
+        # pylint: disable=abstract-class-instantiated
+        with self.assertRaises(TypeError):
+            base.Scenario()
+
+    @mock.patch.object(time, 'sleep')
+    def test_pre_run_wait_time(self, mock_sleep):
+        """Ensure default behaviour (backwards compatibility): no wait time"""
+        test_scenario = _TestScenario()
+        test_scenario.pre_run_wait_time(mock.ANY)
+        mock_sleep.assert_not_called()
+
+    @mock.patch.object(time, 'sleep')
+    def test_post_run_wait_time(self, mock_sleep):
+        """Ensure default behaviour (backwards compatibility): wait time"""
+        test_scenario = _TestScenario()
+        test_scenario.post_run_wait_time(100)
+        mock_sleep.assert_called_once_with(100)
+
 
 class IterScenarioClassesTestCase(ut_base.BaseUnitTestCase):
 
index c6b0f46..e39a13f 100644 (file)
@@ -161,3 +161,27 @@ class DeleteNeutronRouterTestCase(unittest.TestCase):
                                                        'router_id')
         mock_logger.error.assert_called_once()
         self.assertFalse(output)
+
+
+class CreateNeutronRouterTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.create_router = mock.Mock()
+
+    def test_create_neutron_router(self):
+        _uuid = uuidutils.generate_uuid()
+        self.mock_shade_client.create_router.return_value = {'id': _uuid}
+        output = openstack_utils.create_neutron_router(
+            self.mock_shade_client)
+        self.assertEqual(_uuid, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_neutron_router_exception(self, mock_logger):
+        self.mock_shade_client.create_router.side_effect = (
+            exc.OpenStackCloudException('error message'))
+
+        output = openstack_utils.create_neutron_router(
+            self.mock_shade_client)
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)