Add test case description and task file for TC050 09/17709/3
author tjuyinkanglin <14_ykl@tongji.edu.cn>
Thu, 28 Jul 2016 11:53:22 +0000 (19:53 +0800)
committer tjuyinkanglin <14_ykl@tongji.edu.cn>
Fri, 29 Jul 2016 10:02:42 +0000 (18:02 +0800)
JIRA: YARDSTICK-297

Change-Id: I8030ba5a09f80db1e0a87a7650f08b944f281613
Signed-off-by: tjuyinkanglin <14_ykl@tongji.edu.cn>
docs/userguide/opnfv_yardstick_tc050.rst [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml [new file with mode: 0644]

diff --git a/docs/userguide/opnfv_yardstick_tc050.rst b/docs/userguide/opnfv_yardstick_tc050.rst
new file mode 100644 (file)
index 0000000..8890c9d
--- /dev/null
@@ -0,0 +1,135 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC050
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Node Network High Availability                          |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC050: OpenStack Controller Node Network     |
+|              | High Availability                                            |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of the      |
+|              | control node. When one of the controllers fails to connect   |
+|              | to the network, the OpenStack services on this node break    |
+|              | down. These OpenStack services should still be accessible    |
+|              | from other controller nodes, and the services on the failed  |
+|              | controller node should be isolated.                          |
++--------------+--------------------------------------------------------------+
+|test method   | This test case turns off the network interfaces of a         |
+|              | specified control node, then checks whether all services     |
+|              | provided by the control node are OK with some monitor tools. |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "close-interface" is   |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "close-interface" in     |
+|              | this test case.                                              |
+|              | 2) host: which is the name of a control node being attacked. |
+|              | 3) interface: the network interface to be turned off.        |
+|              |                                                              |
+|              | There are four instances of the "close-interface" attacker:  |
+|              | attacker1(for public network):                                |
+|              | -fault_type: "close-interface"                               |
+|              | -host: node1                                                 |
+|              | -interface: "br-ex"                                          |
+|              | attacker2(for management network):                           |
+|              | -fault_type: "close-interface"                               |
+|              | -host: node1                                                 |
+|              | -interface: "br-mgmt"                                        |
+|              | attacker3(for storage network):                              |
+|              | -fault_type: "close-interface"                               |
+|              | -host: node1                                                 |
+|              | -interface: "br-storage"                                     |
+|              | attacker4(for private network):                              |
+|              | -fault_type: "close-interface"                               |
+|              | -host: node1                                                 |
+|              | -interface: "br-mesh"                                        |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, the monitor named "openstack-cmd" is      |
+|              | needed. The monitor needs two parameters:                    |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: the command name used for the request       |
+|              |                                                              |
+|              | There are four instances of the "openstack-cmd" monitor:     |
+|              | monitor1:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "nova image-list"                             |
+|              | monitor2:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "neutron router-list"                         |
+|              | monitor3:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "heat stack-list"                             |
+|              | monitor4:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "cinder list"                                 |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there is one metric:                      |
+|              | 1) service_outage_time: which indicates the maximum outage   |
+|              | time (seconds) of the specified Openstack command request.   |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc050.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to stopping the monitors                        |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2) POD file: pod.yaml                                        |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the turnoff network interface script with param value        |
+|              | specified by "interface".                                    |
+|              |                                                              |
+|              | Result: Network interfaces will be turned down.              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test case exits. It turns up the   |
+|              | network interface of the control node if it is not turned    |
+|              | up.                                                          |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
new file mode 100644 (file)
index 0000000..0b21f88
--- /dev/null
@@ -0,0 +1,139 @@
+---
+# Test case for TC050: OpenStack Controller Node Network High Availability
+# This test case is written by new scenario-based HA testing framework
+
+schema: "yardstick:task:0.1"
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-public"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-ex"
+          rollback_parameter:
+            interface: "br-ex"
+
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-mgmt"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-mgmt"
+          rollback_parameter:
+            interface: "br-mgmt"
+
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-storage"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-storage"
+          rollback_parameter:
+            interface: "br-storage"
+
+        -
+          fault_type: "general-attacker"
+          host: node1
+          key: "close-br-private"
+          attack_key: "close-interface"
+          action_parameter:
+            interface: "br-mesh"
+          rollback_parameter:
+            interface: "br-mesh"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "nova-image-list"
+          command_name: "nova image-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "neutron-router-list"
+          command_name: "neutron router-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "heat-stack-list"
+          command_name: "heat stack-list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "cinder-list"
+          command_name: "cinder list"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+
+
+      steps:
+        -
+          actionKey: "close-br-public"
+          actionType: "attacker"
+          index: 1
+
+        -
+          actionKey: "close-br-mgmt"
+          actionType: "attacker"
+          index: 2
+
+        -
+          actionKey: "close-br-storage"
+          actionType: "attacker"
+          index: 3
+
+        -
+          actionKey: "close-br-private"
+          actionType: "attacker"
+          index: 4
+
+        -
+          actionKey: "nova-image-list"
+          actionType: "monitor"
+          index: 5
+
+        -
+          actionKey: "neutron-router-list"
+          actionType: "monitor"
+          index: 6
+
+        -
+          actionKey: "heat-stack-list"
+          actionType: "monitor"
+          index: 7
+
+        -
+          actionKey: "cinder-list"
+          actionType: "monitor"
+          index: 8
+
+
+    nodes:
+      node1: node1.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: etc/yardstick/nodes/fuel_virtual/pod.yaml