# nfvi, vping_ssh, vping_userdata
- dovetail.nfvi.tc001
- dovetail.nfvi.tc002
- # HA, ha.tc002, ha.tc012, will kill the host and can't restart, not ready yet, skip.
+ # HA
- dovetail.ha.tc001
+ - dovetail.ha.tc002
- dovetail.ha.tc003
- # - dovetail.ha.tc004
+ - dovetail.ha.tc004
- dovetail.ha.tc005
- dovetail.ha.tc006
- # - dovetail.ha.tc007
- # - dovetail.ha.tc008
- - dovetail.ha.tc009
- # - dovetail.ha.tc010
- # - dovetail.ha.tc011
+ - dovetail.ha.tc007
+ - dovetail.ha.tc008
# sdnvpn
- dovetail.sdnvpn.tc001
- dovetail.sdnvpn.tc002
---
dovetail.ha.tc002:
name: dovetail.ha.tc002
- objective: > # This test case will verify the high availability of controller node.
- # When one of the controller node abnormally shutdown, the service provided by it should be OK
+ objective: > # This test case will verify the high availability of the
+ # network service provided by OpenStack (neutron-server) on control node.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc025
+ testcase: opnfv_yardstick_tc045
report:
sub_testcase_list:
dovetail.ha.tc003:
name: dovetail.ha.tc003
objective: > # This test case will verify the high availability of the
- # network service provided by OpenStack (neutro-server) on control node.
+ # user service provided by OpenStack (keystone) on control node.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc045
+ testcase: opnfv_yardstick_tc046
report:
sub_testcase_list:
dovetail.ha.tc004:
name: dovetail.ha.tc004
objective: > # This test case will verify the high availability of the
- # user service provided by OpenStack (keystone) on control node.
+ # image service provided by OpenStack (glance-api) on control node.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc046
+ testcase: opnfv_yardstick_tc047
report:
sub_testcase_list:
dovetail.ha.tc005:
name: dovetail.ha.tc005
objective: > # This test case will verify the high availability of the
- # image service provided by OpenStack (glance-api) on control node.
+ # volume service provided by OpenStack (cinder-api) on control node.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc047
+ testcase: opnfv_yardstick_tc048
report:
sub_testcase_list:
---
dovetail.ha.tc006:
name: dovetail.ha.tc006
- objective: > # This test case will verify the high availability of the
- # volume service provided by OpenStack (cinder-api) on control node.
+ objective: > # This test case will verify the high availability of the control node.
+ # When the CPU usage of a specified controller node is stressed to 100%, the
+ # OpenStack services on this node break down. These OpenStack services should
+ # still be accessible from other controller nodes, and the services on the
+ # failed controller node should be isolated.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc048
+ testcase: opnfv_yardstick_tc051
report:
sub_testcase_list:
---
dovetail.ha.tc007:
name: dovetail.ha.tc007
- objective: > # This test case will verify the high availability of the
- # storage service provided by OpenStack (swift-proxy) on control node.
+ objective: > # This test case will verify the high availability of the control node.
+ # When the disk I/O of a specified disk is blocked, the OpenStack services on this
+ # node break down. Read and write services should still be accessible from other
+ # controller nodes, and the services on the failed controller node should be isolated.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc049
+ testcase: opnfv_yardstick_tc052
report:
sub_testcase_list:
---
dovetail.ha.tc008:
name: dovetail.ha.tc008
- objective: > # This test case will verify the high availability of control node.
- # When one of the controller failed to connect the network, which breaks down
- # the Openstack services on this node. These Openstack service should able to
- # be accessed by other controller nodes, and the services on failed controller
- # node should be isolated
+ objective: > # This test case will verify the high availability of the load balancing
+ # service (currently HAProxy) that supports OpenStack on the controller node. When the
+ # load balancing service of a specified controller node is killed, the test checks
+ # whether the load balancers on other controller nodes still work and whether the
+ # controller node restarts its load balancer.
validate:
type: yardstick
- testcase: opnfv_yardstick_tc050
+ testcase: opnfv_yardstick_tc053
report:
sub_testcase_list:
+++ /dev/null
----
-dovetail.ha.tc009:
- name: dovetail.ha.tc009
- objective: > # This test case will verify the high availability of control node.
- # When the CPU usage of a specified controller node is stressed to 100%,
- # which breaks down the Openstack services on this node. These Openstack service
- # should able to be accessed by other controller nodes, and the services on
- # failed controller node should be isolated.
- validate:
- type: yardstick
- testcase: opnfv_yardstick_tc051
- report:
- sub_testcase_list:
+++ /dev/null
----
-dovetail.ha.tc010:
- name: dovetail.ha.tc010
- objective: > # This test case will verify the high availability of control node.
- # When the disk I/O of a specified disk is blocked, which breaks down the Openstack
- # services on this node. Read and write services should still be accessed by other
- # controller nodes, and the services on failed controller node should be isolated.
- validate:
- type: yardstick
- testcase: opnfv_yardstick_tc052
- report:
- sub_testcase_list:
+++ /dev/null
----
-dovetail.ha.tc011:
- name: dovetail.ha.tc011
- objective: > # This test case will verify the high availability of the load balance
- # service(current is HAProxy) that supports OpenStack on controller node. When the
- # load balance service of a specified controller node is killed, whether other
- # load balancers on other controller nodes will work, and whether the controller node
- # will restart the load balancer are checked.
- validate:
- type: yardstick
- testcase: opnfv_yardstick_tc053
- report:
- sub_testcase_list:
+++ /dev/null
----
-dovetail.ha.tc012:
- name: dovetail.ha.tc012
- objective: > # This test case will verify the high availability for virtual ip in the environment.
- # When master node of virtual ip is abnormally shutdown, connection to virtual ip and the
- # services binded to the virtual IP it should be OK.
- validate:
- type: yardstick
- testcase: opnfv_yardstick_tc054
- report:
- sub_testcase_list:
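
For reference, a minimal sketch of the dovetail.ha.tc002 definition as it reads once the
hunks above are applied (assembled from this diff only; indentation is illustrative and
may differ from the actual file):

    ---
    dovetail.ha.tc002:
      name: dovetail.ha.tc002
      objective: > # This test case will verify the high availability of the
        # network service provided by OpenStack (neutron-server) on control node.
      validate:
        type: yardstick
        testcase: opnfv_yardstick_tc045
      report:
        sub_testcase_list: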