Merge "Fix instance boot when metadata exists"
author Tim Irnich <tim.irnich@ericsson.com>
Thu, 9 Nov 2017 08:05:15 +0000 (08:05 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Thu, 9 Nov 2017 08:05:15 +0000 (08:05 +0000)
15 files changed:
docs/development/overview/index.rst
docs/release/scenarios/os-odl-bgpvpn/scenario.description.rst
sdnvpn/lib/config.py
sdnvpn/lib/utils.py
sdnvpn/test/functest/config.yaml
sdnvpn/test/functest/run_sdnvpn_tests.py
sdnvpn/test/functest/testcase_1.py
sdnvpn/test/functest/testcase_10.py
sdnvpn/test/functest/testcase_11.py [new file with mode: 0644]
sdnvpn/test/functest/testcase_2.py
sdnvpn/test/functest/testcase_3.py
sdnvpn/test/functest/testcase_4.py
sdnvpn/test/functest/testcase_7.py
sdnvpn/test/functest/testcase_8.py
sdnvpn/test/functest/testcase_9.py

index c7c9e7f..279d4a1 100644 (file)
@@ -21,3 +21,211 @@ definition of the related semantics. The BGPVPN framework supports a backend
 driver model with currently available drivers for Bagpipe, OpenContrail, Nuage
 and OpenDaylight. The OPNFV scenario makes use of the OpenDaylight driver and backend
 implementation through the ODL NetVirt project.
+
+====================
+SDNVPN Testing Suite
+====================
+
+An overview of the SDNVPN test suite is given below. More details for each test case are provided at:
+https://wiki.opnfv.org/display/sdnvpn/SDNVPN+Testing
+
+    BGPVPN Tempest test cases
+        Create BGPVPN passes
+        Create BGPVPN as non-admin fails
+        Delete BGPVPN as non-admin fails
+        Show BGPVPN as non-owner fails
+        List BGPVPNs as non-owner fails
+        Show network associated BGPVPNs as non-owner fails
+        List network associated BGPVPNs as non-owner fails
+        Associate/Disassociate a network to a BGPVPN resource passes
+        Update route targets on a BGPVPN passes
+        Update route targets on a BGPVPN as non-admin fails
+        Reject the creation of BGPVPN with invalid route targets passes
+        Reject the update of BGPVPN with invalid route targets passes
+        Reject the association of an invalid network to a BGPVPN passes
+        Reject the disassociation of an invalid network from a BGPVPN passes
+        Associate/Disassociate a router to a BGPVPN resource passes
+        Attach the subnet of an associated network to an associated router of the same BGPVPN passes
+
+
+
+    Functest scenario specific tests:
+
+    Test Case 1 - VPN provides connectivity between subnets, using network association
+    Name: VPN connecting Neutron networks and subnets
+    Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
+
+    Test setup procedure:
+    Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1.
+    Moreover, all ports have 10.10.10/24 addresses (this subnet is denoted SN1 in the following).
+    Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2.
+    Moreover, all ports have 10.10.11/24 addresses (this subnet is denoted SN2 in the following).
+
+    Test execution:
+        Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
+        Associate SN1 to VPN1
+        Ping from VM1 to VM2 should work
+        Ping from VM1 to VM3 should work
+        Ping from VM1 to VM4 should not work
+        Associate SN2 to VPN1
+        Ping from VM4 to VM5 should work
+        Ping from VM1 to VM4 should not work (disabled until isolation fixed upstream)
+        Ping from VM1 to VM5 should not work (disabled until isolation fixed upstream)
+        Change VPN 1 so that iRT=eRT
+        Ping from VM1 to VM4 should work
+        Ping from VM1 to VM5 should work
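    A minimal Python sketch of this flow (not part of this change). The helpers
    test_utils.create_bgpvpn, test_utils.create_network_association and
    test_utils.update_bgpvpn are assumed to exist in sdnvpn.lib.utils with roughly
    the signatures shown; the route targets are placeholders.

        from functest.utils import openstack_utils as os_utils
        from sdnvpn.lib import utils as test_utils


        def testcase_1_sketch(network_1_id, network_2_id):
            neutron_client = os_utils.get_neutron_client()

            # Create VPN1 with eRT <> iRT so that SN1 and SN2 stay isolated at first.
            bgpvpn = test_utils.create_bgpvpn(neutron_client,
                                              name='sdnvpn-1',
                                              route_distinguishers='100:100',
                                              export_targets='88:100',
                                              import_targets='88:101')
            bgpvpn_id = bgpvpn['bgpvpn']['id']

            # Associate N1/SN1: VM1, VM2 and VM3 can reach each other, VM4/VM5 cannot.
            test_utils.create_network_association(neutron_client, bgpvpn_id,
                                                  network_1_id)
            # Associate N2/SN2 as well; SN1 and SN2 remain isolated while eRT <> iRT.
            test_utils.create_network_association(neutron_client, bgpvpn_id,
                                                  network_2_id)
            # Change VPN1 so that iRT = eRT; now SN1 and SN2 can reach each other.
            test_utils.update_bgpvpn(neutron_client, bgpvpn_id,
                                     route_targets='88:100', name='sdnvpn-1')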
+
+    Test Case 2 - tenant separation
+    Name: Using VPNs for tenant separation
+    Description: Using VPNs to isolate tenants so that overlapping IP address ranges can be used
+
+    Test setup procedure:
+    Set up VM1 and VM2 on Node1 and VM3 on Node2, all having ports in the same Neutron Network N1.
+    VM1 and VM2 have IP addresses in a subnet SN1 with range 10.10.10/24
+        VM1: 10.10.10.11, running an HTTP server which returns "I am VM1" for any HTTP request
+        (or an equivalent simple responder instead of an HTTP server)
+        VM2: 10.10.10.12, running an HTTP server which returns "I am VM2" for any HTTP request
+    VM3 has an IP address in a subnet SN2 with range 10.10.11/24
+        VM3: 10.10.11.13, running an HTTP server which returns "I am VM3" for any HTTP request
+    Set up VM4 on Node1 and VM5 on Node2, both having ports in Neutron Network N2
+    VM4 has an address in a subnet SN1b with range 10.10.10/24
+        VM4: 10.10.10.12 (the same as VM2), running an HTTP server which returns "I am VM4" for any HTTP request
+    VM5 has an address in a subnet SN2b with range 10.10.11/24
+        VM5: 10.10.11.13 (the same as VM3), running an HTTP server which returns "I am VM5" for any HTTP request
+
+    Test execution:
+        Create VPN 1 with iRT=eRT=RT1 and associate N1 to it
+        HTTP from VM1 to VM2 and VM3 should work
+            It returns "I am VM2" and "I am VM3" respectively
+        HTTP from VM1 to VM4 and VM5 should not work
+            It never returns "I am VM4" or "I am VM5"
+        Create VPN2 with iRT=eRT=RT2 and associate N2 to it
+        HTTP from VM4 to VM5 should work
+            It returns "I am VM5"
+        HTTP from VM4 to VM1 and VM3 should not work
+            It never returns "I am VM1" or "I am VM3"
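    How the "I am VMx" responders are provisioned is outside this change. As one
    illustrative possibility only, the reply could be served from Nova user-data
    with a small netcat loop (the exact netcat flags depend on the guest image):

        def http_responder_userdata(vm_name):
            # Build a user-data script that answers every HTTP request with the VM's name.
            # 'nc -l -p 80' matches BusyBox netcat; other images may need different flags.
            return ("#!/bin/sh\n"
                    "while true; do\n"
                    "    echo -e 'HTTP/1.0 200 OK\\r\\n\\r\\nI am {}' | nc -l -p 80\n"
                    "done\n".format(vm_name))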
+
+
+    Test Case 3 - Data Center Gateway integration
+    Name: Data Center Gateway integration
+    Description: Investigate the BGP peering functionality between a Zrpcd/Quagga
+    router and the OpenDaylight controller
+
+    Test setup procedure:
+    Search the pool of nodes and find one compute node and one controller node that has the OpenDaylight controller running
+    Start an instance using the ubuntu-16.04-server-cloudimg-amd64-disk1.img image and run the Quagga setup script in it
+    Start the BGP router on the controller node, using odl:configure-bgp
+
+    Test execution:
+    Set up a Quagga instance on a Nova compute node
+    Start a BGP router with OpenDaylight on a controller node
+    Add the Quagga running in the instance as a neighbor
+    Check that bgpd is running
+    Verify that OpenDaylight and the gateway Quagga peer with each other
+    Start an instance on a second Nova compute node and connect it to a new network (Network 3-3)
+    Create a BGPVPN (including the route-distinguisher and route-target parameters) and associate it with the new network
+    Define the same route-distinguisher and route-targets on the simulated Quagga side
+    Check that the routes from Network 3-3 are advertised towards the simulated Quagga VM
+
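    A simplified sketch of the peering checks (not part of this change). It assumes
    node objects exposing run_cmd() as used elsewhere in this change, and that the
    Quagga neighbor shows up in the vtysh BGP summary:

        def check_bgp_peering(node, neighbor_ip):
            # Verify that a bgpd process is running on the node.
            if not node.run_cmd("ps --no-headers -C bgpd -o state").strip():
                return False
            # Look for the neighbor in Quagga's BGP summary.
            summary = node.run_cmd("sudo vtysh -c 'show ip bgp summary'")
            return neighbor_ip in summary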
+    Test Case 4 - VPN provides connectivity between subnets using router association
+    Functest: variant of Test Case 1.
+    Set up a Router R1 with one connected network/subnet N1/SN1.
+    Set up a second network N2.
+    Create VPN1 and associate Router R1 and Network N2 to it.
+        Hosts from N2 should be able to reach hosts in N1.
+
+    Name: VPN connecting Neutron networks and subnets using router association
+    Description: VPNs provide connectivity across Neutron networks and subnets if configured accordingly.
+
+    Test setup procedure:
+    Set up VM1 and VM2 on Node1 and VM3 on Node2.
+    All VMs have ports in the same Neutron Network N1 and 10.10.10/24 addresses
+    (this subnet is denoted SN1 in the following).
+    N1/SN1 are connected to router R1.
+    Set up VM4 on Node1 and VM5 on Node2.
+    Both VMs have ports in Neutron Network N2 and 10.10.11/24 addresses
+    (this subnet is denoted SN2 in the following).
+
+    Test execution:
+    Create VPN1 with eRT<>iRT (so that connected subnets should not reach each other)
+    Associate R1 to VPN1
+        Ping from VM1 to VM2 should work
+        Ping from VM1 to VM3 should work
+        Ping from VM1 to VM4 should not work
+    Associate SN2 to VPN1
+        Ping from VM4 to VM5 should work
+        Ping from VM1 to VM4 should not work
+        Ping from VM1 to VM5 should not work
+    Change VPN1 so that iRT=eRT
+        Ping from VM1 to VM4 should work
+        Ping from VM1 to VM5 should work
+
+    Test Case 7 - Network associate a subnet with a router attached to a VPN and
+    verify floating IP functionality (disabled because of ODL Bug 6962)
+
+    A test for https://bugs.opendaylight.org/show_bug.cgi?id=6962
+
+    Setup procedure:
+    Create VM1 in a subnet with a router attached.
+    Create VM2 in a different subnet with another router attached.
+    Network associate them to a VPN with iRT=eRT
+    Ping from VM1 to VM2 should work
+    Assign a floating IP to VM1
+    Pinging the floating IP should work
+
+    Test Case 8 - Router associate a subnet with a router attached to a VPN and
+    verify floating IP functionality
+
+    Setup procedure:
+    Create VM1 in a subnet with a router which is connected to the gateway
+    Create VM2 in a different subnet without a router attached
+    Associate the two networks to a VPN with iRT=eRT,
+    one via router association and the other via network association
+    Try to ping from one VM to the other
+    Assign a floating IP to the VM in the router-associated network
+    Ping the floating IP
+
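    The floating IP step of Test Cases 7 and 8 can be expressed directly against the
    Neutron API. A minimal sketch (not part of this change), taking the external
    network ID and the VM's port ID as inputs:

        import subprocess


        def assign_and_ping_floating_ip(neutron_client, ext_net_id, vm_port_id):
            # Create a floating IP on the external network.
            fip = neutron_client.create_floatingip(
                {'floatingip': {'floating_network_id': ext_net_id}})
            fip_id = fip['floatingip']['id']
            fip_addr = fip['floatingip']['floating_ip_address']
            # Bind the floating IP to the instance's Neutron port.
            neutron_client.update_floatingip(
                fip_id, {'floatingip': {'port_id': vm_port_id}})
            # The floating IP should now answer pings from the jump host.
            return subprocess.call(['ping', '-c', '4', fip_addr]) == 0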
+    Test Case 9 - Check fail mode in OVS br-int interfaces
+    This test case checks whether the fail mode of the OVS br-int bridges is always "secure".
+    To accomplish this, the fail mode is checked on the br-int bridge of every OpenStack node.
+    The test case is considered successful if all br-int bridges have fail_mode=secure
+
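    The check is implemented by sdnvpn.lib.utils.is_fail_mode_secure(); the following
    is only a simplified sketch of the idea, assuming node objects exposing run_cmd():

        def all_fail_modes_secure(openstack_nodes, bridge='br-int'):
            # 'ovs-vsctl get-fail-mode BRIDGE' prints the configured fail mode.
            for node in openstack_nodes:
                fail_mode = node.run_cmd(
                    "sudo ovs-vsctl get-fail-mode {}".format(bridge)).strip()
                if fail_mode != 'secure':
                    return False
            return True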
+
+    Test Case 10 - Check the communication between a group of VMs
+    This test case investigates whether communication between a group of VMs is interrupted
+    when VMs inside this group are deleted and created.
+
+    Test case flow:
+        Create 3 VMs: VM_1 on compute 1, VM_2 on compute 1, VM_3 on compute 2.
+        All VMs ping each other.
+        VM_2 is deleted.
+        Traffic still flows between VM_1 and VM_3.
+        A new VM, VM_4, is added to compute 1.
+        Traffic is not interrupted and VM_4 can be reached as well.
+
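    The actual test drives the traffic from inside the VMs; the following is only a
    simplified host-side sketch of the continuity check (the target IPs are assumptions):

        import subprocess
        import threading
        import time


        def monitor_ping(target_ip, stop_event, failures):
            # Ping the target once per second and record every lost reply.
            while not stop_event.is_set():
                if subprocess.call(['ping', '-c', '1', '-W', '1', target_ip],
                                   stdout=subprocess.PIPE) != 0:
                    failures.append(target_ip)
                time.sleep(1)


        def check_continuity(target_ips, duration=60):
            stop_event = threading.Event()
            failures = []
            threads = [threading.Thread(target=monitor_ping,
                                        args=(ip, stop_event, failures))
                       for ip in target_ips]
            for thread in threads:
                thread.start()
            # ... delete VM_2 and boot VM_4 here while the traffic is monitored ...
            time.sleep(duration)
            stop_event.set()
            for thread in threads:
                thread.join()
            return failures  # an empty list means no interruption was observed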
+
+    Test Case 11 - Test OpenDaylight resync and group_add_mod mechanisms
+    This test case verifies the OpenDaylight resync and group_add_mod functionalities
+
+    Sub-testcase 11-1:
+    Create and start 2 VMs, connected to a common network.
+        New groups should appear in the OVS dump
+    OVS disconnects and the VMs and networks are cleaned up.
+        The new groups are still in the OVS dump,
+        because OVS is not connected anymore and is therefore not notified that the groups are deleted
+    OVS reconnects.
+        The new groups should be deleted, as OpenDaylight has to fully resync the groups and
+        should remove them since the VMs are deleted.
+
+    Sub-testcase 11-2:
+    Create and start 2 VMs, connected to a common network.
+        New groups should appear in the OVS dump
+    OVS disconnects.
+        The new groups are still in the OVS dump, because OVS is not connected anymore
+        and is therefore not notified that the groups are deleted
+    OVS reconnects.
+        The new groups should still be there, as the topology remains. OpenDaylight Carbon's
+        group_add_mod mechanism should handle the already existing group.
index af102cb..2641d82 100644 (file)
@@ -91,8 +91,9 @@ should set up the cluster ready for BGPVPNs being created.
 APEX installer configuration
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To install the SDNVPN feature using the APEX installer, follow the APEX installation guide and
-activate the SDNVPN feature when prompted.
+To install the SDNVPN feature using the APEX installer, follow the APEX installation guide
+(https://wiki.opnfv.org/display/apex/Integration+Guide) and activate the SDNVPN feature when
+prompted (step "# Now execute a deployment"), i.e. python3 deploy.py -v -n ../config/network/network_settings.yaml -d ../config/deploy/os-odl-bgpvpn-noha.yaml --deploy-dir ../build --lib-dir ../lib --image-dir ../.build --virtual-computes 2 --virtual-default-ram 16
 
 Limitations, Issues and Workarounds
 ===================================
@@ -112,4 +113,4 @@ References
 ==========
 
 For more information on the OPNFV Danube release, please visit
-http://www.opnfv.org/danube
+https://www.opnfv.org/software
index 99e5d00..9659fc3 100644 (file)
@@ -54,6 +54,10 @@ class CommonConfig(object):
             self.custom_flavor_ram = 1024
             self.custom_flavor_disk = 10
             self.custom_flavor_vcpus = 1
+            self.neutron_nw_quota = -1
+            self.neutron_subnet_quota = -1
+            self.neutron_port_quota = -1
+            self.nova_instances_quota_class = -1
 
     commonCfgInstance = None
 
index f5cd9dc..67b75d0 100644 (file)
@@ -595,7 +595,7 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
     return True
 
 
-def cleanup_nova(nova_client, instance_ids, image_ids):
+def cleanup_nova(nova_client, instance_ids):
     if len(instance_ids) != 0:
         for instance_id in instance_ids:
             if not os_utils.delete_instance(nova_client, instance_id):
@@ -603,10 +603,13 @@ def cleanup_nova(nova_client, instance_ids, image_ids):
                               'Instance with id {} was not deleted.'.
                               format(instance_id))
                 return False
+    return True
+
 
+def cleanup_glance(glance_client, image_ids):
     if len(image_ids) != 0:
         for image_id in image_ids:
-            if not os_utils.delete_glance_image(nova_client, image_id):
+            if not os_utils.delete_glance_image(glance_client, image_id):
                 logging.error('Fail to delete all images. '
                               'Image with id {} was not deleted.'.
                               format(image_id))
@@ -684,3 +687,66 @@ def is_fail_mode_secure():
                               'in {} node'.format(openstack_node.name))
                 is_secure[openstack_node.name] = False
     return is_secure
+
+
+def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
+                                subnet_quota, port_quota):
+    json_body = {"quota": {
+        "network": nw_quota,
+        "subnet": subnet_quota,
+        "port": port_quota
+    }}
+
+    try:
+        neutron_client.update_quota(tenant_id=tenant_id,
+                                    body=json_body)
+        return True
+    except Exception as e:
+        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
+                     " '%s', '%s', '%s', '%s')]: %s" %
+                     (tenant_id, nw_quota, subnet_quota, port_quota, e))
+        return False
+
+
+def update_instance_quota_class(nova_client, instances_quota):
+    try:
+        nova_client.quota_classes.update("default", instances=instances_quota)
+        return True
+    except Exception as e:
+        logger.error("Error [update_instance_quota_class(nova_client,"
+                     " '%s' )]: %s" % (instances_quota, e))
+        return False
+
+
+def get_neutron_quota(neutron_client, tenant_id):
+    try:
+        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
+    except Exception as e:
+        logger.error("Error in getting neutron quota for tenant "
+                     " '%s' )]: %s" % (tenant_id, e))
+        raise
+
+
+def get_nova_instances_quota(nova_client):
+    try:
+        return nova_client.quota_classes.get("default").instances
+    except Exception as e:
+        logger.error("Error in getting nova instances quota: %s" % e)
+        raise
+
+
+def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+    """
+    Take a list of compute nodes and a list of OVS bridges as input and
+    return, as a list of lines, the console output that contains all the
+    OVS groups from all the given bridges and nodes.
+    """
+    cmd_out_lines = []
+    for compute_node in compute_node_list:
+        for ovs_br in ovs_br_list:
+            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
+                                  "grep group".format(ovs_br, of_protocol))
+                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
+                                  split("\n"))
+    return cmd_out_lines
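A brief usage sketch of the helpers added above (not part of this change), using the
client and node accessors that appear elsewhere in this diff and assuming the same
OS_PROJECT_NAME environment variable as run_sdnvpn_tests.py:

    import os

    from functest.utils import openstack_utils as os_utils
    from sdnvpn.lib import utils as test_utils

    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
                                       os.environ['OS_PROJECT_NAME'])

    # Save the current quotas, lift them for the test run, then restore them afterwards.
    saved_quota = test_utils.get_neutron_quota(neutron_client, tenant_id)
    saved_instances = test_utils.get_nova_instances_quota(nova_client)
    test_utils.update_nw_subnet_port_quota(neutron_client, tenant_id, -1, -1, -1)
    test_utils.update_instance_quota_class(nova_client, -1)
    # ... run the test cases ...
    test_utils.update_nw_subnet_port_quota(neutron_client, tenant_id,
                                           saved_quota['network'],
                                           saved_quota['subnet'],
                                           saved_quota['port'])
    test_utils.update_instance_quota_class(nova_client, saved_instances)

    # Snapshot the OVS groups on the br-int bridge of every compute node.
    compute_nodes = [node for node in test_utils.get_nodes() if node.is_compute()]
    ovs_groups = test_utils.get_ovs_groups(compute_nodes, ['br-int'])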
index d3063c1..2b26077 100644 (file)
@@ -164,3 +164,16 @@ testcases:
       router_1_name: sdnvpn-10-1-router
       secgroup_name: sdnvpn-sg
       secgroup_descr: Security group for SDNVPN test cases
+
+  sdnvpn.test.functest.testcase_11:
+      enabled: true
+      description: Check relevant OVS groups are removed upon deletion of OpenStack topology
+      instance_1_name: sdnvpn-11-1
+      instance_2_name: sdnvpn-11-2
+      image_name: sdnvpn-image
+      net_1_name: sdnvpn-11-1-net
+      subnet_1_name: sdnvpn-11-1-subnet
+      subnet_1_cidr: 10.10.10.0/24
+      router_1_name: sdnvpn-11-1-router
+      secgroup_name: sdnvpn-sg
+      secgroup_descr: Security group for SDNVPN test cases
index cff6a27..1a1d8f3 100644 (file)
@@ -16,8 +16,10 @@ import traceback
 import yaml
 
 from functest.core import feature as base
+from functest.utils import openstack_utils as os_utils
 from sdnvpn.lib import config as sdnvpn_config
 from sdnvpn.lib.gather_logs import gather_logs
+from sdnvpn.lib import utils as test_utils
 
 COMMON_CONFIG = sdnvpn_config.CommonConfig()
 
@@ -28,21 +30,32 @@ class SdnvpnFunctest(base.Feature):
 
     def execute(self):
 
-        cmd_line = "neutron quota-update --subnet -1 --network -1 --port -1"
-        self.__logger.info("Setting subnet/net quota to unlimited : %s"
-                           % cmd_line)
-        cmd = os.popen(cmd_line)
-        output = cmd.read()
-        self.__logger.debug(output)
+        nova_client = os_utils.get_nova_client()
+        neutron_client = os_utils.get_neutron_client()
+
+        tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
+                                           os.environ['OS_PROJECT_NAME'])
+
+        neutron_quota = test_utils.get_neutron_quota(neutron_client, tenant_id)
+        (neutron_nw_quota, neutron_subnet_quota, neutron_port_quota) = (
+            neutron_quota['network'], neutron_quota['subnet'],
+            neutron_quota['port'])
+        instances_quota = test_utils.get_nova_instances_quota(nova_client)
+
+        self.__logger.info("Setting net/subnet/port quota to unlimited")
+        test_utils.update_nw_subnet_port_quota(
+            neutron_client,
+            tenant_id,
+            COMMON_CONFIG.neutron_nw_quota,
+            COMMON_CONFIG.neutron_subnet_quota,
+            COMMON_CONFIG.neutron_port_quota)
 
         # Workaround for
         # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115
-        cmd_line = "nova quota-class-update --instances -1 default"
-        self.__logger.info("Setting instances quota to unlimited : %s"
-                           % cmd_line)
-        cmd = os.popen(cmd_line)
-        output = cmd.read()
-        self.__logger.debug(output)
+        self.__logger.info("Setting instances quota class to unlimited")
+        test_utils.update_instance_quota_class(
+            nova_client,
+            COMMON_CONFIG.nova_instances_quota_class)
 
         with open(COMMON_CONFIG.config_file) as f:
             config_yaml = yaml.safe_load(f)
@@ -80,6 +93,16 @@ class SdnvpnFunctest(base.Feature):
                     if status == "FAIL":
                         overall_status = "FAIL"
 
+        self.__logger.info("Resetting subnet/net/port quota")
+        test_utils.update_nw_subnet_port_quota(neutron_client,
+                                               tenant_id,
+                                               neutron_nw_quota,
+                                               neutron_subnet_quota,
+                                               neutron_port_quota)
+
+        self.__logger.info("Resetting instances quota class")
+        test_utils.update_instance_quota_class(nova_client, instances_quota)
+
         try:
             installer_type = str(os.environ['INSTALLER_TYPE'].lower())
             if installer_type in ["fuel", "apex"]:
index 2c4ddbe..89011cd 100644 (file)
@@ -209,7 +209,8 @@ def main():
         logger.error("exception occurred while executing testcase_1: %s", e)
         raise
     finally:
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                    bgpvpn_ids, interfaces, subnet_ids,
                                    router_ids, network_ids)
index 5a88603..02956c4 100644 (file)
@@ -253,7 +253,8 @@ def main():
         for thread in threads:
             thread.join()
 
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                    interfaces, subnet_ids, router_ids,
                                    network_ids)
diff --git a/sdnvpn/test/functest/testcase_11.py b/sdnvpn/test/functest/testcase_11.py
new file mode 100644 (file)
index 0000000..40de205
--- /dev/null
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import logging
+import sys
+
+from functest.utils import openstack_utils as os_utils
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+    'sdnvpn.test.functest.testcase_11')
+
+
+def main():
+    results = Results(COMMON_CONFIG.line_length)
+
+    results.add_to_summary(0, "=")
+    results.add_to_summary(2, "STATUS", "SUBTEST")
+    results.add_to_summary(0, "=")
+
+    nova_client = os_utils.get_nova_client()
+    neutron_client = os_utils.get_neutron_client()
+    glance_client = os_utils.get_glance_client()
+    openstack_nodes = test_utils.get_nodes()
+
+    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
+     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
+
+    try:
+        image_id = os_utils.create_glance_image(
+            glance_client, TESTCASE_CONFIG.image_name,
+            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+            container="bare", public='public')
+        image_ids.append(image_id)
+
+        network_1_id = test_utils.create_net(neutron_client,
+                                             TESTCASE_CONFIG.net_1_name)
+        subnet_1_id = test_utils.create_subnet(neutron_client,
+                                               TESTCASE_CONFIG.subnet_1_name,
+                                               TESTCASE_CONFIG.subnet_1_cidr,
+                                               network_1_id)
+
+        network_ids.append(network_1_id)
+        subnet_ids.append(subnet_1_id)
+
+        sg_id = os_utils.create_security_group_full(
+            neutron_client, TESTCASE_CONFIG.secgroup_name,
+            TESTCASE_CONFIG.secgroup_descr)
+
+        # Pick a compute host for the instances and collect the compute nodes
+        compute_hostname = (
+            nova_client.hypervisors.list()[0].hypervisor_hostname)
+        compute_nodes = [node for node in openstack_nodes
+                         if node.is_compute()]
+
+        av_zone_1 = "nova:" + compute_hostname
+        # List of OVS bridges to get groups
+        ovs_br = "br-int"
+        # Get a list of groups, before start topology
+        initial_ovs_groups = test_utils.get_ovs_groups(compute_nodes,
+                                                       [ovs_br])
+
+        # boot INSTANCES
+        vm_2 = test_utils.create_instance(
+            nova_client,
+            TESTCASE_CONFIG.instance_2_name,
+            image_id,
+            network_1_id,
+            sg_id,
+            secgroup_name=TESTCASE_CONFIG.secgroup_name,
+            compute_node=av_zone_1)
+
+        vm_1 = test_utils.create_instance(
+            nova_client,
+            TESTCASE_CONFIG.instance_1_name,
+            image_id,
+            network_1_id,
+            sg_id,
+            secgroup_name=TESTCASE_CONFIG.secgroup_name,
+            compute_node=av_zone_1)
+        instance_ids.extend([vm_1.id, vm_2.id])
+
+        # Wait for VMs to get ips.
+        instances_up = test_utils.wait_for_instances_up(vm_1, vm_2)
+
+        if not instances_up:
+            logger.error("One or more instances is down")
+            # TODO: Handle this appropriately
+
+        logging.info("Wait before subtest")
+        test_utils.wait_before_subtest()
+        # Get added OVS groups
+        added_ovs_groups = (len(initial_ovs_groups) -
+                            len(test_utils.get_ovs_groups(
+                                compute_nodes, [ovs_br])))
+        # Check if group added successfully
+        results.record_action("Check if a new group was added to OVS")
+        msg = "New OVS group added"
+        results.add_to_summary(0, "-")
+        if added_ovs_groups != 0:
+            results.add_success(msg)
+        else:
+            results.add_failure(msg)
+        results.add_to_summary(0, "=")
+        # Backup OVS controller connection info.
+        # To support HA changes should be made here.
+        get_ext_ip_cmd = "sudo ovs-vsctl get-controller {}".format(ovs_br)
+        ovs_controller_conn = (compute_nodes[0].run_cmd(get_ext_ip_cmd).
+                               strip().split('\n')[0])
+        # Disconnect OVS from controller
+        for compute_node in compute_nodes:
+            compute_node.run_cmd("sudo ovs-vsctl del-controller {}".
+                                 format(ovs_br))
+    except Exception as e:
+        logger.error("exception occurred while executing testcase_1: %s", e)
+        raise
+    finally:
+        # Cleanup topology
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
+        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
+                                   interfaces, subnet_ids, router_ids,
+                                   network_ids)
+    # Reconnect OVS to the controller
+    for compute_node in compute_nodes:
+        compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}".
+                             format(ovs_br, ovs_controller_conn))
+    logging.info("Wait before subtest")
+    test_utils.wait_before_subtest()
+    # Get OVS groups added after the reconnection
+    added_ovs_groups = (len(initial_ovs_groups) -
+                        len(test_utils.get_ovs_groups(
+                            compute_nodes, [ovs_br])))
+
+    # Check if group removed successfully
+    results.record_action("Check if group was removed from OVS "
+                          "after deleting the topology.")
+    msg = ""
+    # After removing the topology, groups must be equal to the initial
+    if added_ovs_groups != 0:
+        msg += " Additional group was not deleted from OVS"
+    results.add_to_summary(0, "-")
+    if len(msg) == 0:
+        msg = "Group was deleted from ovs"
+        results.add_success(msg)
+    else:
+        results.add_failure(msg)
+
+    return results.compile_summary()
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+    sys.exit(main())
index 928656e..d136d8f 100644 (file)
@@ -267,7 +267,8 @@ def main():
         logger.error("exception occurred while executing testcase_2: %s", e)
         raise
     finally:
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                    bgpvpn_ids, interfaces, subnet_ids,
                                    router_ids, network_ids)
index 7e14505..3dc1e7c 100644 (file)
@@ -278,7 +278,8 @@ def main():
         logger.error("exception occurred while executing testcase_3: %s", e)
         raise
     finally:
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                    bgpvpn_ids, interfaces, subnet_ids,
                                    router_ids, network_ids)
index 9b1c1fa..cc429c3 100644 (file)
@@ -219,7 +219,8 @@ def main():
         logger.error("exception occurred while executing testcase_4: %s", e)
         raise
     finally:
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                    bgpvpn_ids, interfaces, subnet_ids,
                                    router_ids, network_ids)
index e018022..0e3a8f5 100644 (file)
@@ -167,7 +167,8 @@ def main():
         logger.error("exception occurred while executing testcase_7: %s", e)
         raise
     finally:
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                    bgpvpn_ids, interfaces, subnet_ids,
                                    router_ids, network_ids)
index b166362..e372fe1 100644 (file)
@@ -169,7 +169,8 @@ def main():
         logger.error("exception occurred while executing testcase_8: %s", e)
         raise
     finally:
-        test_utils.cleanup_nova(nova_client, instance_ids, image_ids)
+        test_utils.cleanup_nova(nova_client, instance_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
         test_utils.cleanup_neutron(neutron_client, floatingip_ids,
                                    bgpvpn_ids, interfaces, subnet_ids,
                                    router_ids, network_ids)
index 45e9d5b..1489a5a 100644 (file)
@@ -65,6 +65,7 @@ def main():
 
     return results.compile_summary()
 
+
 if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
     sys.exit(main())