Host openstack utils in SDNVPN
[sdnvpn.git] / sdnvpn / test / functest / testcase_3.py
index 0253444..6e7cbae 100644
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2017 All rights reserved
 # This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 #   - Set up a Quagga instance in the functest container
 #   - Start a BGP router with OpenDaylight
 #   - Add the functest Quagga as a neighbor
-#   - Verify that the OpenDaylight and functest Quaggas peer
-# - Exchange routing information with Quagga:
-#   - Create a network, instance and BGPVPN in OpenStack
-#   - Verify the route to the instance is present in the OpenDaylight FIB
-#   - Verify that the functest Quagga also learns these routes
+#   - Verify that OpenDaylight and the gateway Quagga peer
+
+import logging
 import os
-import argparse
+import sys
 
+from functest.utils import functest_utils as ft_utils
 from sdnvpn.lib import quagga
-import sdnvpn.lib.utils as test_utils
-import sdnvpn.lib.config as sdnvpn_config
-
-import functest.utils.openstack_utils as os_utils
-import functest.utils.functest_utils as ft_utils
-import functest.utils.functest_logger as ft_logger
-
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib import config as sdnvpn_config
 from sdnvpn.lib.results import Results
 
-COMMON_CONFIG = sdnvpn_config.CommonConfig()
-TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig("testcase_3")
-
-logger = ft_logger.Logger("sdnvpn-testcase-3").getLogger()
-
-parser = argparse.ArgumentParser()
 
-parser.add_argument("-r", "--report",
-                    help="Create json result file",
-                    action="store_true")
+logger = logging.getLogger(__name__)
 
-args = parser.parse_args()
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+    "sdnvpn.test.functest.testcase_3")
 
 
 def main():
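
Note on the new header: the patch drops the argparse and functest logger wrappers in favour of the standard library. A minimal standalone sketch of the same logging pattern (illustrative only, not part of the patch):

    import logging
    import sys

    logger = logging.getLogger(__name__)   # module-level named logger

    def main():
        logger.info("testcase starting")
        return 0

    if __name__ == '__main__':
        # Configure logging only at the entry point, so an importing runner
        # such as functest keeps control of handlers and levels.
        logging.basicConfig(level=logging.INFO)
        sys.exit(main())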
@@ -51,9 +39,13 @@ def main():
 
     openstack_nodes = test_utils.get_nodes()
 
+    # node.is_odl() doesn't work in Apex
+    # https://jira.opnfv.org/browse/RELENG-192
     controllers = [node for node in openstack_nodes
-                   if node.is_odl()]
+                   if "running" in
+                   node.run_cmd("sudo systemctl status opendaylight")]
     computes = [node for node in openstack_nodes if node.is_compute()]
+
     msg = ("Verify that OpenDaylight can start/communicate with zrpcd/Quagga")
     results.record_action(msg)
     results.add_to_summary(0, "-")
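
The controller filter above works around RELENG-192 by probing the systemd unit instead of calling node.is_odl(). A minimal sketch of the same check, using a hypothetical helper name and assuming node objects that expose run_cmd() as in sdnvpn.lib.utils:

    def find_odl_controllers(nodes):
        # A node counts as a controller when the opendaylight unit reports
        # "running"; run_cmd() may return nothing, hence the 'or ""' guard.
        controllers = []
        for node in nodes:
            status = node.run_cmd("sudo systemctl status opendaylight") or ""
            if "running" in status:
                controllers.append(node)
        return controllers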
@@ -67,76 +59,89 @@ def main():
         logger.info(msg)
         results.add_success(msg)
 
-    for controller in controllers:
-        logger.info("Starting bgp speaker of controller at IP %s "
-                    % controller.ip)
-        logger.info("Checking if zrpcd is "
-                    "running on the controller node")
+    controller = controllers[0]  # We don't handle HA well
+    get_ext_ip_cmd = "sudo ip a | grep br-ex | grep inet | awk '{print $2}'"
+    ext_net_cidr = controller.run_cmd(get_ext_ip_cmd).strip().split('\n')
+    ext_net_mask = ext_net_cidr[0].split('/')[1]
+    controller_ext_ip = ext_net_cidr[0].split('/')[0]
 
-        cmd = "systemctl status zrpcd"
-        output = controller.run_cmd(cmd)
-        msg = ("zrpcd is running")
+    logger.info("Starting bgp speaker of controller at IP %s "
+                % controller_ext_ip)
+    logger.info("Checking if zrpcd is "
+                "running on the controller node")
 
-        if not output:
-            logger.info("zrpcd is not running on the controller node")
-            results.add_failure(msg)
-        else:
-            logger.info("zrpcd is running on the controller node")
-            results.add_success(msg)
+    output_zrpcd = controller.run_cmd("ps --no-headers -C "
+                                      "zrpcd -o state")
+    states = output_zrpcd.split()
+    running = any([s != 'Z' for s in states])
 
-        results.add_to_summary(0, "-")
+    msg = ("zrpcd is running")
 
-        start_quagga = "odl:configure-bgp -op start-bgp-server " \
-                       "--as-num 100 --router-id {0}".format(controller.ip)
-        test_utils.run_odl_cmd(controller, start_quagga)
+    if not running:
+        logger.info("zrpcd is not running on the controller node")
+        results.add_failure(msg)
+    else:
+        logger.info("zrpcd is running on the controller node")
+        results.add_success(msg)
 
-        logger.info("Checking if bgpd is running"
-                    " on the controller node")
+    results.add_to_summary(0, "-")
 
-        # Check if there is a non-zombie bgpd process
-        output_bgpd = controller.run_cmd("ps --no-headers -C "
-                                         "bgpd -o state")
-        states = output_bgpd.split()
-        running = any([s != 'Z' for s in states])
+    # Ensure that ZRPCD ip & port are well configured within ODL
+    add_client_conn_to_bgp = "bgp-connect -p 7644 -h 127.0.0.1 add"
+    test_utils.run_odl_cmd(controller, add_client_conn_to_bgp)
 
-        msg = ("bgpd is running")
-        if not running:
-            logger.info("bgpd is not running on the controller node")
-            results.add_failure(msg)
-        else:
-            logger.info("bgpd is running on the controller node")
-            results.add_success(msg)
+    # Start bgp daemon
+    start_quagga = "odl:configure-bgp -op start-bgp-server " \
+                   "--as-num 100 --router-id {0}".format(controller_ext_ip)
+    test_utils.run_odl_cmd(controller, start_quagga)
 
-        results.add_to_summary(0, "-")
+    logger.info("Checking if bgpd is running"
+                " on the controller node")
 
-        stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
+    # Check if there is a non-zombie bgpd process
+    output_bgpd = controller.run_cmd("ps --no-headers -C "
+                                     "bgpd -o state")
+    states = output_bgpd.split()
+    running = any([s != 'Z' for s in states])
 
-        test_utils.run_odl_cmd(controller, stop_quagga)
+    msg = ("bgpd is running")
+    if not running:
+        logger.info("bgpd is not running on the controller node")
+        results.add_failure(msg)
+    else:
+        logger.info("bgpd is running on the controller node")
+        results.add_success(msg)
+
+    results.add_to_summary(0, "-")
 
-        # disabled because of buggy upstream
-        # https://github.com/6WIND/zrpcd/issues/15
-        # logger.info("Checking if bgpd is still running"
-        #             " on the controller node")
+    # We should be able to restart the speaker
+    # but the test is disabled because of buggy upstream
+    # https://github.com/6WIND/zrpcd/issues/15
+    # stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
+    # test_utils.run_odl_cmd(controller, stop_quagga)
 
-        # output_bgpd = controller.run_cmd("ps --no-headers -C " \
-        #                                  "bgpd -o state")
-        # states = output_bgpd.split()
-        # running = any([s != 'Z' for s in states])
+    # logger.info("Checking if bgpd is still running"
+    #             " on the controller node")
 
-        # msg = ("bgpd is stopped")
-        # if not running:
-        #     logger.info("bgpd is not running on the controller node")
-        #     results.add_success(msg)
-        # else:
-        #     logger.info("bgpd is still running on the controller node")
-        #     results.add_failure(msg)
+    # output_bgpd = controller.run_cmd("ps --no-headers -C " \
+    #                                  "bgpd -o state")
+    # states = output_bgpd.split()
+    # running = any([s != 'Z' for s in states])
+
+    # msg = ("bgpd is stopped")
+    # if not running:
+    #     logger.info("bgpd is not running on the controller node")
+    #     results.add_success(msg)
+    # else:
+    #     logger.info("bgpd is still running on the controller node")
+    #     results.add_failure(msg)
 
     # Taken from the sfc tests
     if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
         logger.info("Downloading image")
         ft_utils.download_url(
-            "https://cloud-images.ubuntu.com/releases/16.04/"
-            "release/ubuntu-16.04-server-cloudimg-amd64-disk1.img",
+            "http://artifacts.opnfv.org/sdnvpn/"
+            "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
             "/home/opnfv/functest/data/")
     else:
         logger.info("Using old image")
@@ -145,144 +150,152 @@ def main():
     nova_client = os_utils.get_nova_client()
     neutron_client = os_utils.get_neutron_client()
 
-    sg_id = os_utils.create_security_group_full(neutron_client,
-                                                TESTCASE_CONFIG.secgroup_name,
-                                                TESTCASE_CONFIG.secgroup_descr)
-    test_utils.open_icmp_ssh(neutron_client, sg_id)
-    test_utils.open_bgp_port(neutron_client, sg_id)
-    net_id, _, _ = test_utils.create_network(neutron_client,
-                                             TESTCASE_CONFIG.net_1_name,
-                                             TESTCASE_CONFIG.subnet_1_name,
-                                             TESTCASE_CONFIG.subnet_1_cidr,
-                                             TESTCASE_CONFIG.router_1_name)
-
-    quagga_net_id, _, _ = test_utils.create_network(
-        neutron_client,
-        TESTCASE_CONFIG.quagga_net_name,
-        TESTCASE_CONFIG.quagga_subnet_name,
-        TESTCASE_CONFIG.quagga_subnet_cidr,
-        TESTCASE_CONFIG.quagga_router_name)
-
-    ubuntu_image_id = os_utils.create_glance_image(
-        glance_client,
-        COMMON_CONFIG.ubuntu_image_name,
-        COMMON_CONFIG.ubuntu_image_path,
-        disk="raw",
-        container="bare",
-        public="public")
-
-    # NOTE(rski) The order of this seems a bit weird but
-    # there is a reason for this, namely
-    # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
-    # so we create the quagga instance using cloud-init
-    # and immediately give it a floating IP.
-    # The cloud-init script should contain a small sleep for
-    # this to work.
-    # We also create the FIP first because it is used in the
-    # cloud-init script.
-    fip = os_utils.create_floating_ip(neutron_client)
-    # fake_fip is needed to bypass NAT
-    # see below for the reason why.
-    fake_fip = os_utils.create_floating_ip(neutron_client)
-    # pin quagga to some compute
-    compute_node = nova_client.hypervisors.list()[0]
-    quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
-    # Map the hypervisor used above to a compute handle
-    # returned by releng's manager
-    for comp in computes:
-        if compute_node.host_ip in comp.run_cmd("ip a"):
-            compute = comp
-            break
-    # Get the mask of ext net of the compute where quagga is running
-    # TODO check this works on apex
-    cmd = "ip a | grep br-ex | grep inet | awk '{print $2}'"
-    ext_cidr = compute.run_cmd(cmd).split("/")
-    ext_net_mask = ext_cidr[1]
-    quagga_bootstrap_script = quagga.gen_quagga_setup_script(
-        controllers[0].ip,
-        fake_fip['fip_addr'],
-        ext_net_mask)
-    quagga_vm = test_utils.create_instance(
-        nova_client,
-        TESTCASE_CONFIG.quagga_instance_name,
-        ubuntu_image_id,
-        quagga_net_id,
-        sg_id,
-        fixed_ip=TESTCASE_CONFIG.quagga_instance_ip,
-        flavor=TESTCASE_CONFIG.quagga_instance_flavor,
-        userdata=quagga_bootstrap_script,
-        compute_node=quagga_compute_node)
-
-    fip_added = os_utils.add_floating_ip(nova_client,
-                                         quagga_vm.id,
-                                         fip['fip_addr'])
-
-    msg = "Assign a Floating IP to %s " % TESTCASE_CONFIG.quagga_instance_name
-    if fip_added:
-        results.add_success(msg)
-    else:
-        results.add_failure(msg)
-
-    testcase = "Bootstrap quagga inside an OpenStack instance"
-    success = False
-    if success:
-        results.add_success(testcase)
-    else:
-        results.add_failure(testcase)
-    results.add_to_summary(0, "=")
-
-    # This part works around NAT
-    # What we do is attach the instance directly to the OpenStack
-    # external network. This way is is directly accessible from the
-    # controller without NAT. We assign a floating IP for this
-    # to make sure no overlaps happen.
-    libvirt_instance_name = getattr(quagga_vm, "OS-EXT-SRV-ATTR:instance_name")
-    compute.run_cmd("virsh attach-interface %s"
-                    " bridge br-ex" % libvirt_instance_name)
-
-    results.add_to_summary(0, '-')
-    results.add_to_summary(1, "Peer Quagga with OpenDaylight")
-    results.add_to_summary(0, '-')
-
-    neighbor = quagga.odl_add_neighbor(fip['fip_addr'], controller)
-    peer = quagga.check_for_peering(controller)
-
-    image_id = os_utils.create_glance_image(glance_client,
-                                            TESTCASE_CONFIG.image_name,
-                                            COMMON_CONFIG.image_path,
-                                            disk=COMMON_CONFIG.image_format,
-                                            container="bare",
-                                            public="public")
-
-    instance = test_utils.create_instance(
-        nova_client,
-        TESTCASE_CONFIG.instance_1_name,
-        image_id,
-        net_id,
-        sg_id,
-        fixed_ip=TESTCASE_CONFIG.instance_1_ip,
-        secgroup_name=TESTCASE_CONFIG.secgroup_name)
-
-    kwargs = {"import_targets": TESTCASE_CONFIG.import_targets,
-              "export_targets": TESTCASE_CONFIG.export_targets,
-              "route_targets": TESTCASE_CONFIG.export_targets,
-              "name": "bgpvpn-3-1"}
-
-    bgpvpn = os_utils.create_bgpvpn(neutron_client, **kwargs)
-    bgpvpn_id = bgpvpn['bgpvpn']['id']
-    os_utils.create_network_association(
-        neutron_client, bgpvpn_id, net_id)
-
-    test_utils.wait_for_instance(instance)
-
-    exchange = quagga.check_for_route_exchange(fip['fip_addr'])
-    if neighbor and peer and exchange:
-        results.add_success("Peering with quagga")
-    else:
-        results.add_failure("Peering with quagga")
+    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
+     subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))
+
+    try:
+        sg_id = os_utils.create_security_group_full(
+            neutron_client, TESTCASE_CONFIG.secgroup_name,
+            TESTCASE_CONFIG.secgroup_descr)
+        test_utils.open_icmp(neutron_client, sg_id)
+        test_utils.open_http_port(neutron_client, sg_id)
+
+        test_utils.open_bgp_port(neutron_client, sg_id)
+        net_id, subnet_1_id, router_1_id = test_utils.create_network(
+            neutron_client,
+            TESTCASE_CONFIG.net_1_name,
+            TESTCASE_CONFIG.subnet_1_name,
+            TESTCASE_CONFIG.subnet_1_cidr,
+            TESTCASE_CONFIG.router_1_name)
+
+        quagga_net_id, subnet_quagga_id, \
+            router_quagga_id = test_utils.create_network(
+                neutron_client,
+                TESTCASE_CONFIG.quagga_net_name,
+                TESTCASE_CONFIG.quagga_subnet_name,
+                TESTCASE_CONFIG.quagga_subnet_cidr,
+                TESTCASE_CONFIG.quagga_router_name)
+
+        interfaces.append(tuple((router_1_id, subnet_1_id)))
+        interfaces.append(tuple((router_quagga_id, subnet_quagga_id)))
+        network_ids.extend([net_id, quagga_net_id])
+        router_ids.extend([router_1_id, router_quagga_id])
+        subnet_ids.extend([subnet_1_id, subnet_quagga_id])
+
+        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+        if installer_type == "fuel":
+            disk = 'raw'
+        elif installer_type == "apex":
+            disk = 'qcow2'
+        else:
+            logger.error("Incompatible installer type")
+            raise Exception("Incompatible installer type: %s" % installer_type)
+
+        ubuntu_image_id = os_utils.create_glance_image(
+            glance_client,
+            COMMON_CONFIG.ubuntu_image_name,
+            COMMON_CONFIG.ubuntu_image_path,
+            disk,
+            container="bare",
+            public="public")
+
+        image_ids.append(ubuntu_image_id)
+
+        # NOTE(rski) The order of this seems a bit weird but
+        # there is a reason for this, namely
+        # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
+        # so we create the quagga instance using cloud-init
+        # and immediately give it a floating IP.
+        # The cloud-init script should contain a small sleep for
+        # this to work.
+        # We also create the FIP first because it is used in the
+        # cloud-init script.
+        fip = os_utils.create_floating_ip(neutron_client)
+        # fake_fip is needed to bypass NAT
+        # see below for the reason why.
+        fake_fip = os_utils.create_floating_ip(neutron_client)
+
+        floatingip_ids.extend([fip['fip_id'], fake_fip['fip_id']])
+        # pin quagga to some compute
+        compute_node = nova_client.hypervisors.list()[0]
+        quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
+        # Map the hypervisor used above to a compute handle
+        # returned by releng's manager
+        for comp in computes:
+            if compute_node.host_ip in comp.run_cmd("sudo ip a"):
+                compute = comp
+                break
+        quagga_bootstrap_script = quagga.gen_quagga_setup_script(
+            controller_ext_ip,
+            fake_fip['fip_addr'],
+            ext_net_mask)
+
+        _, flavor_id = test_utils.create_custom_flavor()
+        flavor_ids.append(flavor_id)
+
+        quagga_vm = test_utils.create_instance(
+            nova_client,
+            TESTCASE_CONFIG.quagga_instance_name,
+            ubuntu_image_id,
+            quagga_net_id,
+            sg_id,
+            fixed_ip=TESTCASE_CONFIG.quagga_instance_ip,
+            flavor=COMMON_CONFIG.custom_flavor_name,
+            userdata=quagga_bootstrap_script,
+            compute_node=quagga_compute_node)
+
+        instance_ids.append(quagga_vm)
+
+        fip_added = os_utils.add_floating_ip(nova_client,
+                                             quagga_vm.id,
+                                             fip['fip_addr'])
+
+        msg = ("Assign a Floating IP to %s " %
+               TESTCASE_CONFIG.quagga_instance_name)
+        if fip_added:
+            results.add_success(msg)
+        else:
+            results.add_failure(msg)
+        test_utils.attach_instance_to_ext_br(quagga_vm, compute)
+
+        try:
+            testcase = "Bootstrap quagga inside an OpenStack instance"
+            cloud_init_success = test_utils.wait_for_cloud_init(quagga_vm)
+            if cloud_init_success:
+                results.add_success(testcase)
+            else:
+                results.add_failure(testcase)
+            results.add_to_summary(0, "=")
+
+            results.add_to_summary(0, '-')
+            results.add_to_summary(1, "Peer Quagga with OpenDaylight")
+            results.add_to_summary(0, '-')
+
+            neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
+                                               controller_ext_ip,
+                                               controller)
+            peer = quagga.check_for_peering(controller)
+
+        finally:
+            test_utils.detach_instance_from_ext_br(quagga_vm, compute)
+
+        if neighbor and peer:
+            results.add_success("Peering with quagga")
+        else:
+            results.add_failure("Peering with quagga")
+
+    except Exception as e:
+        logger.error("exception occurred while executing testcase_3: %s", e)
+        raise
+    finally:
+        test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
+        test_utils.cleanup_glance(glance_client, image_ids)
+        test_utils.cleanup_neutron(neutron_client, floatingip_ids,
+                                   bgpvpn_ids, interfaces, subnet_ids,
+                                   router_ids, network_ids)
 
     return results.compile_summary()
 
 
 if __name__ == '__main__':
-    main()
+    logging.basicConfig(level=logging.INFO)
+    sys.exit(main())
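
Beyond the Quagga gateway changes, the last hunk introduces a create/track/cleanup structure: every created resource ID is appended to a list, and the matching cleanup_* helper runs in a finally block so resources are released even when the test raises. Reduced to plain Python (illustrative only; run_testcase, create_resource and delete_resource are hypothetical, while the cleanup_* names come from sdnvpn.lib.utils):

    def run_testcase(create_resource, delete_resource):
        created = []
        try:
            created.append(create_resource("net_1"))
            created.append(create_resource("quagga_net"))
            # ... exercise the test here; any exception still reaches finally
            return "PASS"
        finally:
            # Tear down in reverse creation order, mirroring the calls to
            # cleanup_nova/cleanup_glance/cleanup_neutron at the end of main().
            for resource in reversed(created):
                delete_resource(resource)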