Merge "Make tight the way an instance is considered as UP"
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 27fd5b3..ad0714e 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -7,6 +7,7 @@
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
+import logging
 import os
 import sys
 import time
@@ -14,13 +15,12 @@ import requests
 import re
 import subprocess
 
-import functest.utils.functest_logger as ft_logger
 import functest.utils.openstack_utils as os_utils
 from opnfv.deployment.factory import Factory as DeploymentFactory
 
 from sdnvpn.lib import config as sdnvpn_config
 
-logger = ft_logger.Logger("sndvpn_test_utils").getLogger()
+logger = logging.getLogger('sdnvpn_test_utils')
 
 common_config = sdnvpn_config.CommonConfig()
 
@@ -62,7 +62,6 @@ def create_subnet(neutron_client, name, cidr, net_id):
 def create_network(neutron_client, net, subnet1, cidr1,
                    router, subnet2=None, cidr2=None):
     """Network assoc won't work for networks/subnets created by this function.
-
     It is an ODL limitation due to it handling routers as vpns.
     See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
     network_dic = os_utils.create_network_full(neutron_client,
@@ -143,7 +142,7 @@ def create_instance(nova_client,
     return instance
 
 
-def generate_ping_userdata(ips_array):
+def generate_ping_userdata(ips_array, ping_count=10):
     ips = ""
     for ip in ips_array:
         ips = ("%s %s" % (ips, ip))
@@ -154,7 +153,7 @@ def generate_ping_userdata(ips_array):
             "while true; do\n"
             " for i do\n"
             "  ip=$i\n"
-            "  ping -c 1 $ip 2>&1 >/dev/null\n"
+            "  ping -c %s $ip 2>&1 >/dev/null\n"
             "  RES=$?\n"
             "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
             "   echo ping $ip OK\n"
@@ -163,7 +162,7 @@ def generate_ping_userdata(ips_array):
             " done\n"
             " sleep 1\n"
             "done\n"
-            % ips)
+            % (ips, ping_count))
 
 
 def generate_userdata_common():
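
A usage sketch for the parameterized template above (the IP addresses are
illustrative, and sdnvpn must be importable):

    from sdnvpn.lib.utils import generate_ping_userdata

    # Cloud-init user data: a shell loop that pings each peer 5 times per
    # pass and echoes "ping <ip> OK" on success.
    userdata = generate_ping_userdata(["10.0.0.2", "10.0.0.3"], ping_count=5)
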
@@ -247,11 +246,12 @@ def get_instance_ip(instance):
 
 
 def wait_for_instance(instance):
-    logger.info("Waiting for instance %s to get a DHCP lease..." % instance.id)
+    logger.info("Waiting for instance %s to get a DHCP lease and "
+                "prompt for login..." % instance.id)
     # The sleep this function replaced waited for 80s
     tries = 40
     sleep_time = 2
-    pattern = "Lease of .* obtained, lease time"
+    pattern = ".* login:"
     expected_regex = re.compile(pattern)
     console_log = ""
     while tries > 0 and not expected_regex.search(console_log):
@@ -260,7 +260,7 @@ def wait_for_instance(instance):
         tries -= 1
 
     if not expected_regex.search(console_log):
-        logger.error("Instance %s seems to have failed leasing an IP."
+        logger.error("Instance %s seems not to boot up properly."
                      % instance.id)
         return False
     return True
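
The tightened pattern waits for the guest's login prompt instead of the DHCP
lease message, since the prompt only appears once boot has fully completed. A
quick standalone check of the new regex (the console line is a typical Cirros
prompt, shown here for illustration):

    import re

    # ".* login:" matches the getty prompt printed after boot finishes.
    assert re.search(".* login:", "cirros-vm1 login:")
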
@@ -279,7 +279,7 @@ def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
                  % (bgpvpn_id, net_id))
 
     while tries > 0 and net_id not in nets:
-        nets = os_utils.get_bgpvpn_networks(neutron_client, bgpvpn_id)
+        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
         time.sleep(sleep_time)
         tries -= 1
     if net_id not in nets:
@@ -303,7 +303,7 @@ def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
     logger.debug("Waiting for router %s to associate with BGPVPN %s "
                  % (bgpvpn_id, router_id))
     while tries > 0 and router_id not in routers:
-        routers = os_utils.get_bgpvpn_routers(neutron_client, bgpvpn_id)
+        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
         time.sleep(sleep_time)
         tries -= 1
     if router_id not in routers:
@@ -328,7 +328,6 @@ def wait_before_subtest(*args, **kwargs):
 
 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
     """Get the compute nodes in the deployment
-
     Exit if the deployment doesn't have enough compute nodes"""
     compute_nodes = os_utils.get_hypervisors(nova_client)
 
@@ -343,22 +342,57 @@ def assert_and_get_compute_nodes(nova_client, required_node_number=2):
     return compute_nodes
 
 
-def open_icmp_ssh(neutron_client, security_group_id):
-    os_utils.create_secgroup_rule(neutron_client,
-                                  security_group_id,
-                                  'ingress',
-                                  'icmp')
-    os_utils.create_secgroup_rule(neutron_client,
-                                  security_group_id,
-                                  'tcp',
-                                  80, 80)
+def open_icmp(neutron_client, security_group_id):
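+    # check_security_group_rules() returns True only when no matching rule
+    # exists yet, i.e. it is safe to create one (hence the else branch).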
+    if os_utils.check_security_group_rules(neutron_client,
+                                           security_group_id,
+                                           'ingress',
+                                           'icmp'):
+
+        if not os_utils.create_secgroup_rule(neutron_client,
+                                             security_group_id,
+                                             'ingress',
+                                             'icmp'):
+            logger.error("Failed to create icmp security group rule...")
+    else:
+        logger.info("This rule exists for security group: %s"
+                    % security_group_id)
+
+
+def open_http_port(neutron_client, security_group_id):
+    if os_utils.check_security_group_rules(neutron_client,
+                                           security_group_id,
+                                           'ingress',
+                                           'tcp',
+                                           80, 80):
+
+        if not os_utils.create_secgroup_rule(neutron_client,
+                                             security_group_id,
+                                             'ingress',
+                                             'tcp',
+                                             80, 80):
+
+            logger.error("Failed to create http security group rule...")
+    else:
+        logger.info("This rule exists for security group: %s"
+                    % security_group_id)
 
 
 def open_bgp_port(neutron_client, security_group_id):
-    os_utils.create_secgroup_rule(neutron_client,
-                                  security_group_id,
-                                  'tcp',
-                                  179, 179)
+    if os_utils.check_security_group_rules(neutron_client,
+                                           security_group_id,
+                                           'ingress',
+                                           'tcp',
+                                           179, 179):
+
+        if not os_utils.create_secgroup_rule(neutron_client,
+                                             security_group_id,
+                                             'ingress',
+                                             'tcp',
+                                             179, 179):
+            logger.error("Failed to create bgp security group rule...")
+    else:
+        logger.info("This rule exists for security group: %s"
+                    % security_group_id)
 
 
 def exec_cmd(cmd, verbose):
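
The three open_* helpers above share one check-then-create pattern. A
hypothetical consolidation inside this module (not part of this change, and
assuming both os_utils helpers default their port bounds to None):

    def open_ingress_port(neutron_client, security_group_id, protocol,
                          port_min=None, port_max=None):
        # True means no matching rule exists yet and one may be created.
        if os_utils.check_security_group_rules(neutron_client,
                                               security_group_id,
                                               'ingress', protocol,
                                               port_min, port_max):
            if not os_utils.create_secgroup_rule(neutron_client,
                                                 security_group_id,
                                                 'ingress', protocol,
                                                 port_min, port_max):
                logger.error("Failed to create %s security group rule..."
                             % protocol)
        else:
            logger.info("This rule already exists for security group: %s"
                        % security_group_id)
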
@@ -399,16 +433,14 @@ def check_odl_fib(ip, controller_ip):
 
 def run_odl_cmd(odl_node, cmd):
     '''Run a command in the OpenDaylight Karaf shell
-
     This is a bit flimsy because of shell quote escaping, make sure that
     the cmd passed does not have any top level double quotes or this
     function will break.
-
     The /dev/null is used because client works, but outputs something
     that contains "ERROR" and run_cmd doesn't like that.
-
     '''
-    karaf_cmd = '/opt/opendaylight/bin/client "%s" 2>/dev/null' % cmd
+    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
+                 ' 2>/dev/null' % cmd)
     return odl_node.run_cmd(karaf_cmd)
 
 
@@ -417,6 +449,8 @@ def wait_for_cloud_init(instance):
     # ubuntu images take a long time to start
     tries = 20
     sleep_time = 30
+    logger.info("Waiting for cloud init of instance: {}"
+                "".format(instance.name))
     while tries > 0:
         instance_log = instance.get_console_output()
         if "Failed to run module" in instance_log:
@@ -435,5 +469,213 @@ def wait_for_cloud_init(instance):
                      ". Reason: %s",
                      instance_log)
         success = False
-
+    logger.info("Finished waiting for cloud init of instance {} result was {}"
+                "".format(instance.name, success))
     return success
+
+
+def attach_instance_to_ext_br(instance, compute_node):
+    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+    if installer_type == "fuel":
+        bridge = "br-ex"
+    elif installer_type == "apex":
+        # In Apex, br-ex is an ovs bridge and virsh attach-interface
+        # won't just work. We work around it by creating a linux
+        # bridge, attaching that to br-ex with a veth pair
+        # and virsh-attaching the instance to the linux-bridge
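+        # Resulting wiring (sketch): instance -> br-quagga (linux bridge)
+        # -> quagga-tap/ovs-quagga-tap (veth pair) -> br-ex (OVS)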
+        bridge = "br-quagga"
+        cmd = """
+        set -e
+        if ! sudo brctl show | grep -q ^{bridge}; then
+          sudo brctl addbr {bridge}
+          sudo ip link set {bridge} up
+          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+          sudo ip link set dev ovs-quagga-tap up
+          sudo ip link set dev quagga-tap up
+          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
+          sudo brctl addif {bridge} quagga-tap
+        fi
+        """
+        compute_node.run_cmd(cmd.format(bridge=bridge))
+
+    compute_node.run_cmd("sudo virsh attach-interface %s"
+                         " bridge %s" % (libvirt_instance_name, bridge))
+
+
+def detach_instance_from_ext_br(instance, compute_node):
+    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
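+    # Note: the lookup below gathers the 52:54:* MACs of every running
+    # libvirt domain on this host; it assumes the instance under test is
+    # the only VM running there.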
+    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
+                               "grep running | awk '{print $2}'); "
+                               "do echo -n ; sudo virsh dumpxml $vm| "
+                               "grep -oP '52:54:[\da-f:]+' ;done")
+    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
+                         " --type bridge --mac %s"
+                         % (libvirt_instance_name, mac))
+
+    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+    if installer_type == "fuel":
+        bridge = "br-ex"
+    elif installer_type == "apex":
+        # In Apex, br-ex is an ovs bridge and virsh attach-interface
+        # won't just work. We work around it by creating a linux
+        # bridge, attaching that to br-ex with a veth pair
+        # and virsh-attaching the instance to the linux-bridge
+        bridge = "br-quagga"
+        cmd = """
+            sudo brctl delif {bridge} quagga-tap &&
+            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
+            sudo ip link set dev quagga-tap down &&
+            sudo ip link set dev ovs-quagga-tap down &&
+            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+            sudo ip link set {bridge} down &&
+            sudo brctl delbr {bridge}
+        """
+        compute_node.run_cmd(cmd.format(bridge=bridge))
+
+
+def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
+                    subnet_ids, router_ids, network_ids):
+
+    if len(floatingip_ids) != 0:
+        for floatingip_id in floatingip_ids:
+            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
+                logger.error('Failed to delete all floating IPs. '
+                             'Floating IP with id {} was not deleted.'.
+                             format(floatingip_id))
+                return False
+
+    if len(bgpvpn_ids) != 0:
+        for bgpvpn_id in bgpvpn_ids:
+            delete_bgpvpn(neutron_client, bgpvpn_id)
+
+    if len(interfaces) != 0:
+        for router_id, subnet_id in interfaces:
+            if not os_utils.remove_interface_router(neutron_client,
+                                                    router_id, subnet_id):
+                logger.error('Failed to remove all router interfaces. '
+                             'Interface of router with id {} was not '
+                             'removed.'.format(router_id))
+
+    if len(router_ids) != 0:
+        for router_id in router_ids:
+            if not os_utils.remove_gateway_router(neutron_client, router_id):
+                logger.error('Failed to remove all router gateways. '
+                             'Gateway of router with id {} was not '
+                             'removed.'.format(router_id))
+
+    if len(subnet_ids) != 0:
+        for subnet_id in subnet_ids:
+            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
+                logger.error('Failed to delete all subnets. '
+                             'Subnet with id {} was not deleted.'.
+                             format(subnet_id))
+                return False
+
+    if len(router_ids) != 0:
+        for router_id in router_ids:
+            if not os_utils.delete_neutron_router(neutron_client, router_id):
+                logger.error('Failed to delete all routers. '
+                             'Router with id {} was not deleted.'.
+                             format(router_id))
+                return False
+
+    if len(network_ids) != 0:
+        for network_id in network_ids:
+            if not os_utils.delete_neutron_net(neutron_client, network_id):
+                logger.error('Failed to delete all networks. '
+                             'Network with id {} was not deleted.'.
+                             format(network_id))
+                return False
+    return True
+
+
+def cleanup_nova(nova_client, instance_ids, image_ids):
+    if len(instance_ids) != 0:
+        for instance_id in instance_ids:
+            if not os_utils.delete_instance(nova_client, instance_id):
+                logger.error('Failed to delete all instances. '
+                             'Instance with id {} was not deleted.'.
+                             format(instance_id))
+                return False
+
+    if len(image_ids) != 0:
+        for image_id in image_ids:
+            if not os_utils.delete_glance_image(nova_client, image_id):
+                logger.error('Failed to delete all images. '
+                             'Image with id {} was not deleted.'.
+                             format(image_id))
+                return False
+    return True
+
+
+def create_bgpvpn(neutron_client, **kwargs):
+    # kwargs may carry any BGPVPN attribute accepted by the API,
+    # e.g. name, route_distinguishers, route_targets.
+    json_body = {"bgpvpn": kwargs}
+    return neutron_client.create_bgpvpn(json_body)
+
+
+def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
+    json_body = {"bgpvpn": kwargs}
+    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)
+
+
+def delete_bgpvpn(neutron_client, bgpvpn_id):
+    return neutron_client.delete_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn(neutron_client, bgpvpn_id):
+    return neutron_client.show_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn_routers(neutron_client, bgpvpn_id):
+    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']
+
+
+def get_bgpvpn_networks(neutron_client, bgpvpn_id):
+    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
+
+
+def create_router_association(neutron_client, bgpvpn_id, router_id):
+    json_body = {"router_association": {"router_id": router_id}}
+    return neutron_client.create_router_association(bgpvpn_id, json_body)
+
+
+def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
+    json_body = {"network_association": {"network_id": neutron_network_id}}
+    return neutron_client.create_network_association(bgpvpn_id, json_body)
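+# Usage sketch for the BGPVPN wrappers above (illustrative values; the API
+# returns a dict keyed by 'bgpvpn', as get_bgpvpn() relies on):
+#   bgpvpn = create_bgpvpn(neutron_client, name="bgpvpn-1",
+#                          route_targets="64512:1")["bgpvpn"]
+#   create_network_association(neutron_client, bgpvpn["id"], network_id)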
+
+
+def is_fail_mode_secure():
+    """
+    Checks the value of the attribute fail_mode,
+    if it is set to secure. This check is performed
+    on all OVS br-int interfaces, for all OpenStack nodes.
+    """
+    is_secure = {}
+    openstack_nodes = get_nodes()
+    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
+                       "grep -i bridge | "
+                       "awk '{print $2}'")
+    # Define OVS get fail_mode command
+    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
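+    # `ovs-vsctl get-fail-mode br-int` prints the configured mode, e.g.
+    # "secure" or "standalone" (empty if unset).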
+    for openstack_node in openstack_nodes:
+        if not openstack_node.is_active():
+            continue
+
+        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
+                        strip().split('\n'))
+        if 'br-int' in ovs_int_list:
+            # Execute get fail_mode command
+            br_int_fail_mode = (openstack_node.
+                                run_cmd(get_ovs_fail_mode_cmd).strip())
+            if br_int_fail_mode == 'secure':
+                # success
+                is_secure[openstack_node.name] = True
+            else:
+                # failure
+                logger.error('The fail_mode of br-int is not secure '
+                             'on node {}.'.format(openstack_node.name))
+                is_secure[openstack_node.name] = False
+    return is_secure