Merge "Fix instance boot when metadata exists"
[sdnvpn.git] / sdnvpn / lib / utils.py
index b551954..67b75d0 100644
@@ -1,25 +1,39 @@
 #!/usr/bin/python
 #
-# Copyright (c) 2015 All rights reserved
+# Copyright (c) 2017 All rights reserved
 # This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
+import logging
+import os
 import sys
 import time
+import requests
+import re
+import subprocess
 
-import functest.utils.functest_logger as ft_logger
 import functest.utils.openstack_utils as os_utils
-import re
+from opnfv.deployment.factory import Factory as DeploymentFactory
 
 from sdnvpn.lib import config as sdnvpn_config
 
-logger = ft_logger.Logger("sndvpn_test_utils").getLogger()
+logger = logging.getLogger('sdnvpn_test_utils')
 
 common_config = sdnvpn_config.CommonConfig()
 
+ODL_USER = 'admin'
+ODL_PASS = 'admin'
+
+
+def create_custom_flavor():
+    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
+                                         common_config.custom_flavor_ram,
+                                         common_config.custom_flavor_disk,
+                                         common_config.custom_flavor_vcpus)
+
 
 def create_net(neutron_client, name):
     logger.debug("Creating network %s", name)
@@ -47,8 +61,7 @@ def create_subnet(neutron_client, name, cidr, net_id):
 
 def create_network(neutron_client, net, subnet1, cidr1,
                    router, subnet2=None, cidr2=None):
-    """Network assoc will not work for networks/subnets created by this function.
-
+    """Network assoc won't work for networks/subnets created by this function.
     It is an ODL limitation due to it handling routers as vpns.
     See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
     network_dic = os_utils.create_network_full(neutron_client,
@@ -129,7 +142,7 @@ def create_instance(nova_client,
     return instance
 
 
-def generate_ping_userdata(ips_array):
+def generate_ping_userdata(ips_array, ping_count=10):
     ips = ""
     for ip in ips_array:
         ips = ("%s %s" % (ips, ip))
@@ -140,7 +153,7 @@ def generate_ping_userdata(ips_array):
             "while true; do\n"
             " for i do\n"
             "  ip=$i\n"
-            "  ping -c 1 $ip 2>&1 >/dev/null\n"
+            "  ping -c %s $ip 2>&1 >/dev/null\n"
             "  RES=$?\n"
             "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
             "   echo ping $ip OK\n"
@@ -149,7 +162,7 @@ def generate_ping_userdata(ips_array):
             " done\n"
             " sleep 1\n"
             "done\n"
-            % ips)
+            % (ips, ping_count))
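+
+# Example (hypothetical values): generate_ping_userdata(["10.0.0.2"], 5)
+# yields a cloud-init script that pings each IP 5 times per loop pass
+# and prints "ping <ip> OK" or "ping <ip> KO" once a second.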
 
 
 def generate_userdata_common():
@@ -195,12 +208,49 @@ def generate_userdata_with_ssh(ips_array):
     return (u1 + u2)
 
 
-def wait_for_instance(instance):
-    logger.info("Waiting for instance %s to get a DHCP lease..." % instance.id)
-    # The sleep this function replaced waited for 80s
+def get_installerHandler():
+    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+    installer_ip = get_installer_ip()
+
+    if installer_type not in ["fuel", "apex"]:
+        logger.warn("installer type %s is neither fuel nor apex."
+                    "returning None for installer handler" % installer_type)
+        return None
+    else:
+        if installer_type in ["apex"]:
+            developHandler = DeploymentFactory.get_handler(
+                installer_type,
+                installer_ip,
+                'root',
+                pkey_file="/root/.ssh/id_rsa")
+
+        if installer_type in ["fuel"]:
+            developHandler = DeploymentFactory.get_handler(
+                installer_type,
+                installer_ip,
+                'root',
+                'r00tme')
+        return developHandler
+
+
+def get_nodes():
+    developHandler = get_installerHandler()
+    return developHandler.get_nodes()
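+
+# A minimal usage sketch (assumes INSTALLER_TYPE and INSTALLER_IP are
+# exported in the environment; node roles come from the opnfv handler):
+#   nodes = get_nodes()
+#   controllers = [node for node in nodes if node.is_controller()]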
+
+
+def get_installer_ip():
+    return str(os.environ['INSTALLER_IP'])
+
+
+def get_instance_ip(instance):
+    instance_ip = instance.networks.itervalues().next()[0]
+    return instance_ip
+
+
+def wait_for_instance(instance, pattern=".* login:"):
+    logger.info("Waiting for instance %s to boot up" % instance.id)
     tries = 40
     sleep_time = 2
-    pattern = "Lease of .* obtained, lease time"
     expected_regex = re.compile(pattern)
     console_log = ""
     while tries > 0 and not expected_regex.search(console_log):
@@ -209,14 +259,20 @@ def wait_for_instance(instance):
         tries -= 1
 
     if not expected_regex.search(console_log):
-        logger.error("Instance %s seems to have failed leasing an IP."
+        logger.error("Instance %s does not boot up properly."
                      % instance.id)
         return False
     return True
 
 
-def wait_for_instances_up(*args):
-    check = [wait_for_instance(instance) for instance in args]
+def wait_for_instances_up(*instances):
+    check = [wait_for_instance(instance) for instance in instances]
+    return all(check)
+
+
+def wait_for_instances_get_dhcp(*instances):
+    check = [wait_for_instance(instance, "Lease of .* obtained")
+             for instance in instances]
     return all(check)
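+
+# Example: wait_for_instances_up(vm1, vm2) waits for each console log to
+# show a login prompt, while wait_for_instances_get_dhcp(vm1) waits for
+# a "Lease of .* obtained" message instead.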
 
 
@@ -228,7 +284,7 @@ def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
                  % (bgpvpn_id, net_id))
 
     while tries > 0 and net_id not in nets:
-        nets = os_utils.get_bgpvpn_networks(neutron_client, bgpvpn_id)
+        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
         time.sleep(sleep_time)
         tries -= 1
     if net_id not in nets:
@@ -252,7 +308,7 @@ def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
     logger.debug("Waiting for router %s to associate with BGPVPN %s "
                  % (bgpvpn_id, router_id))
     while tries > 0 and router_id not in routers:
-        routers = os_utils.get_bgpvpn_routers(neutron_client, bgpvpn_id)
+        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
         time.sleep(sleep_time)
         tries -= 1
     if router_id not in routers:
@@ -277,7 +333,6 @@ def wait_before_subtest(*args, **kwargs):
 
 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
     """Get the compute nodes in the deployment
-
     Exit if the deployment doesn't have enough compute nodes"""
     compute_nodes = os_utils.get_hypervisors(nova_client)
 
@@ -292,12 +347,406 @@ def assert_and_get_compute_nodes(nova_client, required_node_number=2):
     return compute_nodes
 
 
-def open_icmp_ssh(neutron_client, security_group_id):
-    os_utils.create_secgroup_rule(neutron_client,
-                                  security_group_id,
-                                  'ingress',
-                                  'icmp')
-    os_utils.create_secgroup_rule(neutron_client,
-                                  security_group_id,
-                                  'tcp',
-                                  80, 80)
+def open_icmp(neutron_client, security_group_id):
+    if os_utils.check_security_group_rules(neutron_client,
+                                           security_group_id,
+                                           'ingress',
+                                           'icmp'):
+
+        if not os_utils.create_secgroup_rule(neutron_client,
+                                             security_group_id,
+                                             'ingress',
+                                             'icmp'):
+            logger.error("Failed to create icmp security group rule...")
+    else:
+        logger.info("This rule exists for security group: %s"
+                    % security_group_id)
+
+
+def open_http_port(neutron_client, security_group_id):
+    if os_utils.check_security_group_rules(neutron_client,
+                                           security_group_id,
+                                           'ingress',
+                                           'tcp',
+                                           80, 80):
+
+        if not os_utils.create_secgroup_rule(neutron_client,
+                                             security_group_id,
+                                             'ingress',
+                                             'tcp',
+                                             80, 80):
+
+            logger.error("Failed to create http security group rule...")
+    else:
+        logger.info("This rule exists for security group: %s"
+                    % security_group_id)
+
+
+def open_bgp_port(neutron_client, security_group_id):
+    if os_utils.check_security_group_rules(neutron_client,
+                                           security_group_id,
+                                           'ingress',
+                                           'tcp',
+                                           179, 179):
+
+        if not os_utils.create_secgroup_rule(neutron_client,
+                                             security_group_id,
+                                             'ingress',
+                                             'tcp',
+                                             179, 179):
+            logger.error("Failed to create bgp security group rule...")
+    else:
+        logger.info("This rule exists for security group: %s"
+                    % security_group_id)
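+
+# Note on the three helpers above: os_utils.check_security_group_rules
+# returns a truthy value when no matching rule exists yet, so each helper
+# creates the rule only if it is missing and logs otherwise.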
+
+
+def exec_cmd(cmd, verbose):
+    success = True
+    logger.debug("Executing '%s'" % cmd)
+    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT)
+    output = ""
+    for line in iter(p.stdout.readline, b''):
+        output += line
+
+    if verbose:
+        logger.debug(output)
+
+    p.stdout.close()
+    returncode = p.wait()
+    if returncode != 0:
+        logger.error("Command %s failed to execute." % cmd)
+        success = False
+
+    return output, success
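+
+# Example: output, success = exec_cmd("sudo ovs-vsctl show", verbose=True)
+# captures stdout and stderr together and flags failure on a non-zero
+# exit code.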
+
+
+def check_odl_fib(ip, controller_ip):
+    """Check that there is an entry in the ODL Fib for `ip`"""
+    url = "http://" + controller_ip + \
+          ":8181/restconf/config/odl-fib:fibEntries/"
+    logger.debug("Querring '%s' for FIB entries", url)
+    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
+    if res.status_code != 200:
+        logger.error("OpenDaylight response status code: %s", res.status_code)
+        return False
+    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
+                 % controller_ip)
+    logger.debug("OpenDaylight FIB: \n%s" % res.text)
+    return ip in res.text
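+
+# Usage sketch (hypothetical addresses): check_odl_fib("10.0.0.5",
+# "192.168.0.2") queries the odl-fib:fibEntries RESTCONF resource on
+# port 8181 and returns True only if "10.0.0.5" occurs in the response.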
+
+
+def run_odl_cmd(odl_node, cmd):
+    '''Run a command in the OpenDaylight Karaf shell.
+
+    This is a bit flimsy because of shell quote escaping; make sure the
+    cmd passed in does not contain any top-level double quotes, or this
+    function will break.
+
+    Stderr is redirected to /dev/null because the client works but prints
+    output that contains "ERROR", which run_cmd treats as a failure.
+    '''
+    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
+                 ' 2>/dev/null' % cmd)
+    return odl_node.run_cmd(karaf_cmd)
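+
+# Example: run_odl_cmd(odl_node, 'log:display') runs a standard Karaf
+# shell command; remember to avoid top-level double quotes in cmd.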
+
+
+def wait_for_cloud_init(instance):
+    success = True
+    # ubuntu images take a long time to start
+    tries = 20
+    sleep_time = 30
+    logger.info("Waiting for cloud init of instance: {}"
+                "".format(instance.name))
+    while tries > 0:
+        instance_log = instance.get_console_output()
+        if "Failed to run module" in instance_log:
+            success = False
+            logger.error("Cloud init failed to run. Reason: %s",
+                         instance_log)
+            break
+        if re.search(r"Cloud-init v. .+ finished at", instance_log):
+            success = True
+            break
+        time.sleep(sleep_time)
+        tries = tries - 1
+
+    if tries == 0:
+        logger.error("Cloud init timed out"
+                     ". Reason: %s",
+                     instance_log)
+        success = False
+    logger.info("Finished waiting for cloud init of instance {} result was {}"
+                "".format(instance.name, success))
+    return success
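+
+# Note: with tries=20 and sleep_time=30 this polls the console for up to
+# ten minutes before declaring a cloud-init timeout.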
+
+
+def attach_instance_to_ext_br(instance, compute_node):
+    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+    if installer_type == "fuel":
+        bridge = "br-ex"
+    elif installer_type == "apex":
+        # In Apex, br-ex is an ovs bridge and virsh attach-interface
+        # won't just work. We work around it by creating a linux
+        # bridge, attaching that to br-ex with a veth pair
+        # and virsh-attaching the instance to the linux-bridge
+        bridge = "br-quagga"
+        cmd = """
+        set -e
+        if ! sudo brctl show |grep -q ^{bridge};then
+          sudo brctl addbr {bridge}
+          sudo ip link set {bridge} up
+          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+          sudo ip link set dev ovs-quagga-tap up
+          sudo ip link set dev quagga-tap up
+          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
+          sudo brctl addif {bridge} quagga-tap
+        fi
+        """
+        compute_node.run_cmd(cmd.format(bridge=bridge))
+
+    compute_node.run_cmd("sudo virsh attach-interface %s"
+                         " bridge %s" % (libvirt_instance_name, bridge))
+
+
+def detach_instance_from_ext_br(instance, compute_node):
+    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
+                               "grep running | awk '{print $2}'); "
+                               "do echo -n ; sudo virsh dumpxml $vm| "
+                               "grep -oP '52:54:[\da-f:]+' ;done")
+    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
+                         " --type bridge --mac %s"
+                         % (libvirt_instance_name, mac))
+
+    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+    if installer_type == "fuel":
+        bridge = "br-ex"
+    elif installer_type == "apex":
+        # In Apex the instance was attached through a linux bridge
+        # (br-quagga) veth-paired to br-ex (see attach_instance_to_ext_br),
+        # so the veth pair and the linux bridge must be torn down as well
+        bridge = "br-quagga"
+        cmd = """
+            sudo brctl delif {bridge} quagga-tap &&
+            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
+            sudo ip link set dev quagga-tap down &&
+            sudo ip link set dev ovs-quagga-tap down &&
+            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+            sudo ip link set {bridge} down &&
+            sudo brctl delbr {bridge}
+        """
+        compute_node.run_cmd(cmd.format(bridge=bridge))
+
+
+def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
+                    subnet_ids, router_ids, network_ids):
+
+    if len(floatingip_ids) != 0:
+        for floatingip_id in floatingip_ids:
+            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
+                logger.error('Failed to delete all floating IPs. '
+                             'Floating IP with id {} was not deleted.'.
+                             format(floatingip_id))
+                return False
+
+    if len(bgpvpn_ids) != 0:
+        for bgpvpn_id in bgpvpn_ids:
+            delete_bgpvpn(neutron_client, bgpvpn_id)
+
+    if len(interfaces) != 0:
+        for router_id, subnet_id in interfaces:
+            if not os_utils.remove_interface_router(neutron_client,
+                                                    router_id, subnet_id):
+                logger.error('Failed to remove all router interfaces. '
+                             'Interface of router with id {} was not '
+                             'removed.'.format(router_id))
+
+    if len(router_ids) != 0:
+        for router_id in router_ids:
+            if not os_utils.remove_gateway_router(neutron_client, router_id):
+                logger.error('Failed to remove all router gateways. '
+                             'Gateway of router with id {} was not '
+                             'removed.'.format(router_id))
+
+    if len(subnet_ids) != 0:
+        for subnet_id in subnet_ids:
+            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
+                logger.error('Failed to delete all subnets. '
+                             'Subnet with id {} was not deleted.'.
+                             format(subnet_id))
+                return False
+
+    if len(router_ids) != 0:
+        for router_id in router_ids:
+            if not os_utils.delete_neutron_router(neutron_client, router_id):
+                logger.error('Failed to delete all routers. '
+                             'Router with id {} was not deleted.'.
+                             format(router_id))
+                return False
+
+    if len(network_ids) != 0:
+        for network_id in network_ids:
+            if not os_utils.delete_neutron_net(neutron_client, network_id):
+                logger.error('Failed to delete all networks. '
+                             'Network with id {} was not deleted.'.
+                             format(network_id))
+                return False
+    return True
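+
+# Usage sketch (hypothetical ids): cleanup_neutron(neutron, [fip_id],
+# [bgpvpn_id], [(router_id, subnet_id)], [subnet_id], [router_id],
+# [net_id]) tears resources down in dependency order: floating IPs,
+# BGPVPNs, router interfaces, gateways, subnets, routers, networks.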
+
+
+def cleanup_nova(nova_client, instance_ids):
+    if len(instance_ids) != 0:
+        for instance_id in instance_ids:
+            if not os_utils.delete_instance(nova_client, instance_id):
+                logger.error('Failed to delete all instances. '
+                             'Instance with id {} was not deleted.'.
+                             format(instance_id))
+                return False
+    return True
+
+
+def cleanup_glance(glance_client, image_ids):
+    if len(image_ids) != 0:
+        for image_id in image_ids:
+            if not os_utils.delete_glance_image(glance_client, image_id):
+                logger.error('Failed to delete all images. '
+                             'Image with id {} was not deleted.'.
+                             format(image_id))
+                return False
+    return True
+
+
+def create_bgpvpn(neutron_client, **kwargs):
+    # kwargs are passed through as the bgpvpn body and may include
+    # route_distinguishers, route_targets, name, etc.
+    json_body = {"bgpvpn": kwargs}
+    return neutron_client.create_bgpvpn(json_body)
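+
+# Example (hypothetical values):
+#   create_bgpvpn(neutron_client, name="vpn-1", route_targets="88:88",
+#                 route_distinguishers="88:88")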
+
+
+def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
+    json_body = {"bgpvpn": kwargs}
+    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)
+
+
+def delete_bgpvpn(neutron_client, bgpvpn_id):
+    return neutron_client.delete_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn(neutron_client, bgpvpn_id):
+    return neutron_client.show_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn_routers(neutron_client, bgpvpn_id):
+    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']
+
+
+def get_bgpvpn_networks(neutron_client, bgpvpn_id):
+    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
+
+
+def create_router_association(neutron_client, bgpvpn_id, router_id):
+    json_body = {"router_association": {"router_id": router_id}}
+    return neutron_client.create_router_association(bgpvpn_id, json_body)
+
+
+def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
+    json_body = {"network_association": {"network_id": neutron_network_id}}
+    return neutron_client.create_network_association(bgpvpn_id, json_body)
+
+
+def is_fail_mode_secure():
+    """
+    Checks the value of the attribute fail_mode,
+    if it is set to secure. This check is performed
+    on all OVS br-int interfaces, for all OpenStack nodes.
+    """
+    is_secure = {}
+    openstack_nodes = get_nodes()
+    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
+                       "grep -i bridge | "
+                       "awk '{print $2}'")
+    # Define OVS get fail_mode command
+    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
+    for openstack_node in openstack_nodes:
+        if not openstack_node.is_active():
+            continue
+
+        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
+                        strip().split('\n'))
+        if 'br-int' in ovs_int_list:
+            # Execute get fail_mode command
+            br_int_fail_mode = (openstack_node.
+                                run_cmd(get_ovs_fail_mode_cmd).strip())
+            if br_int_fail_mode == 'secure':
+                # success
+                is_secure[openstack_node.name] = True
+            else:
+                # failure
+                logger.error('The fail_mode for br-int was not secure '
+                             'on node {}'.format(openstack_node.name))
+                is_secure[openstack_node.name] = False
+    return is_secure
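+
+# Example: all(is_fail_mode_secure().values()) is True only when every
+# active node that has a br-int bridge reports fail_mode "secure".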
+
+
+def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
+                                subnet_quota, port_quota):
+    json_body = {"quota": {
+        "network": nw_quota,
+        "subnet": subnet_quota,
+        "port": port_quota
+    }}
+
+    try:
+        neutron_client.update_quota(tenant_id=tenant_id,
+                                    body=json_body)
+        return True
+    except Exception as e:
+        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
+                     " '%s', '%s', '%s', '%s')]: %s" %
+                     (tenant_id, nw_quota, subnet_quota, port_quota, e))
+        return False
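+
+# Example (hypothetical values): update_nw_subnet_port_quota(neutron,
+# tenant_id, 10, 10, 50) raises the tenant quotas for networks, subnets
+# and ports to 10, 10 and 50 respectively.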
+
+
+def update_instance_quota_class(nova_client, instances_quota):
+    try:
+        nova_client.quota_classes.update("default", instances=instances_quota)
+        return True
+    except Exception as e:
+        logger.error("Error [update_instance_quota_class(nova_client,"
+                     " '%s' )]: %s" % (instances_quota, e))
+        return False
+
+
+def get_neutron_quota(neutron_client, tenant_id):
+    try:
+        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
+    except Exception as e:
+        logger.error("Error in getting neutron quota for tenant "
+                     " '%s' )]: %s" % (tenant_id, e))
+        raise
+
+
+def get_nova_instances_quota(nova_client):
+    try:
+        return nova_client.quota_classes.get("default").instances
+    except Exception as e:
+        logger.error("Error in getting nova instances quota: %s" % e)
+        raise
+
+
+def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+    """
+    Gets, as input, a list of compute nodes and a list of OVS bridges
+    and returns the command console output, as a list of lines, that
+    contains all the OVS groups from all bridges and nodes in lists.
+    """
+    cmd_out_lines = []
+    for compute_node in compute_node_list:
+        for ovs_br in ovs_br_list:
+            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
+                                  "grep group".format(ovs_br, of_protocol))
+                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
+                                  split("\n"))
+    return cmd_out_lines
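+
+# Example: get_ovs_groups(computes, ["br-int"]) returns lines such as
+# "group_id=210001,type=all,..." as printed by ovs-ofctl dump-groups
+# with -O OpenFlow13.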