Fix error when querying neutron quota
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 371f3ed..9a5e181 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 #
 # Copyright (c) 2017 All rights reserved
 # This program and the accompanying materials
@@ -7,18 +7,21 @@
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
+import json
 import logging
 import os
-import sys
 import time
 import requests
 import re
 import subprocess
+from concurrent.futures import ThreadPoolExecutor
+from openstack.exceptions import ResourceNotFound
+from requests.auth import HTTPBasicAuth
 
-import functest.utils.openstack_utils as os_utils
 from opnfv.deployment.factory import Factory as DeploymentFactory
 
 from sdnvpn.lib import config as sdnvpn_config
+import sdnvpn.lib.openstack_utils as os_utils
 
 logger = logging.getLogger('sdnvpn_test_utils')
 
@@ -27,6 +30,35 @@ common_config = sdnvpn_config.CommonConfig()
 ODL_USER = 'admin'
 ODL_PASS = 'admin'
 
+executor = ThreadPoolExecutor(5)
+
+
+class ExtraRoute(object):
+    """
+    Class to represent an extra route for a router
+    """
+
+    def __init__(self, destination, nexthop):
+        self.destination = destination
+        self.nexthop = nexthop
+
+
+class AllowedAddressPair(object):
+    """
+    Class to represent an allowed address pair for a neutron port
+    """
+
+    def __init__(self, ipaddress, macaddress):
+        self.ipaddress = ipaddress
+        self.macaddress = macaddress
+
+
+def create_default_flavor():
+    return os_utils.get_or_create_flavor(common_config.default_flavor,
+                                         common_config.default_flavor_ram,
+                                         common_config.default_flavor_disk,
+                                         common_config.default_flavor_vcpus)
+
 
 def create_custom_flavor():
     return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
@@ -35,36 +67,38 @@ def create_custom_flavor():
                                          common_config.custom_flavor_vcpus)
 
 
-def create_net(neutron_client, name):
+def create_net(conn, name):
     logger.debug("Creating network %s", name)
-    net_id = os_utils.create_neutron_net(neutron_client, name)
+    net_id = os_utils.create_neutron_net(conn, name)
     if not net_id:
         logger.error(
             "There has been a problem when creating the neutron network")
-        sys.exit(-1)
+        raise Exception("There has been a problem when creating"
+                        " the neutron network {}".format(name))
     return net_id
 
 
-def create_subnet(neutron_client, name, cidr, net_id):
+def create_subnet(conn, name, cidr, net_id):
     logger.debug("Creating subnet %s in network %s with cidr %s",
                  name, net_id, cidr)
-    subnet_id = os_utils.create_neutron_subnet(neutron_client,
+    subnet_id = os_utils.create_neutron_subnet(conn,
                                                name,
                                                cidr,
                                                net_id)
     if not subnet_id:
         logger.error(
             "There has been a problem when creating the neutron subnet")
-        sys.exit(-1)
+        raise Exception("There has been a problem when creating"
+                        " the neutron subnet {}".format(name))
     return subnet_id
 
 
-def create_network(neutron_client, net, subnet1, cidr1,
+def create_network(conn, net, subnet1, cidr1,
                    router, subnet2=None, cidr2=None):
     """Network assoc won't work for networks/subnets created by this function.
     It is an ODL limitation due to it handling routers as vpns.
     See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
-    network_dic = os_utils.create_network_full(neutron_client,
+    network_dic = os_utils.create_network_full(conn,
                                                net,
                                                subnet1,
                                                router,
@@ -72,7 +106,8 @@ def create_network(neutron_client, net, subnet1, cidr1,
     if not network_dic:
         logger.error(
             "There has been a problem when creating the neutron network")
-        sys.exit(-1)
+        raise Exception("There has been a problem when creating"
+                        " the neutron network {}".format(net))
     net_id = network_dic["net_id"]
     subnet_id = network_dic["subnet_id"]
     router_id = network_dic["router_id"]
@@ -80,25 +115,53 @@ def create_network(neutron_client, net, subnet1, cidr1,
     if subnet2 is not None:
         logger.debug("Creating and attaching a second subnet...")
         subnet_id = os_utils.create_neutron_subnet(
-            neutron_client, subnet2, cidr2, net_id)
+            conn, subnet2, cidr2, net_id)
         if not subnet_id:
             logger.error(
                 "There has been a problem when creating the second subnet")
-            sys.exit(-1)
+            raise Exception("There has been a problem when creating"
+                            " the second subnet {}".format(subnet2))
         logger.debug("Subnet '%s' created successfully" % subnet_id)
     return net_id, subnet_id, router_id
 
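+# Usage sketch (names and CIDRs are hypothetical):
+#   net_id, subnet_id, router_id = create_network(
+#       conn, 'net-1', 'subnet-1', '10.10.10.0/24', 'router-1')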
 
-def create_instance(nova_client,
+def get_port(conn, instance_id):
+    ports = os_utils.get_port_list(conn)
+    for port in ports:
+        if port.device_id == instance_id:
+            return port
+    return None
+
+
+def update_port_allowed_address_pairs(conn, port_id, address_pairs):
+    if not address_pairs:
+        return
+    allowed_address_pairs = []
+    for address_pair in address_pairs:
+        address_pair_dict = {'ip_address': address_pair.ipaddress,
+                             'mac_address': address_pair.macaddress}
+        allowed_address_pairs.append(address_pair_dict)
+
+    try:
+        port = conn.network.\
+            update_port(port_id, allowed_address_pairs=allowed_address_pairs)
+        return port.id
+    except Exception as e:
+        logger.error("Error [update_port_allowed_address_pairs('%s', '%s')]:"
+                     " %s" % (port_id, address_pairs, e))
+        return None
+
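+# Sketch (hypothetical addresses): permit an extra VIP on an instance port:
+#   port = get_port(conn, vm.id)
+#   pairs = [AllowedAddressPair('10.0.0.100', 'fa:16:3e:aa:bb:cc')]
+#   update_port_allowed_address_pairs(conn, port.id, pairs)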
+
+def create_instance(conn,
                     name,
                     image_id,
                     network_id,
                     sg_id,
                     secgroup_name=None,
                     fixed_ip=None,
-                    compute_node='',
+                    compute_node=None,
                     userdata=None,
-                    files=None,
+                    files=[],
                     **kwargs
                     ):
     if 'flavor' not in kwargs:
@@ -124,12 +187,14 @@ def create_instance(nova_client,
 
     if instance is None:
         logger.error("Error while booting instance.")
-        sys.exit(-1)
+        raise Exception("Error while booting instance {}".format(name))
     else:
+        # Retrieve the IP of the instance
+        network_name = conn.network.get_network(network_id).name
+        instance_ip = conn.compute.get_server(instance).\
+            addresses.get(network_name)[0]['addr']
         logger.debug("Instance '%s' booted successfully. IP='%s'." %
-                     (name, instance.networks.itervalues().next()[0]))
-    # Retrieve IP of INSTANCE
-    # instance_ip = instance.networks.get(network_id)[0]
+                     (name, instance_ip))
 
     if secgroup_name:
         logger.debug("Adding '%s' to security group '%s'..."
@@ -137,7 +202,7 @@ def create_instance(nova_client,
     else:
         logger.debug("Adding '%s' to security group '%s'..."
                      % (name, sg_id))
-    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+    os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
 
     return instance
 
@@ -208,6 +273,19 @@ def generate_userdata_with_ssh(ips_array):
     return (u1 + u2)
 
 
+def generate_userdata_interface_create(interface_name, interface_number,
+                                       ip_address, net_mask):
+    return ("#!/bin/sh\n"
+            "set -xe\n"
+            "sudo useradd -m sdnvpn\n"
+            "sudo adduser sdnvpn sudo\n"
+            "echo sdnvpn:opnfv | sudo chpasswd\n"
+            "sleep 20\n"
+            "sudo ifconfig %s:%s %s netmask %s up\n"
+            % (interface_name, interface_number,
+               ip_address, net_mask))
+
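+# Sketch: generate_userdata_interface_create('eth0', 1, '10.10.10.5',
+# '255.255.255.0') returns a script whose final command is
+# "sudo ifconfig eth0:1 10.10.10.5 netmask 255.255.255.0 up".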
+
 def get_installerHandler():
     installer_type = str(os.environ['INSTALLER_TYPE'].lower())
     installer_ip = get_installer_ip()
@@ -242,37 +320,74 @@ def get_installer_ip():
     return str(os.environ['INSTALLER_IP'])
 
 
-def get_instance_ip(instance):
-    instance_ip = instance.networks.itervalues().next()[0]
+def get_instance_ip(conn, instance):
+    addresses = conn.compute.get_server(instance).addresses
+    instance_ip = list(addresses.values())[0][0]['addr']
     return instance_ip
 
 
-def wait_for_instance(instance):
-    logger.info("Waiting for instance %s to get a DHCP lease and "
-                "prompt for login..." % instance.id)
-    # The sleep this function replaced waited for 80s
-    tries = 40
+def wait_for_instance(instance, pattern=".* login:", tries=40):
+    logger.info("Waiting for instance %s to boot up" % instance.id)
+    conn = os_utils.get_os_connection()
     sleep_time = 2
-    pattern = ".* login:"
     expected_regex = re.compile(pattern)
     console_log = ""
     while tries > 0 and not expected_regex.search(console_log):
-        console_log = instance.get_console_output()
+        console_log = conn.compute.\
+            get_server_console_output(instance)['output']
         time.sleep(sleep_time)
         tries -= 1
 
     if not expected_regex.search(console_log):
-        logger.error("Instance %s seems not to boot up properly."
+        logger.error("Instance %s did not boot up properly."
                      % instance.id)
         return False
     return True
 
 
-def wait_for_instances_up(*args):
-    check = [wait_for_instance(instance) for instance in args]
+def wait_for_instances_up(*instances):
+    check = [wait_for_instance(instance) for instance in instances]
+    return all(check)
+
+
+def wait_for_instances_get_dhcp(*instances):
+    check = [wait_for_instance(instance, "Lease of .* obtained")
+             for instance in instances]
     return all(check)
 
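+# Sketch: both wait_for_* wrappers above poll each instance's console log;
+# the DHCP variant matches the "Lease of .* obtained" line printed by the
+# guest's DHCP client (udhcpc on cirros), e.g.
+#   wait_for_instances_get_dhcp(vm1, vm2)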
 
+def async_Wait_for_instances(instances, tries=40):
+    if not instances:
+        return
+    futures = []
+    for instance in instances:
+        future = executor.submit(wait_for_instance,
+                                 instance,
+                                 ".* login:",
+                                 tries)
+        futures.append(future)
+    results = []
+    for future in futures:
+        results.append(future.result())
+    if not all(results):
+        logger.error("One or more instances did not boot up in time")
+
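+# Sketch: boot several instances, then block until each console log shows a
+# login prompt, waiting on the module-level thread pool:
+#   async_Wait_for_instances([vm1, vm2], tries=60)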
+
+def wait_for_instance_delete(conn, instance_id, tries=30):
+    sleep_time = 2
+    instances = [instance_id]
+    logger.debug("Waiting for instance %s to be deleted"
+                 % (instance_id))
+    while tries > 0 and instance_id in instances:
+        instances = [instance.id for instance in
+                     os_utils.get_instances(conn)]
+        time.sleep(sleep_time)
+        tries -= 1
+    if instance_id in instances:
+        logger.error("Deletion of instance %s failed" %
+                     (instance_id))
+
+
 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
     tries = 30
     sleep_time = 1
@@ -328,29 +443,31 @@ def wait_before_subtest(*args, **kwargs):
     time.sleep(30)
 
 
-def assert_and_get_compute_nodes(nova_client, required_node_number=2):
+def assert_and_get_compute_nodes(conn, required_node_number=2):
     """Get the compute nodes in the deployment
     Exit if the deployment doesn't have enough compute nodes"""
-    compute_nodes = os_utils.get_hypervisors(nova_client)
+    compute_nodes = os_utils.get_hypervisors(conn)
 
     num_compute_nodes = len(compute_nodes)
     if num_compute_nodes < 2:
         logger.error("There are %s compute nodes in the deployment. "
                      "Minimum number of nodes to complete the test is 2."
                      % num_compute_nodes)
-        sys.exit(-1)
+        raise Exception("There are {} compute nodes in the deployment. "
+                        "Minimum number of nodes to complete the test"
+                        " is 2.".format(num_compute_nodes))
 
     logger.debug("Compute nodes: %s" % compute_nodes)
     return compute_nodes
 
 
-def open_icmp(neutron_client, security_group_id):
-    if os_utils.check_security_group_rules(neutron_client,
+def open_icmp(conn, security_group_id):
+    if os_utils.check_security_group_rules(conn,
                                            security_group_id,
                                            'ingress',
                                            'icmp'):
 
-        if not os_utils.create_secgroup_rule(neutron_client,
+        if not os_utils.create_secgroup_rule(conn,
                                              security_group_id,
                                              'ingress',
                                              'icmp'):
@@ -360,14 +477,14 @@ def open_icmp(neutron_client, security_group_id):
                     % security_group_id)
 
 
-def open_http_port(neutron_client, security_group_id):
-    if os_utils.check_security_group_rules(neutron_client,
+def open_http_port(conn, security_group_id):
+    if os_utils.check_security_group_rules(conn,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            80, 80):
 
-        if not os_utils.create_secgroup_rule(neutron_client,
+        if not os_utils.create_secgroup_rule(conn,
                                              security_group_id,
                                              'ingress',
                                              'tcp',
@@ -379,14 +496,14 @@ def open_http_port(neutron_client, security_group_id):
                     % security_group_id)
 
 
-def open_bgp_port(neutron_client, security_group_id):
-    if os_utils.check_security_group_rules(neutron_client,
+def open_bgp_port(conn, security_group_id):
+    if os_utils.check_security_group_rules(conn,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            179, 179):
 
-        if not os_utils.create_secgroup_rule(neutron_client,
+        if not os_utils.create_secgroup_rule(conn,
                                              security_group_id,
                                              'ingress',
                                              'tcp',
@@ -446,7 +563,7 @@ def run_odl_cmd(odl_node, cmd):
     return odl_node.run_cmd(karaf_cmd)
 
 
-def wait_for_cloud_init(instance):
+def wait_for_cloud_init(conn, instance):
     success = True
     # ubuntu images take a long time to start
     tries = 20
@@ -454,7 +571,8 @@ def wait_for_cloud_init(instance):
     logger.info("Waiting for cloud init of instance: {}"
                 "".format(instance.name))
     while tries > 0:
-        instance_log = instance.get_console_output()
+        instance_log = conn.compute.\
+            get_server_console_output(instance)['output']
         if "Failed to run module" in instance_log:
             success = False
             logger.error("Cloud init failed to run. Reason: %s",
@@ -477,7 +595,7 @@ def wait_for_cloud_init(instance):
 
 
 def attach_instance_to_ext_br(instance, compute_node):
-    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+    libvirt_instance_name = instance.instance_name
     installer_type = str(os.environ['INSTALLER_TYPE'].lower())
     if installer_type == "fuel":
         bridge = "br-ex"
@@ -506,7 +624,7 @@ def attach_instance_to_ext_br(instance, compute_node):
 
 
 def detach_instance_from_ext_br(instance, compute_node):
-    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+    libvirt_instance_name = instance.instance_name
     mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                                "grep running | awk '{print $2}'); "
                                "do echo -n ; sudo virsh dumpxml $vm| "
@@ -536,15 +654,14 @@ def detach_instance_from_ext_br(instance, compute_node):
         compute_node.run_cmd(cmd.format(bridge=bridge))
 
 
-def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
-                    subnet_ids, router_ids, network_ids):
-
+def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
+                    interfaces, subnet_ids, router_ids, network_ids):
     if len(floatingip_ids) != 0:
         for floatingip_id in floatingip_ids:
-            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
-                logging.error('Fail to delete all floating ips. '
-                              'Floating ip with id {} was not deleted.'.
-                              format(floatingip_id))
+            if not os_utils.delete_floating_ip(conn, floatingip_id):
+                logger.error('Failed to delete all floating IPs. '
+                             'Floating IP with id {} was not deleted.'.
+                             format(floatingip_id))
                 return False
 
     if len(bgpvpn_ids) != 0:
@@ -553,60 +670,67 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
 
     if len(interfaces) != 0:
         for router_id, subnet_id in interfaces:
-            if not os_utils.remove_interface_router(neutron_client,
+            if not os_utils.remove_interface_router(conn,
                                                     router_id, subnet_id):
-                logging.error('Fail to delete all interface routers. '
-                              'Interface router with id {} was not deleted.'.
-                              format(router_id))
+                logger.error('Failed to delete all interface routers. '
+                             'Interface router with id {} was not deleted.'.
+                             format(router_id))
 
     if len(router_ids) != 0:
         for router_id in router_ids:
-            if not os_utils.remove_gateway_router(neutron_client, router_id):
-                logging.error('Fail to delete all gateway routers. '
-                              'Gateway router with id {} was not deleted.'.
-                              format(router_id))
+            if not os_utils.remove_gateway_router(conn, router_id):
+                logger.error('Failed to delete all gateway routers. '
+                             'Gateway router with id {} was not deleted.'.
+                             format(router_id))
 
     if len(subnet_ids) != 0:
         for subnet_id in subnet_ids:
-            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
-                logging.error('Fail to delete all subnets. '
-                              'Subnet with id {} was not deleted.'.
-                              format(subnet_id))
+            if not os_utils.delete_neutron_subnet(conn, subnet_id):
+                logger.error('Failed to delete all subnets. '
+                             'Subnet with id {} was not deleted.'.
+                             format(subnet_id))
                 return False
 
     if len(router_ids) != 0:
         for router_id in router_ids:
-            if not os_utils.delete_neutron_router(neutron_client, router_id):
-                logging.error('Fail to delete all routers. '
-                              'Router with id {} was not deleted.'.
-                              format(router_id))
+            if not os_utils.delete_neutron_router(conn, router_id):
+                logger.error('Failed to delete all routers. '
+                             'Router with id {} was not deleted.'.
+                             format(router_id))
                 return False
 
     if len(network_ids) != 0:
         for network_id in network_ids:
-            if not os_utils.delete_neutron_net(neutron_client, network_id):
-                logging.error('Fail to delete all networks. '
-                              'Network with id {} was not deleted.'.
-                              format(network_id))
+            if not os_utils.delete_neutron_net(conn, network_id):
+                logger.error('Failed to delete all networks. '
+                             'Network with id {} was not deleted.'.
+                             format(network_id))
                 return False
     return True
 
 
-def cleanup_nova(nova_client, instance_ids, image_ids):
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
+    if flavor_ids:
+        for flavor_id in flavor_ids:
+            conn.compute.delete_flavor(flavor_id)
     if len(instance_ids) != 0:
         for instance_id in instance_ids:
-            if not os_utils.delete_instance(nova_client, instance_id):
-                logging.error('Fail to delete all instances. '
-                              'Instance with id {} was not deleted.'.
-                              format(instance_id))
-                return False
+            if not os_utils.delete_instance(conn, instance_id):
+                logger.error('Failed to delete all instances. '
+                             'Instance with id {} was not deleted.'.
+                             format(instance_id))
+            else:
+                wait_for_instance_delete(conn, instance_id)
+    return True
+
 
+def cleanup_glance(conn, image_ids):
     if len(image_ids) != 0:
         for image_id in image_ids:
-            if not os_utils.delete_glance_image(nova_client, image_id):
-                logging.error('Fail to delete all images. '
-                              'Image with id {} was not deleted.'.
-                              format(image_id))
+            if not os_utils.delete_glance_image(conn, image_id):
+                logger.error('Failed to delete all images. '
+                             'Image with id {} was not deleted.'.
+                             format(image_id))
                 return False
     return True
 
@@ -677,53 +801,189 @@ def is_fail_mode_secure():
                 is_secure[openstack_node.name] = True
             else:
                 # failure
-                logging.error('The fail_mode for br-int was not secure '
-                              'in {} node'.format(openstack_node.name))
+                logger.error('The fail_mode for br-int was not secure '
+                             'on node {}'.format(openstack_node.name))
                 is_secure[openstack_node.name] = False
     return is_secure
 
 
-def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
-                                subnet_quota, port_quota):
-    json_body = {"quota": {
-        "network": nw_quota,
-        "subnet": subnet_quota,
-        "port": port_quota
-    }}
-
+def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
+                                subnet_quota, port_quota, router_quota):
     try:
-        neutron_client.update_quota(tenant_id=tenant_id,
-                                    body=json_body)
+        conn.network.update_quota(tenant_id, networks=nw_quota,
+                                  subnets=subnet_quota, ports=port_quota,
+                                  routers=router_quota)
         return True
     except Exception as e:
-        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
-                     " '%s', '%s', '%s', '%s')]: %s" %
-                     (tenant_id, nw_quota, subnet_quota, port_quota, e))
+        logger.error("Error [update_nw_subnet_port_quota(network,"
+                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
+                     (tenant_id, nw_quota, subnet_quota,
+                      port_quota, router_quota, e))
         return False
 
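+# Sketch (hypothetical limits): raise the network-side quotas of a tenant:
+#   update_nw_subnet_port_quota(conn, tenant_id, nw_quota=30,
+#                               subnet_quota=30, port_quota=100,
+#                               router_quota=30)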
 
-def update_instance_quota_class(nova_client, instances_quota):
+def update_instance_quota_class(cloud, instances_quota):
     try:
-        nova_client.quota_classes.update("default", instances=instances_quota)
+        cloud.set_compute_quotas('admin', instances=instances_quota)
         return True
     except Exception as e:
-        logger.error("Error [update_instance_quota_class(nova_client,"
+        logger.error("Error [update_instance_quota_class(compute,"
                      " '%s' )]: %s" % (instances_quota, e))
         return False
 
 
-def get_neutron_quota(neutron_client, tenant_id):
+def get_neutron_quota(conn, tenant_id):
     try:
-        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
-    except Exception as e:
-        logger.error("Error in getting neutron quota for tenant "
+        return conn.network.get_quota(tenant_id)
+    except ResourceNotFound as e:
+        logger.error("Error in getting network quota for tenant "
                      " '%s' )]: %s" % (tenant_id, e))
         raise
 
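+# Sketch: get_neutron_quota() returns an openstacksdk quota object whose
+# limits are exposed as attributes (attribute names assumed from the SDK's
+# network quota resource), e.g. get_neutron_quota(conn, tenant_id).networks.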
 
-def get_nova_instances_quota(nova_client):
+def get_nova_instances_quota(cloud):
     try:
-        return nova_client.quota_classes.get("default").instances
+        return cloud.get_compute_quotas('admin').instances
     except Exception as e:
         logger.error("Error in getting nova instances quota: %s" % e)
         raise
+
+
+def update_router_extra_route(conn, router_id, extra_routes):
+    if not extra_routes:
+        return
+    routes_list = []
+    for extra_route in extra_routes:
+        route_dict = {'destination': extra_route.destination,
+                      'nexthop': extra_route.nexthop}
+        routes_list.append(route_dict)
+
+    try:
+        conn.network.update_router(router_id, routes=routes_list)
+        return True
+    except Exception as e:
+        logger.error("Error in updating router with extra route: %s" % e)
+        raise
+
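+# Sketch (hypothetical prefix and nexthop): route extra traffic through a
+# neighbour instance:
+#   update_router_extra_route(conn, router_id,
+#                             [ExtraRoute('10.0.10.0/24', '172.16.0.254')])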
+
+def update_router_no_extra_route(conn, router_ids):
+    # Clear the extra routes on every router in the list, then report once.
+    for router_id in router_ids:
+        try:
+            conn.network.update_router(router_id, routes=[])
+        except Exception as e:
+            logger.error("Error in clearing extra route: %s" % e)
+    return True
+
+
+def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+    """
+    Take a list of compute nodes and a list of OVS bridges and return,
+    as a list of lines, the command console output containing all the
+    OVS groups from all bridges and nodes in the lists.
+    """
+    cmd_out_lines = []
+    for compute_node in compute_node_list:
+        for ovs_br in ovs_br_list:
+            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
+                                  "grep group".format(ovs_br, of_protocol))
+                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
+                                  split("\n"))
+    return cmd_out_lines
+
+
+def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+    """
+    Take a list of compute nodes and a list of OVS bridges and return,
+    as a list of lines, the command console output containing all the
+    OVS flows from all bridges and nodes in the lists.
+    """
+    cmd_out_lines = []
+    for compute_node in compute_node_list:
+        for ovs_br in ovs_br_list:
+            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
+                                 "grep table=".format(ovs_br, of_protocol))
+                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
+                                  split("\n"))
+    return cmd_out_lines
+
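+# Sketch: collect every group and flow entry from br-int on all computes:
+#   groups = get_ovs_groups(compute_nodes, ['br-int'])
+#   flows = get_ovs_flows(compute_nodes, ['br-int'])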
+
+def get_odl_bgp_entity_owner(controllers):
+    """ Finds the ODL owner of the BGP entity in the cluster.
+
+    When ODL runs in clustering mode we need to execute the BGP speaker
+    related commands to that ODL which is the owner of the BGP entity.
+
+    :param controllers: list of OS controllers
+    :return controller: OS controller in which ODL BGP entity owner runs
+    """
+    if len(controllers) == 1:
+        return controllers[0]
+    else:
+        url = ('http://admin:admin@{ip}:8081/restconf/'
+               'operational/entity-owners:entity-owners/entity-type/bgp'
+               .format(ip=controllers[0].ip))
+
+        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
+                                'initial/akka.conf')
+        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
+        local_tmp_akka_conf = '/tmp/akka.conf'
+        try:
+            json_output = requests.get(url).json()
+        except Exception:
+            logger.error('Failed to find the ODL BGP '
+                         'entity owner through REST')
+            return None
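+        # The parsing below assumes a REST reply of this shape (sketch):
+        #   {"entity-type": [{"entity": [{"owner": "member-2", ...}]}]}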
+        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
+
+        for controller in controllers:
+
+            controller.run_cmd('sudo cp {0} /home/heat-admin/'
+                               .format(remote_odl_akka_conf))
+            controller.run_cmd('sudo chmod 777 {0}'
+                               .format(remote_odl_home_akka_conf))
+            controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+
+            with open(local_tmp_akka_conf) as akka_conf:
+                for line in akka_conf:
+                    if re.search(odl_bgp_owner, line):
+                        return controller
+        return None
+
+
+def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
+    json_body = {'input':
+                 {'destination-ip': remote_tep_ip,
+                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
+                 }
+    url = ('http://{ip}:8081/restconf/operations/'
+           'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
+    headers = {'Content-type': 'application/yang.data+json',
+               'Accept': 'application/yang.data+json'}
+    try:
+        requests.post(url, data=json.dumps(json_body),
+                      headers=headers,
+                      auth=HTTPBasicAuth('admin', 'admin'))
+    except Exception as e:
+        logger.error("Failed to create external tunnel endpoint on"
+                     " ODL for external tep ip %s with error %s"
+                     % (remote_tep_ip, e))
+    return None
+
+
+def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
+    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
+           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
+    logger.debug("url is %s" % url)
+    try:
+        vrf_table = requests.get(url).json()
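+        # Expected reply shape (sketch, mirrors the parsing below):
+        #   {"vrfTables": [{"vrfEntry": [{"destPrefix": ...}, ...]}]}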
+        is_ipprefix_exists = False
+        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
+            if vrf_entry['destPrefix'] == ip_prefix:
+                is_ipprefix_exists = True
+                break
+        return is_ipprefix_exists
+    except Exception as e:
+        logger.error('Failed to find ip prefix %s with error %s'
+                     % (ip_prefix, e))
+    return False