-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
 # This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
+import json
import logging
import os
-import sys
import time
import requests
import re
import subprocess
+from concurrent.futures import ThreadPoolExecutor
+from openstack.exceptions import ResourceNotFound
+from requests.auth import HTTPBasicAuth
-import functest.utils.openstack_utils as os_utils
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
+import sdnvpn.lib.openstack_utils as os_utils
-logger = logging.getLogger('sndvpn_test_utils')
+logger = logging.getLogger('sdnvpn_test_utils')
common_config = sdnvpn_config.CommonConfig()
ODL_USER = 'admin'
ODL_PASS = 'admin'
+executor = ThreadPoolExecutor(5)
+
+
+class ExtraRoute(object):
+ """
+    Class to represent an extra route for a router
+ """
+
+ def __init__(self, destination, nexthop):
+ self.destination = destination
+ self.nexthop = nexthop
+
+
+class AllowedAddressPair(object):
+ """
+    Class to represent an allowed address pair for a neutron port
+ """
+
+ def __init__(self, ipaddress, macaddress):
+ self.ipaddress = ipaddress
+ self.macaddress = macaddress
+
+
+def create_default_flavor():
+ return os_utils.get_or_create_flavor(common_config.default_flavor,
+ common_config.default_flavor_ram,
+ common_config.default_flavor_disk,
+ common_config.default_flavor_vcpus)
+
def create_custom_flavor():
     return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                          common_config.custom_flavor_ram,
                                          common_config.custom_flavor_disk,
                                          common_config.custom_flavor_vcpus)
-def create_net(neutron_client, name):
+def create_net(conn, name):
logger.debug("Creating network %s", name)
- net_id = os_utils.create_neutron_net(neutron_client, name)
+ net_id = os_utils.create_neutron_net(conn, name)
if not net_id:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(name))
return net_id
-def create_subnet(neutron_client, name, cidr, net_id):
+def create_subnet(conn, name, cidr, net_id):
logger.debug("Creating subnet %s in network %s with cidr %s",
name, net_id, cidr)
- subnet_id = os_utils.create_neutron_subnet(neutron_client,
+ subnet_id = os_utils.create_neutron_subnet(conn,
name,
cidr,
net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the neutron subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron subnet {}".format(name))
return subnet_id
-def create_network(neutron_client, net, subnet1, cidr1,
+def create_network(conn, net, subnet1, cidr1,
router, subnet2=None, cidr2=None):
"""Network assoc won't work for networks/subnets created by this function.
-
It is an ODL limitation due to it handling routers as vpns.
See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
- network_dic = os_utils.create_network_full(neutron_client,
+ network_dic = os_utils.create_network_full(conn,
net,
subnet1,
                                                router,
                                                cidr1)
if not network_dic:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(net))
net_id = network_dic["net_id"]
subnet_id = network_dic["subnet_id"]
router_id = network_dic["router_id"]
if subnet2 is not None:
logger.debug("Creating and attaching a second subnet...")
subnet_id = os_utils.create_neutron_subnet(
- neutron_client, subnet2, cidr2, net_id)
+ conn, subnet2, cidr2, net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the second subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the second subnet {}".format(subnet2))
logger.debug("Subnet '%s' created successfully" % subnet_id)
return net_id, subnet_id, router_id
-def create_instance(nova_client,
+def get_port(conn, instance_id):
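+    """Return the first port whose device_id matches instance_id.
+
+    Returns None when the instance has no port.
+    """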
+ ports = os_utils.get_port_list(conn)
+ for port in ports:
+ if port.device_id == instance_id:
+ return port
+ return None
+
+
+def update_port_allowed_address_pairs(conn, port_id, address_pairs):
+ if len(address_pairs) <= 0:
+ return
+ allowed_address_pairs = []
+ for address_pair in address_pairs:
+ address_pair_dict = {'ip_address': address_pair.ipaddress,
+ 'mac_address': address_pair.macaddress}
+ allowed_address_pairs.append(address_pair_dict)
+
+ try:
+ port = conn.network.\
+ update_port(port_id, allowed_address_pairs=allowed_address_pairs)
+ return port.id
+ except Exception as e:
+ logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
+ " %s" % (port_id, address_pairs, e))
+ return None
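+
+# Usage sketch (hypothetical values), assuming an authenticated
+# openstack.connection.Connection as conn and an existing port:
+#   pairs = [AllowedAddressPair('10.0.0.99', 'fa:16:3e:00:00:01')]
+#   update_port_allowed_address_pairs(conn, port.id, pairs)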
+
+
+def create_instance(conn,
name,
image_id,
network_id,
sg_id,
secgroup_name=None,
fixed_ip=None,
- compute_node='',
+ compute_node=None,
userdata=None,
- files=None,
+ files=[],
**kwargs
):
if 'flavor' not in kwargs:
if instance is None:
logger.error("Error while booting instance.")
- sys.exit(-1)
+ raise Exception("Error while booting instance {}".format(name))
else:
+ # Retrieve IP of INSTANCE
+ network_name = conn.network.get_network(network_id).name
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.get(network_name)[0]['addr']
logger.debug("Instance '%s' booted successfully. IP='%s'." %
- (name, instance.networks.itervalues().next()[0]))
- # Retrieve IP of INSTANCE
- # instance_ip = instance.networks.get(network_id)[0]
+ (name, instance_ip))
if secgroup_name:
logger.debug("Adding '%s' to security group '%s'..."
else:
logger.debug("Adding '%s' to security group '%s'..."
% (name, sg_id))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+ os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
return instance
-def generate_ping_userdata(ips_array):
+def generate_ping_userdata(ips_array, ping_count=10):
ips = ""
for ip in ips_array:
ips = ("%s %s" % (ips, ip))
"while true; do\n"
" for i do\n"
" ip=$i\n"
- " ping -c 10 $ip 2>&1 >/dev/null\n"
+ " ping -c %s $ip 2>&1 >/dev/null\n"
" RES=$?\n"
" if [ \"Z$RES\" = \"Z0\" ] ; then\n"
" echo ping $ip OK\n"
" done\n"
" sleep 1\n"
"done\n"
- % ips)
+ % (ips, ping_count))
def generate_userdata_common():
return (u1 + u2)
+def generate_userdata_interface_create(interface_name, interface_number,
+ ip_Address, net_mask):
+ return ("#!/bin/sh\n"
+ "set -xe\n"
+ "sudo useradd -m sdnvpn\n"
+ "sudo adduser sdnvpn sudo\n"
+            "echo sdnvpn:opnfv | sudo chpasswd\n"
+ "sleep 20\n"
+ "sudo ifconfig %s:%s %s netmask %s up\n"
+ % (interface_name, interface_number,
+ ip_Address, net_mask))
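+
+# For example (hypothetical values), generate_userdata_interface_create(
+# 'eth0', 1, '10.10.10.5', '255.255.255.0') yields a script that brings
+# up alias interface eth0:1 with that address inside the guest.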
+
+
def get_installerHandler():
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
installer_ip = get_installer_ip()
if installer_type not in ["fuel", "apex"]:
- raise ValueError("%s is not supported" % installer_type)
+        logger.warn("installer type %s is neither fuel nor apex. "
+                    "Returning None for installer handler" % installer_type)
+ return None
else:
if installer_type in ["apex"]:
developHandler = DeploymentFactory.get_handler(
return str(os.environ['INSTALLER_IP'])
-def get_instance_ip(instance):
- instance_ip = instance.networks.itervalues().next()[0]
+def get_instance_ip(conn, instance):
+    # list() so this also works on Python 3, where dict.values()
+    # returns a non-indexable view
+    instance_ip = list(conn.compute.get_server(instance)
+                       .addresses.values())[0][0]['addr']
return instance_ip
-def wait_for_instance(instance):
- logger.info("Waiting for instance %s to get a DHCP lease..." % instance.id)
- # The sleep this function replaced waited for 80s
- tries = 40
+def wait_for_instance(instance, pattern=".* login:", tries=40):
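+    """Poll the console until pattern matches or tries run out."""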
+ logger.info("Waiting for instance %s to boot up" % instance.id)
+ conn = os_utils.get_os_connection()
sleep_time = 2
- pattern = "Lease of .* obtained, lease time"
expected_regex = re.compile(pattern)
console_log = ""
while tries > 0 and not expected_regex.search(console_log):
- console_log = instance.get_console_output()
+ console_log = conn.compute.\
+ get_server_console_output(instance)['output']
time.sleep(sleep_time)
tries -= 1
if not expected_regex.search(console_log):
- logger.error("Instance %s seems to have failed leasing an IP."
+ logger.error("Instance %s does not boot up properly."
% instance.id)
return False
return True
-def wait_for_instances_up(*args):
- check = [wait_for_instance(instance) for instance in args]
+def wait_for_instances_up(*instances):
+ check = [wait_for_instance(instance) for instance in instances]
return all(check)
+def wait_for_instances_get_dhcp(*instances):
+ check = [wait_for_instance(instance, "Lease of .* obtained")
+ for instance in instances]
+ return all(check)
+
+
+def async_Wait_for_instances(instances, tries=40):
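+    """Wait in parallel for all instances to boot.
+
+    Submits one wait_for_instance() per instance to the module-level
+    ThreadPoolExecutor and blocks until every result is in.
+    """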
+ if len(instances) <= 0:
+ return
+ futures = []
+ for instance in instances:
+ future = executor.submit(wait_for_instance,
+ instance,
+ ".* login:",
+ tries)
+ futures.append(future)
+ results = []
+ for future in futures:
+ results.append(future.result())
+ if False in results:
+        logger.error("one or more instances are not yet booted up")
+
+
+def wait_for_instance_delete(conn, instance_id, tries=30):
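+    """Poll until instance_id disappears from the server list."""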
+ sleep_time = 2
+ instances = [instance_id]
+ logger.debug("Waiting for instance %s to be deleted"
+ % (instance_id))
+ while tries > 0 and instance_id in instances:
+ instances = [instance.id for instance in
+ os_utils.get_instances(conn)]
+ time.sleep(sleep_time)
+ tries -= 1
+ if instance_id in instances:
+ logger.error("Deletion of instance %s failed" %
+ (instance_id))
+
+
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
tries = 30
sleep_time = 1
% (bgpvpn_id, net_id))
while tries > 0 and net_id not in nets:
- nets = os_utils.get_bgpvpn_networks(neutron_client, bgpvpn_id)
+ nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
time.sleep(sleep_time)
tries -= 1
if net_id not in nets:
logger.debug("Waiting for router %s to associate with BGPVPN %s "
% (bgpvpn_id, router_id))
while tries > 0 and router_id not in routers:
- routers = os_utils.get_bgpvpn_routers(neutron_client, bgpvpn_id)
+ routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
time.sleep(sleep_time)
tries -= 1
if router_id not in routers:
time.sleep(30)
-def assert_and_get_compute_nodes(nova_client, required_node_number=2):
+def assert_and_get_compute_nodes(conn, required_node_number=2):
"""Get the compute nodes in the deployment
-
Exit if the deployment doesn't have enough compute nodes"""
- compute_nodes = os_utils.get_hypervisors(nova_client)
+ compute_nodes = os_utils.get_hypervisors(conn)
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
logger.error("There are %s compute nodes in the deployment. "
"Minimum number of nodes to complete the test is 2."
% num_compute_nodes)
- sys.exit(-1)
+ raise Exception("There are {} compute nodes in the deployment. "
+ "Minimum number of nodes to complete the test"
+ " is 2.".format(num_compute_nodes))
logger.debug("Compute nodes: %s" % compute_nodes)
return compute_nodes
-def open_icmp(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_icmp(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'icmp'):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'icmp'):
% security_group_id)
-def open_http_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_http_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
80, 80):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
% security_group_id)
-def open_bgp_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_bgp_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
179, 179):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
def run_odl_cmd(odl_node, cmd):
'''Run a command in the OpenDaylight Karaf shell
-
This is a bit flimsy because of shell quote escaping, make sure that
the cmd passed does not have any top level double quotes or this
function will break.
-
The /dev/null is used because client works, but outputs something
that contains "ERROR" and run_cmd doesn't like that.
-
'''
karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
' 2>/dev/null' % cmd)
return odl_node.run_cmd(karaf_cmd)
-def wait_for_cloud_init(instance):
+def wait_for_cloud_init(conn, instance):
success = True
# ubuntu images take a long time to start
tries = 20
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
- instance_log = instance.get_console_output()
+ instance_log = conn.compute.\
+ get_server_console_output(instance)['output']
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
def attach_instance_to_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
if installer_type == "fuel":
bridge = "br-ex"
def detach_instance_from_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
compute_node.run_cmd(cmd.format(bridge=bridge))
-def cleanup_neutron(neutron_client, bgpvpn_ids, interfaces, subnet_ids,
- router_ids, network_ids):
+def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
+ interfaces, subnet_ids, router_ids, network_ids):
+ if len(floatingip_ids) != 0:
+ for floatingip_id in floatingip_ids:
+ if not os_utils.delete_floating_ip(conn, floatingip_id):
+ logger.error('Fail to delete all floating ips. '
+ 'Floating ip with id {} was not deleted.'.
+ format(floatingip_id))
+ return False
if len(bgpvpn_ids) != 0:
for bgpvpn_id in bgpvpn_ids:
- os_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+ delete_bgpvpn(neutron_client, bgpvpn_id)
if len(interfaces) != 0:
for router_id, subnet_id in interfaces:
- if not os_utils.remove_interface_router(neutron_client,
+ if not os_utils.remove_interface_router(conn,
router_id, subnet_id):
- logging.error('Fail to delete all interface routers. '
- 'Interface router with id {} was not deleted.'.
- format(router_id))
+ logger.error('Fail to delete all interface routers. '
+ 'Interface router with id {} was not deleted.'.
+ format(router_id))
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.remove_gateway_router(neutron_client, router_id):
- logging.error('Fail to delete all gateway routers. '
- 'Gateway router with id {} was not deleted.'.
- format(router_id))
+ if not os_utils.remove_gateway_router(conn, router_id):
+ logger.error('Fail to delete all gateway routers. '
+ 'Gateway router with id {} was not deleted.'.
+ format(router_id))
if len(subnet_ids) != 0:
for subnet_id in subnet_ids:
- if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
- logging.error('Fail to delete all subnets. '
- 'Subnet with id {} was not deleted.'.
- format(subnet_id))
+ if not os_utils.delete_neutron_subnet(conn, subnet_id):
+ logger.error('Fail to delete all subnets. '
+ 'Subnet with id {} was not deleted.'.
+ format(subnet_id))
return False
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.delete_neutron_router(neutron_client, router_id):
- logging.error('Fail to delete all routers. '
- 'Router with id {} was not deleted.'.
- format(router_id))
+ if not os_utils.delete_neutron_router(conn, router_id):
+ logger.error('Fail to delete all routers. '
+ 'Router with id {} was not deleted.'.
+ format(router_id))
return False
if len(network_ids) != 0:
for network_id in network_ids:
- if not os_utils.delete_neutron_net(neutron_client, network_id):
- logging.error('Fail to delete all networks. '
- 'Network with id {} was not deleted.'.
- format(network_id))
+ if not os_utils.delete_neutron_net(conn, network_id):
+ logger.error('Fail to delete all networks. '
+ 'Network with id {} was not deleted.'.
+ format(network_id))
return False
return True
-def cleanup_nova(nova_client, floatingip_ids, instance_ids, image_ids):
-
- if len(floatingip_ids) != 0:
- for floatingip_id in floatingip_ids:
- if not os_utils.delete_floating_ip(nova_client, floatingip_id):
- logging.error('Fail to delete all floating ips. '
- 'Floating ip with id {} was not deleted.'.
- format(floatingip_id))
- return False
-
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
+ if flavor_ids is not None and len(flavor_ids) != 0:
+ for flavor_id in flavor_ids:
+ conn.compute.delete_flavor(flavor_id)
if len(instance_ids) != 0:
for instance_id in instance_ids:
- if not os_utils.delete_instance(nova_client, instance_id):
- logging.error('Fail to delete all instances. '
- 'Instance with id {} was not deleted.'.
- format(instance_id))
- return False
+ if not os_utils.delete_instance(conn, instance_id):
+ logger.error('Fail to delete all instances. '
+ 'Instance with id {} was not deleted.'.
+                             format(instance_id))
+                return False
+ else:
+ wait_for_instance_delete(conn, instance_id)
+ return True
+
+
+def cleanup_glance(conn, image_ids):
if len(image_ids) != 0:
for image_id in image_ids:
- if not os_utils.delete_glance_image(nova_client, image_id):
- logging.error('Fail to delete all images. '
- 'Image with id {} was not deleted.'.
- format(image_id))
+ if not os_utils.delete_glance_image(conn, image_id):
+ logger.error('Fail to delete all images. '
+ 'Image with id {} was not deleted.'.
+ format(image_id))
return False
return True
+
+
+def create_bgpvpn(neutron_client, **kwargs):
+    # kwargs can carry any bgpvpn attribute the API accepts,
+    # e.g. route_distinguishers and route_targets
+ json_body = {"bgpvpn": kwargs}
+ return neutron_client.create_bgpvpn(json_body)
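+
+# Usage sketch (hypothetical values):
+#   bgpvpn = create_bgpvpn(neutron_client, name="sdnvpn-1",
+#                          route_distinguishers=['64512:100'],
+#                          route_targets=['64512:100'])
+#   bgpvpn_id = bgpvpn['bgpvpn']['id']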
+
+
+def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
+ json_body = {"bgpvpn": kwargs}
+ return neutron_client.update_bgpvpn(bgpvpn_id, json_body)
+
+
+def delete_bgpvpn(neutron_client, bgpvpn_id):
+ return neutron_client.delete_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn(neutron_client, bgpvpn_id):
+ return neutron_client.show_bgpvpn(bgpvpn_id)
+
+
+def get_bgpvpn_routers(neutron_client, bgpvpn_id):
+ return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']
+
+
+def get_bgpvpn_networks(neutron_client, bgpvpn_id):
+ return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
+
+
+def create_router_association(neutron_client, bgpvpn_id, router_id):
+ json_body = {"router_association": {"router_id": router_id}}
+ return neutron_client.create_router_association(bgpvpn_id, json_body)
+
+
+def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
+ json_body = {"network_association": {"network_id": neutron_network_id}}
+ return neutron_client.create_network_association(bgpvpn_id, json_body)
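+
+# A BGPVPN only takes effect once it is bound to a router or a network,
+# e.g. (hypothetical IDs):
+#   create_router_association(neutron_client, bgpvpn_id, router_id)
+# then poll with the wait_for_bgp_* helpers above until the association
+# shows up.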
+
+
+def is_fail_mode_secure():
+ """
+    Check whether the fail_mode attribute of the OVS br-int bridge is
+    set to 'secure' on every active OpenStack node. Returns a dict
+    mapping node name to the result.
+ """
+ is_secure = {}
+ openstack_nodes = get_nodes()
+ get_ovs_int_cmd = ("sudo ovs-vsctl show | "
+ "grep -i bridge | "
+ "awk '{print $2}'")
+ # Define OVS get fail_mode command
+ get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
+ for openstack_node in openstack_nodes:
+ if not openstack_node.is_active():
+ continue
+
+ ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
+ strip().split('\n'))
+ if 'br-int' in ovs_int_list:
+ # Execute get fail_mode command
+ br_int_fail_mode = (openstack_node.
+ run_cmd(get_ovs_fail_mode_cmd).strip())
+ if br_int_fail_mode == 'secure':
+ # success
+ is_secure[openstack_node.name] = True
+ else:
+ # failure
+ logger.error('The fail_mode for br-int was not secure '
+                             'on node {}'.format(openstack_node.name))
+ is_secure[openstack_node.name] = False
+ return is_secure
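+
+# A deployment passes the check only if every node reports secure, e.g.:
+#   assert all(is_fail_mode_secure().values())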
+
+
+def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
+ subnet_quota, port_quota, router_quota):
+ try:
+ conn.network.update_quota(tenant_id, networks=nw_quota,
+ subnets=subnet_quota, ports=port_quota,
+ routers=router_quota)
+ return True
+ except Exception as e:
+ logger.error("Error [update_nw_subnet_port_quota(network,"
+ " '%s', '%s', '%s', '%s, %s')]: %s" %
+ (tenant_id, nw_quota, subnet_quota,
+ port_quota, router_quota, e))
+ return False
+
+
+def update_instance_quota_class(cloud, instances_quota):
+ try:
+ cloud.set_compute_quotas('admin', instances=instances_quota)
+ return True
+ except Exception as e:
+ logger.error("Error [update_instance_quota_class(compute,"
+ " '%s' )]: %s" % (instances_quota, e))
+ return False
+
+
+def get_neutron_quota(conn, tenant_id):
+ try:
+ return conn.network.get_quota(tenant_id)
+ except ResourceNotFound as e:
+ logger.error("Error in getting network quota for tenant "
+ " '%s' )]: %s" % (tenant_id, e))
+ raise
+
+
+def get_nova_instances_quota(cloud):
+ try:
+ return cloud.get_compute_quotas('admin').instances
+ except Exception as e:
+ logger.error("Error in getting nova instances quota: %s" % e)
+ raise
+
+
+def update_router_extra_route(conn, router_id, extra_routes):
+ if len(extra_routes) <= 0:
+ return
+ routes_list = []
+ for extra_route in extra_routes:
+ route_dict = {'destination': extra_route.destination,
+ 'nexthop': extra_route.nexthop}
+ routes_list.append(route_dict)
+
+ try:
+ conn.network.update_router(router_id, routes=routes_list)
+ return True
+ except Exception as e:
+ logger.error("Error in updating router with extra route: %s" % e)
+ raise
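+
+# Usage sketch (hypothetical addresses):
+#   routes = [ExtraRoute('10.20.30.0/24', '10.0.0.12')]
+#   update_router_extra_route(conn, router_id, routes)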
+
+
+def update_router_no_extra_route(conn, router_ids):
+    for router_id in router_ids:
+        try:
+            conn.network.update_router(router_id, routes=[])
+        except Exception as e:
+            logger.error("Error in clearing extra route: %s" % e)
+            return False
+    # report success only after every router has been cleared
+    return True
+
+
+def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+ """
+    Take a list of compute nodes and a list of OVS bridges and return
+    the combined console output, as a list of lines, containing the
+    OVS groups of every bridge on every node.
+ """
+ cmd_out_lines = []
+ for compute_node in compute_node_list:
+ for ovs_br in ovs_br_list:
+ if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+ ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
+ "grep group".format(ovs_br, of_protocol))
+ cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
+ split("\n"))
+ return cmd_out_lines
+
+
+def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
+ """
+    Take a list of compute nodes and a list of OVS bridges and return
+    the combined console output, as a list of lines, containing the
+    OVS flows of every bridge on every node.
+ """
+ cmd_out_lines = []
+ for compute_node in compute_node_list:
+ for ovs_br in ovs_br_list:
+ if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
+ ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
+ "grep table=".format(ovs_br, of_protocol))
+ cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
+ split("\n"))
+ return cmd_out_lines
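+
+# Both OVS helpers shell out to ovs-ofctl on every node, e.g.:
+#   flows = get_ovs_flows(compute_nodes, ['br-int'])
+#   groups = get_ovs_groups(compute_nodes, ['br-int'])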
+
+
+def get_odl_bgp_entity_owner(controllers):
+ """ Finds the ODL owner of the BGP entity in the cluster.
+
+    When ODL runs in clustering mode, BGP speaker related commands must
+    be executed on the ODL instance that owns the BGP entity.
+
+ :param controllers: list of OS controllers
+ :return controller: OS controller in which ODL BGP entity owner runs
+ """
+ if len(controllers) == 1:
+ return controllers[0]
+ else:
+ url = ('http://admin:admin@{ip}:8081/restconf/'
+ 'operational/entity-owners:entity-owners/entity-type/bgp'
+ .format(ip=controllers[0].ip))
+
+ remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
+ 'initial/akka.conf')
+ remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
+ local_tmp_akka_conf = '/tmp/akka.conf'
+ try:
+ json_output = requests.get(url).json()
+ except Exception:
+ logger.error('Failed to find the ODL BGP '
+ 'entity owner through REST')
+ return None
+ odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
+
+ for controller in controllers:
+
+ controller.run_cmd('sudo cp {0} /home/heat-admin/'
+ .format(remote_odl_akka_conf))
+ controller.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+
+ for line in open(local_tmp_akka_conf):
+ if re.search(odl_bgp_owner, line):
+ return controller
+ return None
+
+
+def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
+ json_body = {'input':
+ {'destination-ip': remote_tep_ip,
+ 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
+ }
+ url = ('http://{ip}:8081/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ try:
+ requests.post(url, data=json.dumps(json_body),
+ headers=headers,
+ auth=HTTPBasicAuth('admin', 'admin'))
+ except Exception as e:
+ logger.error("Failed to create external tunnel endpoint on"
+ " ODL for external tep ip %s with error %s"
+ % (remote_tep_ip, e))
+ return None
+
+
+def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
+ url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
+ 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
+    logger.debug("url is %s" % url)
+ try:
+ vrf_table = requests.get(url).json()
+ is_ipprefix_exists = False
+ for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
+ if vrf_entry['destPrefix'] == ip_prefix:
+ is_ipprefix_exists = True
+ break
+ return is_ipprefix_exists
+ except Exception as e:
+ logger.error('Failed to find ip prefix %s with error %s'
+ % (ip_prefix, e))
+ return False
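+
+
+# Usage sketch (hypothetical prefix and VRF id): verify that a route was
+# advertised into the ODL FIB before testing the dataplane:
+#   assert is_fib_entry_present_on_odl(controllers, '10.10.10.0/24',
+#                                      '64512:100')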