3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
17 from concurrent.futures import ThreadPoolExecutor
18 from openstack.exceptions import ResourceNotFound
19 from requests.auth import HTTPBasicAuth
21 from opnfv.deployment.factory import Factory as DeploymentFactory
23 from sdnvpn.lib import config as sdnvpn_config
24 import sdnvpn.lib.openstack_utils as os_utils
26 logger = logging.getLogger('sdnvpn_test_utils')
28 common_config = sdnvpn_config.CommonConfig()
33 executor = ThreadPoolExecutor(5)
36 class ExtraRoute(object):
38 Class to represent extra route for a router
41 def __init__(self, destination, nexthop):
42 self.destination = destination
43 self.nexthop = nexthop
46 class AllowedAddressPair(object):
48 Class to represent allowed address pair for a neutron port
51 def __init__(self, ipaddress, macaddress):
52 self.ipaddress = ipaddress
53 self.macaddress = macaddress
def create_default_flavor():
    """Ensure the default test flavor exists and return it.

    Reads the default flavor name/ram/disk/vcpus from the common
    configuration and delegates to os_utils.get_or_create_flavor.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(
        cfg.default_flavor,
        cfg.default_flavor_ram,
        cfg.default_flavor_disk,
        cfg.default_flavor_vcpus)
def create_custom_flavor():
    """Ensure the custom test flavor exists and return it.

    Reads the custom flavor name/ram/disk/vcpus from the common
    configuration and delegates to os_utils.get_or_create_flavor.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(
        cfg.custom_flavor_name,
        cfg.custom_flavor_ram,
        cfg.custom_flavor_disk,
        cfg.custom_flavor_vcpus)
70 def create_net(conn, name):
71 logger.debug("Creating network %s", name)
72 net_id = os_utils.create_neutron_net(conn, name)
75 "There has been a problem when creating the neutron network")
76 raise Exception("There has been a problem when creating"
77 " the neutron network {}".format(name))
81 def create_subnet(conn, name, cidr, net_id):
82 logger.debug("Creating subnet %s in network %s with cidr %s",
84 subnet_id = os_utils.create_neutron_subnet(conn,
90 "There has been a problem when creating the neutron subnet")
91 raise Exception("There has been a problem when creating"
92 " the neutron subnet {}".format(name))
96 def create_network(conn, net, subnet1, cidr1,
97 router, subnet2=None, cidr2=None):
98 """Network assoc won't work for networks/subnets created by this function.
99 It is an ODL limitation due to it handling routers as vpns.
100 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
101 network_dic = os_utils.create_network_full(conn,
108 "There has been a problem when creating the neutron network")
109 raise Exception("There has been a problem when creating"
110 " the neutron network {}".format(net))
111 net_id = network_dic["net_id"]
112 subnet_id = network_dic["subnet_id"]
113 router_id = network_dic["router_id"]
115 if subnet2 is not None:
116 logger.debug("Creating and attaching a second subnet...")
117 subnet_id = os_utils.create_neutron_subnet(
118 conn, subnet2, cidr2, net_id)
121 "There has been a problem when creating the second subnet")
122 raise Exception("There has been a problem when creating"
123 " the second subnet {}".format(subnet2))
124 logger.debug("Subnet '%s' created successfully" % subnet_id)
125 return net_id, subnet_id, router_id
128 def get_port(conn, instance_id):
129 ports = os_utils.get_port_list(conn)
131 if port.device_id == instance_id:
136 def update_port_allowed_address_pairs(conn, port_id, address_pairs):
137 if len(address_pairs) <= 0:
139 allowed_address_pairs = []
140 for address_pair in address_pairs:
141 address_pair_dict = {'ip_address': address_pair.ipaddress,
142 'mac_address': address_pair.macaddress}
143 allowed_address_pairs.append(address_pair_dict)
146 port = conn.network.\
147 update_port(port_id, allowed_address_pairs=allowed_address_pairs)
149 except Exception as e:
150 logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
151 " %s" % (port_id, address_pairs, e))
155 def create_instance(conn,
167 if 'flavor' not in kwargs:
168 kwargs['flavor'] = common_config.default_flavor
170 logger.info("Creating instance '%s'..." % name)
172 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
173 " network=%s\n secgroup=%s \n hypervisor=%s \n"
174 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
175 % (name, kwargs['flavor'], image_id, network_id, sg_id,
176 compute_node, fixed_ip, files, userdata))
177 instance = os_utils.create_instance_and_wait_for_active(
184 av_zone=compute_node,
189 logger.error("Error while booting instance.")
190 raise Exception("Error while booting instance {}".format(name))
192 # Retrieve IP of INSTANCE
193 network_name = conn.network.get_network(network_id).name
194 instance_ip = conn.compute.get_server(instance).\
195 addresses.get(network_name)[0]['addr']
196 logger.debug("Instance '%s' booted successfully. IP='%s'." %
200 logger.debug("Adding '%s' to security group '%s'..."
201 % (name, secgroup_name))
203 logger.debug("Adding '%s' to security group '%s'..."
205 os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
210 def generate_ping_userdata(ips_array, ping_count=10):
213 ips = ("%s %s" % (ips, ip))
215 ips = ips.replace(' ', ' ')
216 return ("#!/bin/sh\n"
221 " ping -c %s $ip 2>&1 >/dev/null\n"
223 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
224 " echo ping $ip OK\n"
225 " else echo ping $ip KO\n"
233 def generate_userdata_common():
234 return ("#!/bin/sh\n"
235 "sudo mkdir -p /home/cirros/.ssh/\n"
236 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
237 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
238 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
239 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
240 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
241 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
242 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
243 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
244 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
245 "chmod 700 /home/cirros/.ssh\n"
246 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
247 "chmod 600 /home/cirros/.ssh/id_rsa\n"
251 def generate_userdata_with_ssh(ips_array):
252 u1 = generate_userdata_common()
256 ips = ("%s %s" % (ips, ip))
258 ips = ips.replace(' ', ' ')
264 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
265 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
267 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
268 " else echo $ip 'not reachable';fi;\n"
276 def generate_userdata_interface_create(interface_name, interface_number,
277 ip_Address, net_mask):
278 return ("#!/bin/sh\n"
280 "sudo useradd -m sdnvpn\n"
281 "sudo adduser sdnvpn sudo\n"
282 "sudo echo sdnvpn:opnfv | chpasswd\n"
284 "sudo ifconfig %s:%s %s netmask %s up\n"
285 % (interface_name, interface_number,
286 ip_Address, net_mask))
289 def get_installerHandler():
290 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
291 installer_ip = get_installer_ip()
293 if installer_type not in ["fuel", "apex"]:
294 logger.warn("installer type %s is neither fuel nor apex."
295 "returning None for installer handler" % installer_type)
298 if installer_type in ["apex"]:
299 developHandler = DeploymentFactory.get_handler(
303 pkey_file="/root/.ssh/id_rsa")
305 if installer_type in ["fuel"]:
306 developHandler = DeploymentFactory.get_handler(
311 return developHandler
315 developHandler = get_installerHandler()
316 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP address taken from the INSTALLER_IP
    environment variable.

    Raises a KeyError if INSTALLER_IP is not set.
    """
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
323 def get_instance_ip(conn, instance):
324 instance_ip = conn.compute.get_server(instance).\
325 addresses.values()[0][0]['addr']
329 def wait_for_instance(instance, pattern=".* login:", tries=40):
330 logger.info("Waiting for instance %s to boot up" % instance.id)
331 conn = os_utils.get_os_connection()
333 expected_regex = re.compile(pattern)
335 while tries > 0 and not expected_regex.search(console_log):
336 console_log = conn.compute.\
337 get_server_console_output(instance)['output']
338 time.sleep(sleep_time)
341 if not expected_regex.search(console_log):
342 logger.error("Instance %s does not boot up properly."
348 def wait_for_instances_up(*instances):
349 check = [wait_for_instance(instance) for instance in instances]
353 def wait_for_instances_get_dhcp(*instances):
354 check = [wait_for_instance(instance, "Lease of .* obtained")
355 for instance in instances]
359 def async_Wait_for_instances(instances, tries=40):
360 if len(instances) <= 0:
363 for instance in instances:
364 future = executor.submit(wait_for_instance,
368 futures.append(future)
370 for future in futures:
371 results.append(future.result())
373 logger.error("one or more instances is not yet booted up")
376 def wait_for_instance_delete(conn, instance_id, tries=30):
378 instances = [instance_id]
379 logger.debug("Waiting for instance %s to be deleted"
381 while tries > 0 and instance_id in instances:
382 instances = [instance.id for instance in
383 os_utils.get_instances(conn)]
384 time.sleep(sleep_time)
386 if instance_id in instances:
387 logger.error("Deletion of instance %s failed" %
391 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
395 logger.debug("Waiting for network %s to associate with BGPVPN %s "
396 % (bgpvpn_id, net_id))
398 while tries > 0 and net_id not in nets:
399 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
400 time.sleep(sleep_time)
402 if net_id not in nets:
403 logger.error("Association of network %s with BGPVPN %s failed" %
409 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
410 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
412 # Return True if all associations succeeded
416 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
420 logger.debug("Waiting for router %s to associate with BGPVPN %s "
421 % (bgpvpn_id, router_id))
422 while tries > 0 and router_id not in routers:
423 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
424 time.sleep(sleep_time)
426 if router_id not in routers:
427 logger.error("Association of router %s with BGPVPN %s failed" %
428 (router_id, bgpvpn_id))
433 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
434 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
436 # Return True if all associations succeeded
440 def wait_before_subtest(*args, **kwargs):
441 ''' This is a placeholder.
442 TODO: Replace delay with polling logic. '''
446 def assert_and_get_compute_nodes(conn, required_node_number=2):
447 """Get the compute nodes in the deployment
448 Exit if the deployment doesn't have enough compute nodes"""
449 compute_nodes = os_utils.get_hypervisors(conn)
451 num_compute_nodes = len(compute_nodes)
452 if num_compute_nodes < 2:
453 logger.error("There are %s compute nodes in the deployment. "
454 "Minimum number of nodes to complete the test is 2."
456 raise Exception("There are {} compute nodes in the deployment. "
457 "Minimum number of nodes to complete the test"
458 " is 2.".format(num_compute_nodes))
460 logger.debug("Compute nodes: %s" % compute_nodes)
464 def open_icmp(conn, security_group_id):
465 if os_utils.check_security_group_rules(conn,
470 if not os_utils.create_secgroup_rule(conn,
474 logger.error("Failed to create icmp security group rule...")
476 logger.info("This rule exists for security group: %s"
480 def open_http_port(conn, security_group_id):
481 if os_utils.check_security_group_rules(conn,
487 if not os_utils.create_secgroup_rule(conn,
493 logger.error("Failed to create http security group rule...")
495 logger.info("This rule exists for security group: %s"
499 def open_bgp_port(conn, security_group_id):
500 if os_utils.check_security_group_rules(conn,
506 if not os_utils.create_secgroup_rule(conn,
511 logger.error("Failed to create bgp security group rule...")
513 logger.info("This rule exists for security group: %s"
517 def exec_cmd(cmd, verbose):
519 logger.debug("Executing '%s'" % cmd)
520 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
521 stderr=subprocess.STDOUT)
523 for line in iter(p.stdout.readline, b''):
530 returncode = p.wait()
532 logger.error("Command %s failed to execute." % cmd)
535 return output, success
538 def check_odl_fib(ip, controller_ip):
539 """Check that there is an entry in the ODL Fib for `ip`"""
540 url = "http://" + controller_ip + \
541 ":8181/restconf/config/odl-fib:fibEntries/"
542 logger.debug("Querring '%s' for FIB entries", url)
543 res = requests.get(url, auth=(ODL_USER, ODL_PASS))
544 if res.status_code != 200:
545 logger.error("OpenDaylight response status code: %s", res.status_code)
547 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
549 logger.debug("OpenDaylight FIB: \n%s" % res.text)
550 return ip in res.text
553 def run_odl_cmd(odl_node, cmd):
554 '''Run a command in the OpenDaylight Karaf shell
555 This is a bit flimsy because of shell quote escaping, make sure that
556 the cmd passed does not have any top level double quotes or this
558 The /dev/null is used because client works, but outputs something
559 that contains "ERROR" and run_cmd doesn't like that.
561 karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
562 ' 2>/dev/null' % cmd)
563 return odl_node.run_cmd(karaf_cmd)
566 def wait_for_cloud_init(conn, instance):
568 # ubuntu images take a long time to start
571 logger.info("Waiting for cloud init of instance: {}"
572 "".format(instance.name))
574 instance_log = conn.compute.\
575 get_server_console_output(instance)['output']
576 if "Failed to run module" in instance_log:
578 logger.error("Cloud init failed to run. Reason: %s",
581 if re.search(r"Cloud-init v. .+ finished at", instance_log):
584 time.sleep(sleep_time)
588 logger.error("Cloud init timed out"
592 logger.info("Finished waiting for cloud init of instance {} result was {}"
593 "".format(instance.name, success))
597 def attach_instance_to_ext_br(instance, compute_node):
598 libvirt_instance_name = instance.instance_name
599 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
600 if installer_type == "fuel":
602 elif installer_type == "apex":
603 # In Apex, br-ex is an ovs bridge and virsh attach-interface
604 # won't just work. We work around it by creating a linux
605 # bridge, attaching that to br-ex with a veth pair
606 # and virsh-attaching the instance to the linux-bridge
610 if ! sudo brctl show |grep -q ^{bridge};then
611 sudo brctl addbr {bridge}
612 sudo ip link set {bridge} up
613 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
614 sudo ip link set dev ovs-quagga-tap up
615 sudo ip link set dev quagga-tap up
616 sudo ovs-vsctl add-port br-ex ovs-quagga-tap
617 sudo brctl addif {bridge} quagga-tap
620 compute_node.run_cmd(cmd.format(bridge=bridge))
622 compute_node.run_cmd("sudo virsh attach-interface %s"
623 " bridge %s" % (libvirt_instance_name, bridge))
626 def detach_instance_from_ext_br(instance, compute_node):
627 libvirt_instance_name = instance.instance_name
628 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
629 "grep running | awk '{print $2}'); "
630 "do echo -n ; sudo virsh dumpxml $vm| "
631 "grep -oP '52:54:[\da-f:]+' ;done")
632 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
633 " --type bridge --mac %s"
634 % (libvirt_instance_name, mac))
636 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
637 if installer_type == "fuel":
639 elif installer_type == "apex":
640 # In Apex, br-ex is an ovs bridge and virsh attach-interface
641 # won't just work. We work around it by creating a linux
642 # bridge, attaching that to br-ex with a veth pair
643 # and virsh-attaching the instance to the linux-bridge
646 sudo brctl delif {bridge} quagga-tap &&
647 sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
648 sudo ip link set dev quagga-tap down &&
649 sudo ip link set dev ovs-quagga-tap down &&
650 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
651 sudo ip link set {bridge} down &&
652 sudo brctl delbr {bridge}
654 compute_node.run_cmd(cmd.format(bridge=bridge))
657 def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
658 interfaces, subnet_ids, router_ids, network_ids):
659 if len(floatingip_ids) != 0:
660 for floatingip_id in floatingip_ids:
661 if not os_utils.delete_floating_ip(conn, floatingip_id):
662 logger.error('Fail to delete all floating ips. '
663 'Floating ip with id {} was not deleted.'.
664 format(floatingip_id))
667 if len(bgpvpn_ids) != 0:
668 for bgpvpn_id in bgpvpn_ids:
669 delete_bgpvpn(neutron_client, bgpvpn_id)
671 if len(interfaces) != 0:
672 for router_id, subnet_id in interfaces:
673 if not os_utils.remove_interface_router(conn,
674 router_id, subnet_id):
675 logger.error('Fail to delete all interface routers. '
676 'Interface router with id {} was not deleted.'.
679 if len(router_ids) != 0:
680 for router_id in router_ids:
681 if not os_utils.remove_gateway_router(conn, router_id):
682 logger.error('Fail to delete all gateway routers. '
683 'Gateway router with id {} was not deleted.'.
686 if len(subnet_ids) != 0:
687 for subnet_id in subnet_ids:
688 if not os_utils.delete_neutron_subnet(conn, subnet_id):
689 logger.error('Fail to delete all subnets. '
690 'Subnet with id {} was not deleted.'.
694 if len(router_ids) != 0:
695 for router_id in router_ids:
696 if not os_utils.delete_neutron_router(conn, router_id):
697 logger.error('Fail to delete all routers. '
698 'Router with id {} was not deleted.'.
702 if len(network_ids) != 0:
703 for network_id in network_ids:
704 if not os_utils.delete_neutron_net(conn, network_id):
705 logger.error('Fail to delete all networks. '
706 'Network with id {} was not deleted.'.
712 def cleanup_nova(conn, instance_ids, flavor_ids=None):
713 if flavor_ids is not None and len(flavor_ids) != 0:
714 for flavor_id in flavor_ids:
715 conn.compute.delete_flavor(flavor_id)
716 if len(instance_ids) != 0:
717 for instance_id in instance_ids:
718 if not os_utils.delete_instance(conn, instance_id):
719 logger.error('Fail to delete all instances. '
720 'Instance with id {} was not deleted.'.
723 wait_for_instance_delete(conn, instance_id)
727 def cleanup_glance(conn, image_ids):
728 if len(image_ids) != 0:
729 for image_id in image_ids:
730 if not os_utils.delete_glance_image(conn, image_id):
731 logger.error('Fail to delete all images. '
732 'Image with id {} was not deleted.'.
738 def create_bgpvpn(neutron_client, **kwargs):
739 # route_distinguishers
741 json_body = {"bgpvpn": kwargs}
742 return neutron_client.create_bgpvpn(json_body)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id.

    All keyword arguments are forwarded as the 'bgpvpn' attribute dict
    of the request body.
    """
    body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, body)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id and return the
    client's response."""
    response = neutron_client.delete_bgpvpn(bgpvpn_id)
    return response
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch and return the full BGPVPN resource for bgpvpn_id."""
    bgpvpn = neutron_client.show_bgpvpn(bgpvpn_id)
    return bgpvpn
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the list of router ids associated with the given BGPVPN."""
    bgpvpn_info = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn_info['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the list of network ids associated with the given BGPVPN."""
    bgpvpn_info = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn_info['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router router_id with the BGPVPN bgpvpn_id and return
    the client's response."""
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate network neutron_network_id with the BGPVPN bgpvpn_id and
    return the client's response."""
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
776 def is_fail_mode_secure():
778 Checks the value of the attribute fail_mode,
779 if it is set to secure. This check is performed
780 on all OVS br-int interfaces, for all OpenStack nodes.
783 openstack_nodes = get_nodes()
784 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
787 # Define OVS get fail_mode command
788 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
789 for openstack_node in openstack_nodes:
790 if not openstack_node.is_active():
793 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
795 if 'br-int' in ovs_int_list:
796 # Execute get fail_mode command
797 br_int_fail_mode = (openstack_node.
798 run_cmd(get_ovs_fail_mode_cmd).strip())
799 if br_int_fail_mode == 'secure':
801 is_secure[openstack_node.name] = True
804 logger.error('The fail_mode for br-int was not secure '
805 'in {} node'.format(openstack_node.name))
806 is_secure[openstack_node.name] = False
810 def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
811 subnet_quota, port_quota, router_quota):
813 conn.network.update_quota(tenant_id, networks=nw_quota,
814 subnets=subnet_quota, ports=port_quota,
815 routers=router_quota)
817 except Exception as e:
818 logger.error("Error [update_nw_subnet_port_quota(network,"
819 " '%s', '%s', '%s', '%s, %s')]: %s" %
820 (tenant_id, nw_quota, subnet_quota,
821 port_quota, router_quota, e))
825 def update_instance_quota_class(cloud, instances_quota):
827 cloud.set_compute_quotas('admin', instances=instances_quota)
829 except Exception as e:
830 logger.error("Error [update_instance_quota_class(compute,"
831 " '%s' )]: %s" % (instances_quota, e))
835 def get_neutron_quota(conn, tenant_id):
837 return conn.network.get_quota(tenant_id)
838 except ResourceNotFound as e:
839 logger.error("Error in getting network quota for tenant "
840 " '%s' )]: %s" % (tenant_id, e))
844 def get_nova_instances_quota(cloud):
846 return cloud.get_compute_quotas('admin').instances
847 except Exception as e:
848 logger.error("Error in getting nova instances quota: %s" % e)
852 def update_router_extra_route(conn, router_id, extra_routes):
853 if len(extra_routes) <= 0:
856 for extra_route in extra_routes:
857 route_dict = {'destination': extra_route.destination,
858 'nexthop': extra_route.nexthop}
859 routes_list.append(route_dict)
862 conn.network.update_router(router_id, routes=routes_list)
864 except Exception as e:
865 logger.error("Error in updating router with extra route: %s" % e)
869 def update_router_no_extra_route(conn, router_ids):
870 for router_id in router_ids:
872 conn.network.update_router(router_id, routes=[])
874 except Exception as e:
875 logger.error("Error in clearing extra route: %s" % e)
878 def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
880 Gets, as input, a list of compute nodes and a list of OVS bridges
881 and returns the command console output, as a list of lines, that
882 contains all the OVS groups from all bridges and nodes in lists.
885 for compute_node in compute_node_list:
886 for ovs_br in ovs_br_list:
887 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
888 ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
889 "grep group".format(ovs_br, of_protocol))
890 cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
895 def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
897 Gets, as input, a list of compute nodes and a list of OVS bridges
898 and returns the command console output, as a list of lines, that
899 contains all the OVS flows from all bridges and nodes in lists.
902 for compute_node in compute_node_list:
903 for ovs_br in ovs_br_list:
904 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
905 ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
906 "grep table=".format(ovs_br, of_protocol))
907 cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
912 def get_odl_bgp_entity_owner(controllers):
913 """ Finds the ODL owner of the BGP entity in the cluster.
915 When ODL runs in clustering mode we need to execute the BGP speaker
916 related commands to that ODL which is the owner of the BGP entity.
918 :param controllers: list of OS controllers
919 :return controller: OS controller in which ODL BGP entity owner runs
921 if len(controllers) == 1:
922 return controllers[0]
924 url = ('http://admin:admin@{ip}:8081/restconf/'
925 'operational/entity-owners:entity-owners/entity-type/bgp'
926 .format(ip=controllers[0].ip))
928 remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
930 remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
931 local_tmp_akka_conf = '/tmp/akka.conf'
933 json_output = requests.get(url).json()
935 logger.error('Failed to find the ODL BGP '
936 'entity owner through REST')
938 odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
940 for controller in controllers:
942 controller.run_cmd('sudo cp {0} /home/heat-admin/'
943 .format(remote_odl_akka_conf))
944 controller.run_cmd('sudo chmod 777 {0}'
945 .format(remote_odl_home_akka_conf))
946 controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
948 for line in open(local_tmp_akka_conf):
949 if re.search(odl_bgp_owner, line):
954 def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
955 json_body = {'input':
956 {'destination-ip': remote_tep_ip,
957 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
959 url = ('http://{ip}:8081/restconf/operations/'
960 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
961 headers = {'Content-type': 'application/yang.data+json',
962 'Accept': 'application/yang.data+json'}
964 requests.post(url, data=json.dumps(json_body),
966 auth=HTTPBasicAuth('admin', 'admin'))
967 except Exception as e:
968 logger.error("Failed to create external tunnel endpoint on"
969 " ODL for external tep ip %s with error %s"
970 % (remote_tep_ip, e))
974 def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
975 url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
976 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
977 logger.error("url is %s" % url)
979 vrf_table = requests.get(url).json()
980 is_ipprefix_exists = False
981 for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
982 if vrf_entry['destPrefix'] == ip_prefix:
983 is_ipprefix_exists = True
985 return is_ipprefix_exists
986 except Exception as e:
987 logger.error('Failed to find ip prefix %s with error %s'