3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
17 from concurrent.futures import ThreadPoolExecutor
18 from requests.auth import HTTPBasicAuth
20 from opnfv.deployment.factory import Factory as DeploymentFactory
22 from sdnvpn.lib import config as sdnvpn_config
23 import sdnvpn.lib.openstack_utils as os_utils
25 logger = logging.getLogger('sdnvpn_test_utils')
27 common_config = sdnvpn_config.CommonConfig()
32 executor = ThreadPoolExecutor(5)
class ExtraRoute(object):
    """Value holder for one extra route of a neutron router.

    Pairs a destination CIDR with the nexthop IP used to reach it.
    """

    def __init__(self, destination, nexthop):
        # Keep both values as plain attributes; consumers read them directly.
        self.destination, self.nexthop = destination, nexthop
class AllowedAddressPair(object):
    """Value holder for one allowed-address-pair of a neutron port.

    Pairs an IP address with the MAC address permitted to use it.
    """

    def __init__(self, ipaddress, macaddress):
        # Plain attribute storage; consumers read these directly.
        self.ipaddress, self.macaddress = ipaddress, macaddress
def create_default_flavor():
    """Get or create the default test flavor.

    Delegates to os_utils.get_or_create_flavor using the default flavor
    parameters from the common configuration; returns its result.
    """
    cfg = common_config  # local alias to keep the call readable
    return os_utils.get_or_create_flavor(cfg.default_flavor,
                                         cfg.default_flavor_ram,
                                         cfg.default_flavor_disk,
                                         cfg.default_flavor_vcpus)
def create_custom_flavor():
    """Get or create the custom test flavor.

    Delegates to os_utils.get_or_create_flavor using the custom flavor
    parameters from the common configuration; returns its result.
    """
    cfg = common_config  # local alias to keep the call readable
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
69 def create_net(neutron_client, name):
70 logger.debug("Creating network %s", name)
71 net_id = os_utils.create_neutron_net(neutron_client, name)
74 "There has been a problem when creating the neutron network")
75 raise Exception("There has been a problem when creating"
76 " the neutron network {}".format(name))
80 def create_subnet(neutron_client, name, cidr, net_id):
81 logger.debug("Creating subnet %s in network %s with cidr %s",
83 subnet_id = os_utils.create_neutron_subnet(neutron_client,
89 "There has been a problem when creating the neutron subnet")
90 raise Exception("There has been a problem when creating"
91 " the neutron subnet {}".format(name))
95 def create_network(neutron_client, net, subnet1, cidr1,
96 router, subnet2=None, cidr2=None):
97 """Network assoc won't work for networks/subnets created by this function.
98 It is an ODL limitation due to it handling routers as vpns.
99 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
100 network_dic = os_utils.create_network_full(neutron_client,
107 "There has been a problem when creating the neutron network")
108 raise Exception("There has been a problem when creating"
109 " the neutron network {}".format(net))
110 net_id = network_dic["net_id"]
111 subnet_id = network_dic["subnet_id"]
112 router_id = network_dic["router_id"]
114 if subnet2 is not None:
115 logger.debug("Creating and attaching a second subnet...")
116 subnet_id = os_utils.create_neutron_subnet(
117 neutron_client, subnet2, cidr2, net_id)
120 "There has been a problem when creating the second subnet")
121 raise Exception("There has been a problem when creating"
122 " the second subnet {}".format(subnet2))
123 logger.debug("Subnet '%s' created successfully" % subnet_id)
124 return net_id, subnet_id, router_id
127 def get_port(neutron_client, instance_id):
128 ports = os_utils.get_port_list(neutron_client)
129 if ports is not None:
131 if port['device_id'] == instance_id:
136 def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
137 if len(address_pairs) <= 0:
139 allowed_address_pairs = []
140 for address_pair in address_pairs:
141 address_pair_dict = {'ip_address': address_pair.ipaddress,
142 'mac_address': address_pair.macaddress}
143 allowed_address_pairs.append(address_pair_dict)
144 json_body = {'port': {
145 "allowed_address_pairs": allowed_address_pairs
149 port = neutron_client.update_port(port=port_id,
151 return port['port']['id']
152 except Exception as e:
153 logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
154 " %s" % (port_id, address_pairs, e))
158 def create_instance(conn,
170 if 'flavor' not in kwargs:
171 kwargs['flavor'] = common_config.default_flavor
173 logger.info("Creating instance '%s'..." % name)
175 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
176 " network=%s\n secgroup=%s \n hypervisor=%s \n"
177 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
178 % (name, kwargs['flavor'], image_id, network_id, sg_id,
179 compute_node, fixed_ip, files, userdata))
180 instance = os_utils.create_instance_and_wait_for_active(
187 av_zone=compute_node,
192 logger.error("Error while booting instance.")
193 raise Exception("Error while booting instance {}".format(name))
195 # Retrieve IP of INSTANCE
196 network_name = conn.network.get_network(network_id).name
197 instance_ip = conn.compute.get_server(instance).\
198 addresses.get(network_name)[0]['addr']
199 logger.debug("Instance '%s' booted successfully. IP='%s'." %
203 logger.debug("Adding '%s' to security group '%s'..."
204 % (name, secgroup_name))
206 logger.debug("Adding '%s' to security group '%s'..."
208 os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
213 def generate_ping_userdata(ips_array, ping_count=10):
216 ips = ("%s %s" % (ips, ip))
218 ips = ips.replace(' ', ' ')
219 return ("#!/bin/sh\n"
224 " ping -c %s $ip 2>&1 >/dev/null\n"
226 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
227 " echo ping $ip OK\n"
228 " else echo ping $ip KO\n"
236 def generate_userdata_common():
237 return ("#!/bin/sh\n"
238 "sudo mkdir -p /home/cirros/.ssh/\n"
239 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
240 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
241 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
242 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
243 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
244 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
245 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
246 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
247 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
248 "chmod 700 /home/cirros/.ssh\n"
249 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
250 "chmod 600 /home/cirros/.ssh/id_rsa\n"
254 def generate_userdata_with_ssh(ips_array):
255 u1 = generate_userdata_common()
259 ips = ("%s %s" % (ips, ip))
261 ips = ips.replace(' ', ' ')
267 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
268 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
270 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
271 " else echo $ip 'not reachable';fi;\n"
279 def generate_userdata_interface_create(interface_name, interface_number,
280 ip_Address, net_mask):
281 return ("#!/bin/sh\n"
283 "sudo useradd -m sdnvpn\n"
284 "sudo adduser sdnvpn sudo\n"
285 "sudo echo sdnvpn:opnfv | chpasswd\n"
287 "sudo ifconfig %s:%s %s netmask %s up\n"
288 % (interface_name, interface_number,
289 ip_Address, net_mask))
292 def get_installerHandler():
293 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
294 installer_ip = get_installer_ip()
296 if installer_type not in ["fuel", "apex"]:
297 logger.warn("installer type %s is neither fuel nor apex."
298 "returning None for installer handler" % installer_type)
301 if installer_type in ["apex"]:
302 developHandler = DeploymentFactory.get_handler(
306 pkey_file="/root/.ssh/id_rsa")
308 if installer_type in ["fuel"]:
309 developHandler = DeploymentFactory.get_handler(
314 return developHandler
318 developHandler = get_installerHandler()
319 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP address read from the INSTALLER_IP
    environment variable (raises KeyError when it is unset)."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
326 def get_instance_ip(conn, instance):
327 instance_ip = conn.compute.get_server(instance).\
328 addresses.values()[0][0]['addr']
332 def wait_for_instance(instance, pattern=".* login:", tries=40):
333 logger.info("Waiting for instance %s to boot up" % instance.id)
334 conn = os_utils.get_os_connection()
336 expected_regex = re.compile(pattern)
338 while tries > 0 and not expected_regex.search(console_log):
339 console_log = conn.compute.\
340 get_server_console_output(instance)['output']
341 time.sleep(sleep_time)
344 if not expected_regex.search(console_log):
345 logger.error("Instance %s does not boot up properly."
351 def wait_for_instances_up(*instances):
352 check = [wait_for_instance(instance) for instance in instances]
356 def wait_for_instances_get_dhcp(*instances):
357 check = [wait_for_instance(instance, "Lease of .* obtained")
358 for instance in instances]
362 def async_Wait_for_instances(instances, tries=40):
363 if len(instances) <= 0:
366 for instance in instances:
367 future = executor.submit(wait_for_instance,
371 futures.append(future)
373 for future in futures:
374 results.append(future.result())
376 logger.error("one or more instances is not yet booted up")
379 def wait_for_instance_delete(conn, instance_id, tries=30):
381 instances = [instance_id]
382 logger.debug("Waiting for instance %s to be deleted"
384 while tries > 0 and instance_id in instances:
385 instances = [instance.id for instance in
386 os_utils.get_instances(conn)]
387 time.sleep(sleep_time)
389 if instance_id in instances:
390 logger.error("Deletion of instance %s failed" %
394 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
398 logger.debug("Waiting for network %s to associate with BGPVPN %s "
399 % (bgpvpn_id, net_id))
401 while tries > 0 and net_id not in nets:
402 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
403 time.sleep(sleep_time)
405 if net_id not in nets:
406 logger.error("Association of network %s with BGPVPN %s failed" %
412 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
413 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
415 # Return True if all associations succeeded
419 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
423 logger.debug("Waiting for router %s to associate with BGPVPN %s "
424 % (bgpvpn_id, router_id))
425 while tries > 0 and router_id not in routers:
426 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
427 time.sleep(sleep_time)
429 if router_id not in routers:
430 logger.error("Association of router %s with BGPVPN %s failed" %
431 (router_id, bgpvpn_id))
436 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
437 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
439 # Return True if all associations succeeded
443 def wait_before_subtest(*args, **kwargs):
444 ''' This is a placeholder.
445 TODO: Replace delay with polling logic. '''
449 def assert_and_get_compute_nodes(conn, required_node_number=2):
450 """Get the compute nodes in the deployment
451 Exit if the deployment doesn't have enough compute nodes"""
452 compute_nodes = os_utils.get_hypervisors(conn)
454 num_compute_nodes = len(compute_nodes)
455 if num_compute_nodes < 2:
456 logger.error("There are %s compute nodes in the deployment. "
457 "Minimum number of nodes to complete the test is 2."
459 raise Exception("There are {} compute nodes in the deployment. "
460 "Minimum number of nodes to complete the test"
461 " is 2.".format(num_compute_nodes))
463 logger.debug("Compute nodes: %s" % compute_nodes)
467 def open_icmp(neutron_client, security_group_id):
468 if os_utils.check_security_group_rules(neutron_client,
473 if not os_utils.create_secgroup_rule(neutron_client,
477 logger.error("Failed to create icmp security group rule...")
479 logger.info("This rule exists for security group: %s"
483 def open_http_port(neutron_client, security_group_id):
484 if os_utils.check_security_group_rules(neutron_client,
490 if not os_utils.create_secgroup_rule(neutron_client,
496 logger.error("Failed to create http security group rule...")
498 logger.info("This rule exists for security group: %s"
502 def open_bgp_port(neutron_client, security_group_id):
503 if os_utils.check_security_group_rules(neutron_client,
509 if not os_utils.create_secgroup_rule(neutron_client,
514 logger.error("Failed to create bgp security group rule...")
516 logger.info("This rule exists for security group: %s"
520 def exec_cmd(cmd, verbose):
522 logger.debug("Executing '%s'" % cmd)
523 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
524 stderr=subprocess.STDOUT)
526 for line in iter(p.stdout.readline, b''):
533 returncode = p.wait()
535 logger.error("Command %s failed to execute." % cmd)
538 return output, success
541 def check_odl_fib(ip, controller_ip):
542 """Check that there is an entry in the ODL Fib for `ip`"""
543 url = "http://" + controller_ip + \
544 ":8181/restconf/config/odl-fib:fibEntries/"
545 logger.debug("Querring '%s' for FIB entries", url)
546 res = requests.get(url, auth=(ODL_USER, ODL_PASS))
547 if res.status_code != 200:
548 logger.error("OpenDaylight response status code: %s", res.status_code)
550 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
552 logger.debug("OpenDaylight FIB: \n%s" % res.text)
553 return ip in res.text
556 def run_odl_cmd(odl_node, cmd):
557 '''Run a command in the OpenDaylight Karaf shell
558 This is a bit flimsy because of shell quote escaping, make sure that
559 the cmd passed does not have any top level double quotes or this
561 The /dev/null is used because client works, but outputs something
562 that contains "ERROR" and run_cmd doesn't like that.
564 karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
565 ' 2>/dev/null' % cmd)
566 return odl_node.run_cmd(karaf_cmd)
569 def wait_for_cloud_init(conn, instance):
571 # ubuntu images take a long time to start
574 logger.info("Waiting for cloud init of instance: {}"
575 "".format(instance.name))
577 instance_log = conn.compute.\
578 get_server_console_output(instance)['output']
579 if "Failed to run module" in instance_log:
581 logger.error("Cloud init failed to run. Reason: %s",
584 if re.search(r"Cloud-init v. .+ finished at", instance_log):
587 time.sleep(sleep_time)
591 logger.error("Cloud init timed out"
595 logger.info("Finished waiting for cloud init of instance {} result was {}"
596 "".format(instance.name, success))
600 def attach_instance_to_ext_br(instance, compute_node):
601 libvirt_instance_name = instance.instance_name
602 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
603 if installer_type == "fuel":
605 elif installer_type == "apex":
606 # In Apex, br-ex is an ovs bridge and virsh attach-interface
607 # won't just work. We work around it by creating a linux
608 # bridge, attaching that to br-ex with a veth pair
609 # and virsh-attaching the instance to the linux-bridge
613 if ! sudo brctl show |grep -q ^{bridge};then
614 sudo brctl addbr {bridge}
615 sudo ip link set {bridge} up
616 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
617 sudo ip link set dev ovs-quagga-tap up
618 sudo ip link set dev quagga-tap up
619 sudo ovs-vsctl add-port br-ex ovs-quagga-tap
620 sudo brctl addif {bridge} quagga-tap
623 compute_node.run_cmd(cmd.format(bridge=bridge))
625 compute_node.run_cmd("sudo virsh attach-interface %s"
626 " bridge %s" % (libvirt_instance_name, bridge))
629 def detach_instance_from_ext_br(instance, compute_node):
630 libvirt_instance_name = instance.instance_name
631 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
632 "grep running | awk '{print $2}'); "
633 "do echo -n ; sudo virsh dumpxml $vm| "
634 "grep -oP '52:54:[\da-f:]+' ;done")
635 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
636 " --type bridge --mac %s"
637 % (libvirt_instance_name, mac))
639 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
640 if installer_type == "fuel":
642 elif installer_type == "apex":
643 # In Apex, br-ex is an ovs bridge and virsh attach-interface
644 # won't just work. We work around it by creating a linux
645 # bridge, attaching that to br-ex with a veth pair
646 # and virsh-attaching the instance to the linux-bridge
649 sudo brctl delif {bridge} quagga-tap &&
650 sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
651 sudo ip link set dev quagga-tap down &&
652 sudo ip link set dev ovs-quagga-tap down &&
653 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
654 sudo ip link set {bridge} down &&
655 sudo brctl delbr {bridge}
657 compute_node.run_cmd(cmd.format(bridge=bridge))
660 def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
661 subnet_ids, router_ids, network_ids):
663 if len(floatingip_ids) != 0:
664 for floatingip_id in floatingip_ids:
665 if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
666 logger.error('Fail to delete all floating ips. '
667 'Floating ip with id {} was not deleted.'.
668 format(floatingip_id))
671 if len(bgpvpn_ids) != 0:
672 for bgpvpn_id in bgpvpn_ids:
673 delete_bgpvpn(neutron_client, bgpvpn_id)
675 if len(interfaces) != 0:
676 for router_id, subnet_id in interfaces:
677 if not os_utils.remove_interface_router(neutron_client,
678 router_id, subnet_id):
679 logger.error('Fail to delete all interface routers. '
680 'Interface router with id {} was not deleted.'.
683 if len(router_ids) != 0:
684 for router_id in router_ids:
685 if not os_utils.remove_gateway_router(neutron_client, router_id):
686 logger.error('Fail to delete all gateway routers. '
687 'Gateway router with id {} was not deleted.'.
690 if len(subnet_ids) != 0:
691 for subnet_id in subnet_ids:
692 if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
693 logger.error('Fail to delete all subnets. '
694 'Subnet with id {} was not deleted.'.
698 if len(router_ids) != 0:
699 for router_id in router_ids:
700 if not os_utils.delete_neutron_router(neutron_client, router_id):
701 logger.error('Fail to delete all routers. '
702 'Router with id {} was not deleted.'.
706 if len(network_ids) != 0:
707 for network_id in network_ids:
708 if not os_utils.delete_neutron_net(neutron_client, network_id):
709 logger.error('Fail to delete all networks. '
710 'Network with id {} was not deleted.'.
716 def cleanup_nova(conn, instance_ids, flavor_ids=None):
717 if flavor_ids is not None and len(flavor_ids) != 0:
718 for flavor_id in flavor_ids:
719 conn.compute.delete_flavor(flavor_id)
720 if len(instance_ids) != 0:
721 for instance_id in instance_ids:
722 if not os_utils.delete_instance(conn, instance_id):
723 logger.error('Fail to delete all instances. '
724 'Instance with id {} was not deleted.'.
727 wait_for_instance_delete(conn, instance_id)
731 def cleanup_glance(conn, image_ids):
732 if len(image_ids) != 0:
733 for image_id in image_ids:
734 if not os_utils.delete_glance_image(conn, image_id):
735 logger.error('Fail to delete all images. '
736 'Image with id {} was not deleted.'.
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from the given keyword attributes.

    kwargs are passed through unchanged as the "bgpvpn" body (attributes
    such as route_distinguishers); returns the client's response.
    """
    return neutron_client.create_bgpvpn({"bgpvpn": kwargs})
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update BGPVPN *bgpvpn_id* with the given keyword attributes.

    kwargs become the "bgpvpn" body; returns the client's response.
    """
    return neutron_client.update_bgpvpn(bgpvpn_id, {"bgpvpn": kwargs})
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete BGPVPN *bgpvpn_id*; returns the client's response."""
    return neutron_client.delete_bgpvpn(bgpvpn_id)
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the show_bgpvpn payload for *bgpvpn_id*."""
    return neutron_client.show_bgpvpn(bgpvpn_id)
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the list of router ids associated with *bgpvpn_id*."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']
    return bgpvpn['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the list of network ids associated with *bgpvpn_id*."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']
    return bgpvpn['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router *router_id* with BGPVPN *bgpvpn_id*.

    Returns the client's response.
    """
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate network *neutron_network_id* with BGPVPN *bgpvpn_id*.

    Returns the client's response.
    """
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
780 def is_fail_mode_secure():
782 Checks the value of the attribute fail_mode,
783 if it is set to secure. This check is performed
784 on all OVS br-int interfaces, for all OpenStack nodes.
787 openstack_nodes = get_nodes()
788 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
791 # Define OVS get fail_mode command
792 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
793 for openstack_node in openstack_nodes:
794 if not openstack_node.is_active():
797 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
799 if 'br-int' in ovs_int_list:
800 # Execute get fail_mode command
801 br_int_fail_mode = (openstack_node.
802 run_cmd(get_ovs_fail_mode_cmd).strip())
803 if br_int_fail_mode == 'secure':
805 is_secure[openstack_node.name] = True
808 logger.error('The fail_mode for br-int was not secure '
809 'in {} node'.format(openstack_node.name))
810 is_secure[openstack_node.name] = False
814 def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
815 subnet_quota, port_quota, router_quota):
816 json_body = {"quota": {
818 "subnet": subnet_quota,
820 "router": router_quota
824 neutron_client.update_quota(tenant_id=tenant_id,
827 except Exception as e:
828 logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
829 " '%s', '%s', '%s', '%s, %s')]: %s" %
830 (tenant_id, nw_quota, subnet_quota,
831 port_quota, router_quota, e))
835 def update_instance_quota_class(cloud, instances_quota):
837 cloud.set_compute_quotas('admin', instances=instances_quota)
839 except Exception as e:
840 logger.error("Error [update_instance_quota_class(compute,"
841 " '%s' )]: %s" % (instances_quota, e))
845 def get_neutron_quota(neutron_client, tenant_id):
847 return neutron_client.show_quota(tenant_id=tenant_id)['quota']
848 except Exception as e:
849 logger.error("Error in getting neutron quota for tenant "
850 " '%s' )]: %s" % (tenant_id, e))
854 def get_nova_instances_quota(cloud):
856 return cloud.get_compute_quotas('admin').instances
857 except Exception as e:
858 logger.error("Error in getting nova instances quota: %s" % e)
862 def update_router_extra_route(neutron_client, router_id, extra_routes):
863 if len(extra_routes) <= 0:
866 for extra_route in extra_routes:
867 route_dict = {'destination': extra_route.destination,
868 'nexthop': extra_route.nexthop}
869 routes_list.append(route_dict)
870 json_body = {'router': {
871 "routes": routes_list
875 neutron_client.update_router(router_id, body=json_body)
877 except Exception as e:
878 logger.error("Error in updating router with extra route: %s" % e)
882 def update_router_no_extra_route(neutron_client, router_ids):
883 json_body = {'router': {
887 for router_id in router_ids:
889 neutron_client.update_router(router_id, body=json_body)
891 except Exception as e:
892 logger.error("Error in clearing extra route: %s" % e)
895 def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
897 Gets, as input, a list of compute nodes and a list of OVS bridges
898 and returns the command console output, as a list of lines, that
899 contains all the OVS groups from all bridges and nodes in lists.
902 for compute_node in compute_node_list:
903 for ovs_br in ovs_br_list:
904 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
905 ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
906 "grep group".format(ovs_br, of_protocol))
907 cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
912 def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
914 Gets, as input, a list of compute nodes and a list of OVS bridges
915 and returns the command console output, as a list of lines, that
916 contains all the OVS flows from all bridges and nodes in lists.
919 for compute_node in compute_node_list:
920 for ovs_br in ovs_br_list:
921 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
922 ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
923 "grep table=".format(ovs_br, of_protocol))
924 cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
929 def get_odl_bgp_entity_owner(controllers):
930 """ Finds the ODL owner of the BGP entity in the cluster.
932 When ODL runs in clustering mode we need to execute the BGP speaker
933 related commands to that ODL which is the owner of the BGP entity.
935 :param controllers: list of OS controllers
936 :return controller: OS controller in which ODL BGP entity owner runs
938 if len(controllers) == 1:
939 return controllers[0]
941 url = ('http://admin:admin@{ip}:8081/restconf/'
942 'operational/entity-owners:entity-owners/entity-type/bgp'
943 .format(ip=controllers[0].ip))
945 remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
947 remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
948 local_tmp_akka_conf = '/tmp/akka.conf'
950 json_output = requests.get(url).json()
952 logger.error('Failed to find the ODL BGP '
953 'entity owner through REST')
955 odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
957 for controller in controllers:
959 controller.run_cmd('sudo cp {0} /home/heat-admin/'
960 .format(remote_odl_akka_conf))
961 controller.run_cmd('sudo chmod 777 {0}'
962 .format(remote_odl_home_akka_conf))
963 controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
965 for line in open(local_tmp_akka_conf):
966 if re.search(odl_bgp_owner, line):
971 def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
972 json_body = {'input':
973 {'destination-ip': remote_tep_ip,
974 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
976 url = ('http://{ip}:8081/restconf/operations/'
977 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
978 headers = {'Content-type': 'application/yang.data+json',
979 'Accept': 'application/yang.data+json'}
981 requests.post(url, data=json.dumps(json_body),
983 auth=HTTPBasicAuth('admin', 'admin'))
984 except Exception as e:
985 logger.error("Failed to create external tunnel endpoint on"
986 " ODL for external tep ip %s with error %s"
987 % (remote_tep_ip, e))
991 def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
992 url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
993 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
994 logger.error("url is %s" % url)
996 vrf_table = requests.get(url).json()
997 is_ipprefix_exists = False
998 for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
999 if vrf_entry['destPrefix'] == ip_prefix:
1000 is_ipprefix_exists = True
1002 return is_ipprefix_exists
1003 except Exception as e:
1004 logger.error('Failed to find ip prefix %s with error %s'