3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
17 from concurrent.futures import ThreadPoolExecutor
18 from requests.auth import HTTPBasicAuth
20 from opnfv.deployment.factory import Factory as DeploymentFactory
22 from sdnvpn.lib import config as sdnvpn_config
23 import sdnvpn.lib.openstack_utils as os_utils
# Module-level shared state for the sdnvpn test helpers:
# a dedicated logger, the shared test configuration, and a small
# thread pool (5 workers) used to wait on instance boots in parallel.
# NOTE(review): this file is a line-numbered listing with gaps — lines
# missing between the numbered entries must be restored from the
# original source before this module can run.
25 logger = logging.getLogger('sdnvpn_test_utils')
27 common_config = sdnvpn_config.CommonConfig()
32 executor = ThreadPoolExecutor(5)
class ExtraRoute(object):
    """Class to represent extra route for a router.

    The listing had lost the docstring delimiters; this restores a
    well-formed class. Plain value holder, no behavior.
    """

    def __init__(self, destination, nexthop):
        # destination: CIDR of the destination network of the route.
        self.destination = destination
        # nexthop: IP address of the next hop for the route.
        self.nexthop = nexthop
class AllowedAddressPair(object):
    """Class to represent allowed address pair for a neutron port.

    The listing had lost the docstring delimiters; this restores a
    well-formed class. Plain value holder, no behavior.
    """

    def __init__(self, ipaddress, macaddress):
        # ipaddress: IP address allowed on the port.
        self.ipaddress = ipaddress
        # macaddress: MAC address paired with that IP.
        self.macaddress = macaddress
def create_default_flavor():
    """Get or create the default nova flavor described in common config."""
    cfg = common_config
    return os_utils.get_or_create_flavor(
        cfg.default_flavor,
        cfg.default_flavor_ram,
        cfg.default_flavor_disk,
        cfg.default_flavor_vcpus)
def create_custom_flavor():
    """Get or create the custom nova flavor described in common config."""
    cfg = common_config
    return os_utils.get_or_create_flavor(
        cfg.custom_flavor_name,
        cfg.custom_flavor_ram,
        cfg.custom_flavor_disk,
        cfg.custom_flavor_vcpus)
# Create a neutron network by name; raises on failure.
# NOTE(review): listing is truncated — original lines 72-73 (the failure
# check before the raise) are missing here; restore before use.
69 def create_net(neutron_client, name):
70 logger.debug("Creating network %s", name)
71 net_id = os_utils.create_neutron_net(neutron_client, name)
74 "There has been a problem when creating the neutron network")
75 raise Exception("There has been a problem when creating"
76 " the neutron network {}".format(name))
# Create a neutron subnet in net_id with the given CIDR; raises on failure.
# NOTE(review): lines 82, 84-88 are missing (subnet-create arguments and
# the failure check) — restore before use.
80 def create_subnet(neutron_client, name, cidr, net_id):
81 logger.debug("Creating subnet %s in network %s with cidr %s",
83 subnet_id = os_utils.create_neutron_subnet(neutron_client,
89 "There has been a problem when creating the neutron subnet")
90 raise Exception("There has been a problem when creating"
91 " the neutron subnet {}".format(name))
# Create network + subnet(s) + router in one call; returns
# (net_id, subnet_id, router_id). Optional second subnet.
# NOTE(review): lines 101-106 and 118-119 are missing (full-create
# arguments and both failure checks) — restore before use.
95 def create_network(neutron_client, net, subnet1, cidr1,
96 router, subnet2=None, cidr2=None):
97 """Network assoc won't work for networks/subnets created by this function.
98 It is an ODL limitation due to it handling routers as vpns.
99 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
100 network_dic = os_utils.create_network_full(neutron_client,
107 "There has been a problem when creating the neutron network")
108 raise Exception("There has been a problem when creating"
109 " the neutron network {}".format(net))
110 net_id = network_dic["net_id"]
111 subnet_id = network_dic["subnet_id"]
112 router_id = network_dic["router_id"]
114 if subnet2 is not None:
115 logger.debug("Creating and attaching a second subnet...")
116 subnet_id = os_utils.create_neutron_subnet(
117 neutron_client, subnet2, cidr2, net_id)
120 "There has been a problem when creating the second subnet")
121 raise Exception("There has been a problem when creating"
122 " the second subnet {}".format(subnet2))
123 logger.debug("Subnet '%s' created successfully" % subnet_id)
124 return net_id, subnet_id, router_id
# Find the neutron port belonging to instance_id, if any.
# NOTE(review): lines 130, 132-134 missing (the port loop and returns).
127 def get_port(neutron_client, instance_id):
128 ports = os_utils.get_port_list(neutron_client)
129 if ports is not None:
131 if port['device_id'] == instance_id:
# Set allowed-address-pairs on a port; returns the updated port id.
# NOTE(review): lines 138, 146-148, 150 missing (early return, dict
# close and the try: before update_port).
136 def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
137 if len(address_pairs) <= 0:
139 allowed_address_pairs = []
140 for address_pair in address_pairs:
141 address_pair_dict = {'ip_address': address_pair.ipaddress,
142 'mac_address': address_pair.macaddress}
143 allowed_address_pairs.append(address_pair_dict)
144 json_body = {'port': {
145 "allowed_address_pairs": allowed_address_pairs
149 port = neutron_client.update_port(port=port_id,
151 return port['port']['id']
152 except Exception as e:
153 logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
154 " %s" % (port_id, address_pairs, e))
# Boot an instance and wait until it is ACTIVE; falls back to the default
# flavor when none is given. Uses Python 2 dict iteration
# (networks.itervalues()) — this listing predates a py3 port.
# NOTE(review): lines 159-169, 174, 181-186, 188-191, 194, 199-200,
# 203, 205 are missing (signature tail, boot kwargs, secgroup branch).
158 def create_instance(nova_client,
170 if 'flavor' not in kwargs:
171 kwargs['flavor'] = common_config.default_flavor
173 logger.info("Creating instance '%s'..." % name)
175 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
176 " network=%s\n secgroup=%s \n hypervisor=%s \n"
177 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
178 % (name, kwargs['flavor'], image_id, network_id, sg_id,
179 compute_node, fixed_ip, files, userdata))
180 instance = os_utils.create_instance_and_wait_for_active(
187 av_zone=compute_node,
192 logger.error("Error while booting instance.")
193 raise Exception("Error while booting instance {}".format(name))
195 logger.debug("Instance '%s' booted successfully. IP='%s'." %
196 (name, instance.networks.itervalues().next()[0]))
197 # Retrieve IP of INSTANCE
198 # instance_ip = instance.networks.get(network_id)[0]
201 logger.debug("Adding '%s' to security group '%s'..."
202 % (name, secgroup_name))
204 logger.debug("Adding '%s' to security group '%s'..."
206 os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
# Build a cloud-init shell script that pings each IP in ips_array in a
# loop and echoes OK/KO per address.
# NOTE(review): lines 212-213, 215, 218-221, 223, 227-231 missing
# (ips init, loop header, parts of the heredoc script).
211 def generate_ping_userdata(ips_array, ping_count=10):
214 ips = ("%s %s" % (ips, ip))
216 ips = ips.replace(' ', ' ')
217 return ("#!/bin/sh\n"
222 " ping -c %s $ip 2>&1 >/dev/null\n"
224 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
225 " echo ping $ip OK\n"
226 " else echo ping $ip KO\n"
# Common cirros userdata: installs an ssh keypair for the cirros user.
# NOTE(review): line 249 (closing paren of the return) is missing.
234 def generate_userdata_common():
235 return ("#!/bin/sh\n"
236 "sudo mkdir -p /home/cirros/.ssh/\n"
237 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
238 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
239 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
240 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
241 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
242 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
243 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
244 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
245 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
246 "chmod 700 /home/cirros/.ssh\n"
247 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
248 "chmod 600 /home/cirros/.ssh/id_rsa\n"
# Userdata that, after the common ssh setup, ssh-es to each IP and
# prints its hostname (or 'not reachable').
# NOTE(review): lines 254-256, 258, 260-264, 267, 270-274 missing.
252 def generate_userdata_with_ssh(ips_array):
253 u1 = generate_userdata_common()
257 ips = ("%s %s" % (ips, ip))
259 ips = ips.replace(' ', ' ')
265 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
266 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
268 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
269 " else echo $ip 'not reachable';fi;\n"
# Userdata creating an sdnvpn user and configuring an aliased interface
# (e.g. eth0:1) with the given IP/netmask.
# NOTE(review): lines 280 and 284 are missing (script lines).
277 def generate_userdata_interface_create(interface_name, interface_number,
278 ip_Address, net_mask):
279 return ("#!/bin/sh\n"
281 "sudo useradd -m sdnvpn\n"
282 "sudo adduser sdnvpn sudo\n"
283 "sudo echo sdnvpn:opnfv | chpasswd\n"
285 "sudo ifconfig %s:%s %s netmask %s up\n"
286 % (interface_name, interface_number,
287 ip_Address, net_mask))
# Build a deployment handler for the detected installer (fuel or apex)
# from the INSTALLER_TYPE / INSTALLER_IP environment variables.
# NOTE(review): lines 293, 297-298, 301-303, 305, 308-311 missing
# (the None return and the get_handler argument lists).
290 def get_installerHandler():
291 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
292 installer_ip = get_installer_ip()
294 if installer_type not in ["fuel", "apex"]:
295 logger.warn("installer type %s is neither fuel nor apex."
296 "returning None for installer handler" % installer_type)
299 if installer_type in ["apex"]:
300 developHandler = DeploymentFactory.get_handler(
304 pkey_file="/root/.ssh/id_rsa")
306 if installer_type in ["fuel"]:
307 developHandler = DeploymentFactory.get_handler(
312 return developHandler
# NOTE(review): the enclosing `def get_nodes():` header (original line
# ~315) is missing from this listing — these two lines are its body:
# fetch the installer handler and return the deployment's nodes.
316 developHandler = get_installerHandler()
317 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP address taken from the INSTALLER_IP env var."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
# Return the first IP of an instance (py2 dict iteration).
# NOTE(review): the `return instance_ip` line (~326) is missing.
324 def get_instance_ip(instance):
325 instance_ip = instance.networks.itervalues().next()[0]
# Poll the instance console until `pattern` appears; returns success flag.
# NOTE(review): lines 331, 333, 337-338, 341-345 missing (sleep_time,
# initial console fetch, tries decrement, final return).
329 def wait_for_instance(instance, pattern=".* login:", tries=40):
330 logger.info("Waiting for instance %s to boot up" % instance.id)
332 expected_regex = re.compile(pattern)
334 while tries > 0 and not expected_regex.search(console_log):
335 console_log = instance.get_console_output()
336 time.sleep(sleep_time)
339 if not expected_regex.search(console_log):
340 logger.error("Instance %s does not boot up properly."
# True when every instance reached the login prompt.
346 def wait_for_instances_up(*instances):
347 check = [wait_for_instance(instance) for instance in instances]
# True when every instance obtained a DHCP lease.
351 def wait_for_instances_get_dhcp(*instances):
352 check = [wait_for_instance(instance, "Lease of .* obtained")
353 for instance in instances]
# Wait for all instances in parallel using the module-level executor.
# NOTE(review): lines 359-360, 363-365, 367, 370, 372 missing
# (early return, futures/results init, submit args, final return).
357 def async_Wait_for_instances(instances, tries=40):
358 if len(instances) <= 0:
361 for instance in instances:
362 future = executor.submit(wait_for_instance,
366 futures.append(future)
368 for future in futures:
369 results.append(future.result())
371 logger.error("one or more instances is not yet booted up")
# Poll nova until instance_id disappears from the instance list.
374 def wait_for_instance_delete(nova_client, instance_id, tries=30):
376 instances = [instance_id]
377 logger.debug("Waiting for instance %s to be deleted"
379 while tries > 0 and instance_id in instances:
380 instances = [instance.id for instance in
381 os_utils.get_instances(nova_client)]
382 time.sleep(sleep_time)
384 if instance_id in instances:
385 logger.error("Deletion of instance %s failed" %
# Poll until net_id shows up in the BGPVPN's network list.
# NOTE(review): tries/sleep init and final returns (~390-392, 399,
# 402-404) are missing.
389 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
393 logger.debug("Waiting for network %s to associate with BGPVPN %s "
394 % (bgpvpn_id, net_id))
396 while tries > 0 and net_id not in nets:
397 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
398 time.sleep(sleep_time)
400 if net_id not in nets:
401 logger.error("Association of network %s with BGPVPN %s failed" %
# All-networks variant of the association wait.
407 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
408 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
410 # Return True if all associations succeeded
# Poll until router_id shows up in the BGPVPN's router list.
414 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
418 logger.debug("Waiting for router %s to associate with BGPVPN %s "
419 % (bgpvpn_id, router_id))
420 while tries > 0 and router_id not in routers:
421 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
422 time.sleep(sleep_time)
424 if router_id not in routers:
425 logger.error("Association of router %s with BGPVPN %s failed" %
426 (router_id, bgpvpn_id))
# All-routers variant of the association wait.
431 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
432 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
434 # Return True if all associations succeeded
# Fixed delay between subtests (placeholder for real polling).
438 def wait_before_subtest(*args, **kwargs):
439 ''' This is a placeholder.
440 TODO: Replace delay with polling logic. '''
# Return the hypervisor list; raise when fewer than 2 compute nodes.
# NOTE(review): the error-format tail (~453) and final return (~459)
# are missing.
444 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
445 """Get the compute nodes in the deployment
446 Exit if the deployment doesn't have enough compute nodes"""
447 compute_nodes = os_utils.get_hypervisors(nova_client)
449 num_compute_nodes = len(compute_nodes)
450 if num_compute_nodes < 2:
451 logger.error("There are %s compute nodes in the deployment. "
452 "Minimum number of nodes to complete the test is 2."
454 raise Exception("There are {} compute nodes in the deployment. "
455 "Minimum number of nodes to complete the test"
456 " is 2.".format(num_compute_nodes))
458 logger.debug("Compute nodes: %s" % compute_nodes)
# Add an ICMP rule to the security group unless an equivalent rule
# already exists.
# NOTE(review): the rule-check / create argument lines (~464-467,
# 469-471, 473, 475) are missing from this listing.
462 def open_icmp(neutron_client, security_group_id):
463 if os_utils.check_security_group_rules(neutron_client,
468 if not os_utils.create_secgroup_rule(neutron_client,
472 logger.error("Failed to create icmp security group rule...")
474 logger.info("This rule exists for security group: %s"
# Same pattern for TCP port 80 (HTTP).
478 def open_http_port(neutron_client, security_group_id):
479 if os_utils.check_security_group_rules(neutron_client,
485 if not os_utils.create_secgroup_rule(neutron_client,
491 logger.error("Failed to create http security group rule...")
493 logger.info("This rule exists for security group: %s"
# Same pattern for the BGP port (TCP 179).
497 def open_bgp_port(neutron_client, security_group_id):
498 if os_utils.check_security_group_rules(neutron_client,
504 if not os_utils.create_secgroup_rule(neutron_client,
509 logger.error("Failed to create bgp security group rule...")
511 logger.info("This rule exists for security group: %s"
# Run a shell command, stream its output, and return (output, success).
# NOTE(review): output/success init (~516), the per-line handling
# (~522-527) and the returncode check (~529, 531-532) are missing.
515 def exec_cmd(cmd, verbose):
517 logger.debug("Executing '%s'" % cmd)
518 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
519 stderr=subprocess.STDOUT)
521 for line in iter(p.stdout.readline, b''):
528 returncode = p.wait()
530 logger.error("Command %s failed to execute." % cmd)
533 return output, success
# Query the ODL RESTCONF FIB config store and report whether `ip`
# appears anywhere in the response body (substring match, not a
# structured lookup).
# NOTE(review): the early `return False` after the status-code check
# (~544) and a log-format line (~546) are missing from this listing.
# ODL_USER/ODL_PASS are module-level credentials defined elsewhere.
536 def check_odl_fib(ip, controller_ip):
537 """Check that there is an entry in the ODL Fib for `ip`"""
538 url = "http://" + controller_ip + \
539 ":8181/restconf/config/odl-fib:fibEntries/"
540 logger.debug("Querring '%s' for FIB entries", url)
541 res = requests.get(url, auth=(ODL_USER, ODL_PASS))
542 if res.status_code != 200:
543 logger.error("OpenDaylight response status code: %s", res.status_code)
545 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
547 logger.debug("OpenDaylight FIB: \n%s" % res.text)
548 return ip in res.text
def run_odl_cmd(odl_node, cmd):
    """Run a command in the OpenDaylight Karaf shell.

    This is a bit flimsy because of shell quote escaping; make sure that
    the cmd passed does not have any top-level double quotes or this
    will break.

    The /dev/null is used because the client works but outputs something
    that contains "ERROR" and run_cmd doesn't like that.

    :param odl_node: node object exposing run_cmd(str)
    :param cmd: Karaf shell command to execute
    :return: whatever odl_node.run_cmd returns
    """
    # Wrap the command for the Karaf remote client; stderr is discarded
    # (see docstring).
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
# Poll an instance's console until cloud-init reports completion or a
# module failure; logs the outcome.
# NOTE(review): tries/sleep init (~565, 567-568), the loop header
# (~571), failure-reason extraction (~574, 576-577), success branch
# (~579-580), tries decrement (~582-584) and parts of the timeout log
# (~586-588) are missing from this listing.
564 def wait_for_cloud_init(instance):
566 # ubuntu images take a long time to start
569 logger.info("Waiting for cloud init of instance: {}"
570 "".format(instance.name))
572 instance_log = instance.get_console_output()
573 if "Failed to run module" in instance_log:
575 logger.error("Cloud init failed to run. Reason: %s",
578 if re.search(r"Cloud-init v. .+ finished at", instance_log):
581 time.sleep(sleep_time)
585 logger.error("Cloud init timed out"
589 logger.info("Finished waiting for cloud init of instance {} result was {}"
590 "".format(instance.name, success))
# Attach a VM to the external bridge. On Apex, br-ex is an OVS bridge,
# so a linux bridge is created and veth-connected to br-ex first.
# NOTE(review): the fuel bridge assignment (~598) and the bridge
# variable / heredoc open for apex (~604-606, 615-616, 618) are missing.
594 def attach_instance_to_ext_br(instance, compute_node):
595 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
596 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
597 if installer_type == "fuel":
599 elif installer_type == "apex":
600 # In Apex, br-ex is an ovs bridge and virsh attach-interface
601 # won't just work. We work around it by creating a linux
602 # bridge, attaching that to br-ex with a veth pair
603 # and virsh-attaching the instance to the linux-bridge
607 if ! sudo brctl show |grep -q ^{bridge};then
608 sudo brctl addbr {bridge}
609 sudo ip link set {bridge} up
610 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
611 sudo ip link set dev ovs-quagga-tap up
612 sudo ip link set dev quagga-tap up
613 sudo ovs-vsctl add-port br-ex ovs-quagga-tap
614 sudo brctl addif {bridge} quagga-tap
617 compute_node.run_cmd(cmd.format(bridge=bridge))
619 compute_node.run_cmd("sudo virsh attach-interface %s"
620 " bridge %s" % (libvirt_instance_name, bridge))
# Detach a VM from the external bridge and tear down the Apex-specific
# veth/linux-bridge plumbing created by attach_instance_to_ext_br.
# NOTE(review): the fuel bridge assignment (~635) and apex bridge /
# heredoc boundary lines (~641-642, 650) are missing.
623 def detach_instance_from_ext_br(instance, compute_node):
624 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
625 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
626 "grep running | awk '{print $2}'); "
627 "do echo -n ; sudo virsh dumpxml $vm| "
628 "grep -oP '52:54:[\da-f:]+' ;done")
629 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
630 " --type bridge --mac %s"
631 % (libvirt_instance_name, mac))
633 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
634 if installer_type == "fuel":
636 elif installer_type == "apex":
637 # In Apex, br-ex is an ovs bridge and virsh attach-interface
638 # won't just work. We work around it by creating a linux
639 # bridge, attaching that to br-ex with a veth pair
640 # and virsh-attaching the instance to the linux-bridge
643 sudo brctl delif {bridge} quagga-tap &&
644 sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
645 sudo ip link set dev quagga-tap down &&
646 sudo ip link set dev ovs-quagga-tap down &&
647 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
648 sudo ip link set {bridge} down &&
649 sudo brctl delbr {bridge}
651 compute_node.run_cmd(cmd.format(bridge=bridge))
# Best-effort teardown of neutron resources in dependency order:
# floating IPs, bgpvpns, router interfaces, gateways, subnets, routers,
# then networks. Logs (does not raise) on each failed delete.
# NOTE(review): `format(...)` tails and `return False/True` lines
# (~675-676, 682-683, 689-691, 697-699, 705-707) are missing.
654 def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
655 subnet_ids, router_ids, network_ids):
657 if len(floatingip_ids) != 0:
658 for floatingip_id in floatingip_ids:
659 if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
660 logger.error('Fail to delete all floating ips. '
661 'Floating ip with id {} was not deleted.'.
662 format(floatingip_id))
665 if len(bgpvpn_ids) != 0:
666 for bgpvpn_id in bgpvpn_ids:
667 delete_bgpvpn(neutron_client, bgpvpn_id)
669 if len(interfaces) != 0:
670 for router_id, subnet_id in interfaces:
671 if not os_utils.remove_interface_router(neutron_client,
672 router_id, subnet_id):
673 logger.error('Fail to delete all interface routers. '
674 'Interface router with id {} was not deleted.'.
677 if len(router_ids) != 0:
678 for router_id in router_ids:
679 if not os_utils.remove_gateway_router(neutron_client, router_id):
680 logger.error('Fail to delete all gateway routers. '
681 'Gateway router with id {} was not deleted.'.
684 if len(subnet_ids) != 0:
685 for subnet_id in subnet_ids:
686 if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
687 logger.error('Fail to delete all subnets. '
688 'Subnet with id {} was not deleted.'.
692 if len(router_ids) != 0:
693 for router_id in router_ids:
694 if not os_utils.delete_neutron_router(neutron_client, router_id):
695 logger.error('Fail to delete all routers. '
696 'Router with id {} was not deleted.'.
700 if len(network_ids) != 0:
701 for network_id in network_ids:
702 if not os_utils.delete_neutron_net(neutron_client, network_id):
703 logger.error('Fail to delete all networks. '
704 'Network with id {} was not deleted.'.
# Delete flavors (if given) and instances, waiting for each instance
# deletion to complete.
# NOTE(review): format tail / return lines (~719-720, 722) missing.
710 def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
711 if flavor_ids is not None and len(flavor_ids) != 0:
712 for flavor_id in flavor_ids:
713 nova_client.flavors.delete(flavor_id)
714 if len(instance_ids) != 0:
715 for instance_id in instance_ids:
716 if not os_utils.delete_instance(nova_client, instance_id):
717 logger.error('Fail to delete all instances. '
718 'Instance with id {} was not deleted.'.
721 wait_for_instance_delete(nova_client, instance_id)
# Delete glance images, logging failures.
# NOTE(review): format tail / return lines (~731-733) missing.
725 def cleanup_glance(glance_client, image_ids):
726 if len(image_ids) != 0:
727 for image_id in image_ids:
728 if not os_utils.delete_glance_image(glance_client, image_id):
729 logger.error('Fail to delete all images. '
730 'Image with id {} was not deleted.'.
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN.

    Keyword args (e.g. route_distinguishers, route_targets, name) are
    passed through verbatim as the bgpvpn attributes.
    """
    return neutron_client.create_bgpvpn({"bgpvpn": kwargs})
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id with the given attributes."""
    body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, body)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id; returns the client result."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch the BGPVPN resource identified by bgpvpn_id."""
    vpn = neutron_client.show_bgpvpn(bgpvpn_id)
    return vpn
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids associated with the given BGPVPN."""
    vpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return vpn['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids associated with the given BGPVPN."""
    vpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return vpn['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router_id with the BGPVPN identified by bgpvpn_id."""
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate neutron_network_id with the BGPVPN identified by bgpvpn_id."""
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
# Check that every node's br-int OVS bridge has fail_mode=secure;
# returns a per-node dict of booleans.
# NOTE(review): docstring delimiters (~775, 779), is_secure init
# (~780), the grep tail of the show command (~783-784), the inactive-
# node branch (~789-790), a strip/split tail (~792), break/else lines
# (~798, 800-801) and the final return (~805-806) are missing.
774 def is_fail_mode_secure():
776 Checks the value of the attribute fail_mode,
777 if it is set to secure. This check is performed
778 on all OVS br-int interfaces, for all OpenStack nodes.
781 openstack_nodes = get_nodes()
782 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
785 # Define OVS get fail_mode command
786 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
787 for openstack_node in openstack_nodes:
788 if not openstack_node.is_active():
791 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
793 if 'br-int' in ovs_int_list:
794 # Execute get fail_mode command
795 br_int_fail_mode = (openstack_node.
796 run_cmd(get_ovs_fail_mode_cmd).strip())
797 if br_int_fail_mode == 'secure':
799 is_secure[openstack_node.name] = True
802 logger.error('The fail_mode for br-int was not secure '
803 'in {} node'.format(openstack_node.name))
804 is_secure[openstack_node.name] = False
# Set neutron network/subnet/port/router quotas for a tenant;
# logs (does not raise) on failure.
# NOTE(review): quota dict lines for network/port (~811, 813), the
# dict close and try: (~815-817), the update_quota body kwarg (~819-
# 820) and the return lines (~826) are missing.
808 def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
809 subnet_quota, port_quota, router_quota):
810 json_body = {"quota": {
812 "subnet": subnet_quota,
814 "router": router_quota
818 neutron_client.update_quota(tenant_id=tenant_id,
821 except Exception as e:
822 logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
823 " '%s', '%s', '%s', '%s, %s')]: %s" %
824 (tenant_id, nw_quota, subnet_quota,
825 port_quota, router_quota, e))
# Update the default nova quota-class instance limit.
# NOTE(review): the try: (~830) and return lines (~832, 836) are missing.
829 def update_instance_quota_class(nova_client, instances_quota):
831 nova_client.quota_classes.update("default", instances=instances_quota)
833 except Exception as e:
834 logger.error("Error [update_instance_quota_class(nova_client,"
835 " '%s' )]: %s" % (instances_quota, e))
# Fetch the neutron quota dict for a tenant; logs on failure.
# NOTE(review): the try: (~840) and failure return (~845) are missing.
839 def get_neutron_quota(neutron_client, tenant_id):
841 return neutron_client.show_quota(tenant_id=tenant_id)['quota']
842 except Exception as e:
843 logger.error("Error in getting neutron quota for tenant "
844 " '%s' )]: %s" % (tenant_id, e))
# Fetch the default nova instances quota; logs on failure.
# NOTE(review): the try: (~849) and failure return (~853) are missing.
848 def get_nova_instances_quota(nova_client):
850 return nova_client.quota_classes.get("default").instances
851 except Exception as e:
852 logger.error("Error in getting nova instances quota: %s" % e)
# Replace a router's extra routes with the given ExtraRoute list.
# NOTE(review): early return (~858), routes_list init (~859), dict
# close / try: (~866-868) and return lines (~870, 873) are missing.
856 def update_router_extra_route(neutron_client, router_id, extra_routes):
857 if len(extra_routes) <= 0:
860 for extra_route in extra_routes:
861 route_dict = {'destination': extra_route.destination,
862 'nexthop': extra_route.nexthop}
863 routes_list.append(route_dict)
864 json_body = {'router': {
865 "routes": routes_list
869 neutron_client.update_router(router_id, body=json_body)
871 except Exception as e:
872 logger.error("Error in updating router with extra route: %s" % e)
# Clear extra routes on each router in router_ids.
# NOTE(review): the empty-routes body lines (~878-880), the try:
# (~882) and return lines (~884, 887) are missing.
876 def update_router_no_extra_route(neutron_client, router_ids):
877 json_body = {'router': {
881 for router_id in router_ids:
883 neutron_client.update_router(router_id, body=json_body)
885 except Exception as e:
886 logger.error("Error in clearing extra route: %s" % e)
# Collect `ovs-ofctl dump-groups` output lines for every bridge present
# on every compute node.
# NOTE(review): docstring delimiters (~890, 894), cmd_out_lines init
# (~895), the splitlines tail (~902) and the return (~903) are missing.
889 def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
891 Gets, as input, a list of compute nodes and a list of OVS bridges
892 and returns the command console output, as a list of lines, that
893 contains all the OVS groups from all bridges and nodes in lists.
896 for compute_node in compute_node_list:
897 for ovs_br in ovs_br_list:
898 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
899 ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
900 "grep group".format(ovs_br, of_protocol))
901 cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
# Same collection pattern for `ovs-ofctl dump-flows`.
# NOTE(review): docstring delimiters, cmd_out_lines init, the
# splitlines tail and the return are missing (as above).
906 def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
908 Gets, as input, a list of compute nodes and a list of OVS bridges
909 and returns the command console output, as a list of lines, that
910 contains all the OVS flows from all bridges and nodes in lists.
913 for compute_node in compute_node_list:
914 for ovs_br in ovs_br_list:
915 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
916 ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
917 "grep table=".format(ovs_br, of_protocol))
918 cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
# Identify which controller hosts the ODL BGP entity owner by querying
# the entity-owners RESTCONF endpoint, then matching the owner's member
# name against each controller's akka.conf.
# NOTE(review): docstring close (~928, 931), status/error handling
# around the REST call (~943, 945, 948), owner-name munging (~950),
# the try/except around per-controller file access (~952, 958, 961-
# 963) and the no-match return (~964) are missing.
923 def get_odl_bgp_entity_owner(controllers):
924 """ Finds the ODL owner of the BGP entity in the cluster.
926 When ODL runs in clustering mode we need to execute the BGP speaker
927 related commands to that ODL which is the owner of the BGP entity.
929 :param controllers: list of OS controllers
930 :return controller: OS controller in which ODL BGP entity owner runs
932 if len(controllers) == 1:
933 return controllers[0]
935 url = ('http://admin:admin@{ip}:8081/restconf/'
936 'operational/entity-owners:entity-owners/entity-type/bgp'
937 .format(ip=controllers[0].ip))
939 remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
941 remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
942 local_tmp_akka_conf = '/tmp/akka.conf'
944 json_output = requests.get(url).json()
946 logger.error('Failed to find the ODL BGP '
947 'entity owner through REST')
949 odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
951 for controller in controllers:
953 controller.run_cmd('sudo cp {0} /home/heat-admin/'
954 .format(remote_odl_akka_conf))
955 controller.run_cmd('sudo chmod 777 {0}'
956 .format(remote_odl_home_akka_conf))
957 controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
959 for line in open(local_tmp_akka_conf):
960 if re.search(odl_bgp_owner, line):
# Register an external GRE tunnel endpoint on ODL via the itm-rpc
# RESTCONF operation; logs (does not raise) on failure.
# NOTE(review): the json_body close (~969), the try: (~974) and the
# headers kwarg line (~976) are missing.
965 def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
966 json_body = {'input':
967 {'destination-ip': remote_tep_ip,
968 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
970 url = ('http://{ip}:8081/restconf/operations/'
971 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
972 headers = {'Content-type': 'application/yang.data+json',
973 'Accept': 'application/yang.data+json'}
975 requests.post(url, data=json.dumps(json_body),
977 auth=HTTPBasicAuth('admin', 'admin'))
978 except Exception as e:
979 logger.error("Failed to create external tunnel endpoint on"
980 " ODL for external tep ip %s with error %s"
981 % (remote_tep_ip, e))
985 def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
986 url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
987 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
988 logger.error("url is %s" % url)
990 vrf_table = requests.get(url).json()
991 is_ipprefix_exists = False
992 for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
993 if vrf_entry['destPrefix'] == ip_prefix:
994 is_ipprefix_exists = True
996 return is_ipprefix_exists
997 except Exception as e:
998 logger.error('Failed to find ip prefix %s with error %s'