3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
17 from concurrent.futures import ThreadPoolExecutor
18 from requests.auth import HTTPBasicAuth
20 from opnfv.deployment.factory import Factory as DeploymentFactory
22 from sdnvpn.lib import config as sdnvpn_config
23 import sdnvpn.lib.openstack_utils as os_utils
# Module-level logger shared by every helper in this file.
logger = logging.getLogger('sdnvpn_test_utils')

# Shared SDNVPN test configuration (default/custom flavors, etc.).
common_config = sdnvpn_config.CommonConfig()

# Thread pool used to wait for several instance boots concurrently.
executor = ThreadPoolExecutor(5)
class ExtraRoute(object):
    """Represent an extra (static) route of a neutron router.

    Fixed: the docstring delimiters were missing, which made the class
    body a syntax error.
    """

    def __init__(self, destination, nexthop):
        # destination: CIDR the route matches; nexthop: IP of the next hop.
        self.destination = destination
        self.nexthop = nexthop
class AllowedAddressPair(object):
    """Represent an allowed address pair of a neutron port.

    Fixed: the docstring delimiters were missing, which made the class
    body a syntax error.
    """

    def __init__(self, ipaddress, macaddress):
        # ipaddress/macaddress: the IP/MAC combination the port may use.
        self.ipaddress = ipaddress
        self.macaddress = macaddress
def create_default_flavor():
    """Ensure the default test flavor exists and return the helper's result."""
    flavor_spec = (common_config.default_flavor,
                   common_config.default_flavor_ram,
                   common_config.default_flavor_disk,
                   common_config.default_flavor_vcpus)
    return os_utils.get_or_create_flavor(*flavor_spec)
def create_custom_flavor():
    """Ensure the custom test flavor exists and return the helper's result."""
    flavor_spec = (common_config.custom_flavor_name,
                   common_config.custom_flavor_ram,
                   common_config.custom_flavor_disk,
                   common_config.custom_flavor_vcpus)
    return os_utils.get_or_create_flavor(*flavor_spec)
def create_net(conn, name):
    """Create a neutron network and return its id.

    :param conn: openstack SDK connection
    :param name: name of the network to create
    :raises Exception: when network creation fails
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(conn, name)
    # Restored: the failure guard around the error/raise was truncated.
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(name))
    return net_id
def create_subnet(conn, name, cidr, net_id):
    """Create a neutron subnet on an existing network and return its id.

    :param conn: openstack SDK connection
    :param name: subnet name
    :param cidr: subnet CIDR
    :param net_id: id of the parent network
    :raises Exception: when subnet creation fails
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    # Argument order matches the other create_neutron_subnet call in this
    # file: (conn, name, cidr, net_id).
    subnet_id = os_utils.create_neutron_subnet(conn,
                                               name,
                                               cidr,
                                               net_id)
    # Restored: the failure guard around the error/raise was truncated.
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        raise Exception("There has been a problem when creating"
                        " the neutron subnet {}".format(name))
    return subnet_id
def create_network(conn, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    # NOTE(review): the argument list of create_network_full was truncated in
    # the source; reconstructed as (conn, net, subnet, router, cidr) -- verify
    # against the os_utils helper signature.
    network_dic = os_utils.create_network_full(conn,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            conn, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
def get_port(conn, instance_id):
    """Return the neutron port whose device_id matches `instance_id`.

    :return: the port object, or None if no port is attached to the instance
    """
    ports = os_utils.get_port_list(conn)
    # Restored: the loop header and return statements were truncated.
    for port in ports:
        if port.device_id == instance_id:
            return port
    return None
def update_port_allowed_address_pairs(conn, port_id, address_pairs):
    """Set the allowed-address-pairs attribute of a neutron port.

    :param conn: openstack SDK connection
    :param port_id: id of the port to update
    :param address_pairs: list of AllowedAddressPair objects
    :return: the updated port id on success, None on failure or empty input
    """
    if len(address_pairs) <= 0:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)

    # Restored: the try/return lines around the update call were truncated.
    try:
        port = conn.network.\
            update_port(port_id, allowed_address_pairs=allowed_address_pairs)
        return port.id
    except Exception as e:
        logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None
# NOTE(review): the source is truncated here -- the full parameter list of
# create_instance (name, image_id, network_id, sg_id, secgroup_name,
# fixed_ip, compute_node, userdata, files, **kwargs, judging by the body)
# is not visible; verify against upstream before editing.
def create_instance(conn,
    # Fall back to the shared default flavor unless the caller chose one.
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
        # Continuation of a (truncated) logger.debug call dumping the
        # full boot configuration.
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    # Boot the server and wait for ACTIVE (call arguments truncated).
    instance = os_utils.create_instance_and_wait_for_active(
        av_zone=compute_node,
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))

    # Retrieve IP of INSTANCE
    network_name = conn.network.get_network(network_id).name
    instance_ip = conn.compute.get_server(instance).\
        addresses.get(network_name)[0]['addr']

    logger.debug("Instance '%s' booted successfully. IP='%s'." %
    logger.debug("Adding '%s' to security group '%s'..."
                 % (name, secgroup_name))
    logger.debug("Adding '%s' to security group '%s'..."
    os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
    # NOTE(review): the trailing `return instance` appears truncated.
# Build a cloud-init shell script that pings every address in `ips_array`
# `ping_count` times and prints "ping <ip> OK" / "ping <ip> KO" per target.
# NOTE(review): source truncated -- the accumulator init, the for-loop
# header and the tail of the returned script are missing; verify upstream.
def generate_ping_userdata(ips_array, ping_count=10):
        ips = ("%s %s" % (ips, ip))
    ips = ips.replace(' ', ' ')
    return ("#!/bin/sh\n"
            " ping -c %s $ip 2>&1 >/dev/null\n"
            " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "  echo ping $ip OK\n"
            " else echo ping $ip KO\n"
def generate_userdata_common():
    """Return the cloud-init shell script shared by all test VMs.

    The script installs the pre-shared SSH key pair for the 'cirros' user
    so instances booted by these tests can SSH to each other.

    Fixed: the return expression was left unterminated (missing closing
    parenthesis) in the source.
    """
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n")
# Build a cloud-init script: the common SSH-key setup (u1) followed by a
# loop that SSHes to each address in `ips_array` and echoes its hostname.
# NOTE(review): source truncated -- the ip accumulation loop header, the
# start of the second script fragment and the final concatenation/return
# are missing; verify upstream.
def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()
        ips = ("%s %s" % (ips, ip))
    ips = ips.replace(' ', ' ')
            " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
            "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
            " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
            " else echo $ip 'not reachable';fi;\n"
# Build a cloud-init script that creates an sdnvpn sudo user and brings up
# a sub-interface (<interface_name>:<interface_number>) with the given IP
# and netmask.
# NOTE(review): two interior script lines appear truncated from the source
# (between the shebang/useradd block and the ifconfig line) -- verify
# against upstream.
def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    return ("#!/bin/sh\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))
# Return a deployment handler for the current installer (fuel or apex),
# built from the INSTALLER_TYPE / INSTALLER_IP environment variables.
# NOTE(review): the DeploymentFactory.get_handler() argument lists and the
# early-return for unsupported installers are truncated in this source --
# verify against upstream before editing.
def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warn("installer type %s is neither fuel nor apex."
                    "returning None for installer handler" % installer_type)

    if installer_type in ["apex"]:
        developHandler = DeploymentFactory.get_handler(
            pkey_file="/root/.ssh/id_rsa")

    if installer_type in ["fuel"]:
        developHandler = DeploymentFactory.get_handler(
    return developHandler
    # NOTE(review): this is the body of get_nodes(); its `def get_nodes():`
    # line is not visible in this excerpt. It returns the node list from
    # the installer-specific deployment handler.
    developHandler = get_installerHandler()
    return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP read from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
def get_instance_ip(conn, instance):
    """Return the first address of the first network of an instance.

    Fixed: `dict.values()[0]` is invalid on Python 3 (dict views are not
    subscriptable); materialize the view first. The truncated trailing
    `return` is also restored.
    """
    addresses = conn.compute.get_server(instance).addresses
    instance_ip = list(addresses.values())[0][0]['addr']
    return instance_ip
# Poll the instance console log until `pattern` (default: a login prompt)
# appears, or `tries` polls elapse. Returns a boolean success flag in the
# upstream version.
# NOTE(review): source truncated -- the `sleep_time` / `console_log`
# initialisations, the `tries` decrement and the return statements are
# missing; verify against upstream.
def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    conn = os_utils.get_os_connection()
    expected_regex = re.compile(pattern)
    while tries > 0 and not expected_regex.search(console_log):
        console_log = conn.compute.\
            get_server_console_output(instance)['output']
        time.sleep(sleep_time)

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
def wait_for_instances_up(*instances):
    """Wait until every given instance shows its login prompt.

    :return: True when all instances booted successfully, False otherwise

    Restored: the truncated trailing `return all(check)`.
    """
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)
def wait_for_instances_get_dhcp(*instances):
    """Wait until every given instance has obtained a DHCP lease.

    :return: True when all instances got a lease, False otherwise

    Restored: the truncated trailing `return all(check)`.
    """
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)
def async_Wait_for_instances(instances, tries=40):
    """Wait for all given instances to boot, polling them concurrently.

    Each instance is watched by wait_for_instance() on the module thread
    pool; failures are logged (no exception is raised).
    """
    if len(instances) <= 0:
        return
    futures = []
    for instance in instances:
        # Restored: the submit() arguments and list initialisations were
        # truncated in the source.
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)

    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("one or more instances is not yet booted up")
def wait_for_instance_delete(conn, instance_id, tries=30):
    """Poll nova until `instance_id` disappears or `tries` polls elapse."""
    # NOTE(review): poll interval reconstructed -- verify against upstream.
    sleep_time = 2
    instances = [instance_id]
    logger.debug("Waiting for instance %s to be deleted"
                 % (instance_id))
    while tries > 0 and instance_id in instances:
        instances = [instance.id for instance in
                     os_utils.get_instances(conn)]
        time.sleep(sleep_time)
        # Restored: without this decrement the loop could never terminate.
        tries -= 1

    if instance_id in instances:
        logger.error("Deletion of instance %s failed" %
                     (instance_id))
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Wait until `net_id` is associated with the given BGPVPN.

    :return: True when the association shows up, False on timeout
    """
    # NOTE(review): retry count / interval reconstructed -- verify upstream.
    tries = 30
    sleep_time = 1
    nets = []
    # Fixed: the format arguments were swapped relative to the message
    # (it names the network first).
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for each network in `args` to associate with the BGPVPN.

    Restored: the truncated comprehension tail and return statement.
    """
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Wait until `router_id` is associated with the given BGPVPN.

    :return: True when the association shows up, False on timeout
    """
    # NOTE(review): retry count / interval reconstructed -- verify upstream.
    tries = 30
    sleep_time = 1
    routers = []
    # Fixed: the format arguments were swapped relative to the message
    # (it names the router first).
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for each router in `args` to associate with the BGPVPN.

    Restored: the truncated comprehension tail and return statement.
    """
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)
def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    # NOTE(review): the sleep body was truncated; the 30 s delay is
    # reconstructed from upstream -- verify.
    time.sleep(30)
def assert_and_get_compute_nodes(conn, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(conn)

    num_compute_nodes = len(compute_nodes)
    # Fixed: honour the `required_node_number` parameter instead of the
    # hard-coded 2 (default keeps the previous behaviour).
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
# Ensure an ICMP ingress rule exists on the given security group: create
# it if missing, otherwise just log that it is already present.
# NOTE(review): the check_security_group_rules / create_secgroup_rule
# argument lists and the else branch are truncated in this source --
# verify against upstream.
def open_icmp(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
        if not os_utils.create_secgroup_rule(conn,
            logger.error("Failed to create icmp security group rule...")
        logger.info("This rule exists for security group: %s"
# Ensure an HTTP (TCP) ingress rule exists on the given security group:
# create it if missing, otherwise just log that it is already present.
# NOTE(review): the helper call argument lists and the else branch are
# truncated in this source -- verify against upstream.
def open_http_port(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
        if not os_utils.create_secgroup_rule(conn,
            logger.error("Failed to create http security group rule...")
        logger.info("This rule exists for security group: %s"
# Ensure a BGP (TCP/179) ingress rule exists on the given security group:
# create it if missing, otherwise just log that it is already present.
# NOTE(review): the helper call argument lists and the else branch are
# truncated in this source -- verify against upstream.
def open_bgp_port(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
        if not os_utils.create_secgroup_rule(conn,
            logger.error("Failed to create bgp security group rule...")
        logger.info("This rule exists for security group: %s"
# Run a shell command, capturing combined stdout/stderr, and return the
# (output, success) pair.
# NOTE(review): `shell=True` -- make sure `cmd` never contains untrusted
# input. The output accumulation, verbose echoing and success flag
# handling are truncated in this source; verify against upstream.
def exec_cmd(cmd, verbose):
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
    returncode = p.wait()
        logger.error("Command %s failed to execute." % cmd)

    return output, success
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`

    :return: True when `ip` appears in the FIB dump, False otherwise
    (including on a non-200 REST response)
    """
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    # Fixed: log-message typo "Querring" -> "Querying".
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        # Restored: the truncated early return on REST failure.
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell

    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    will break.

    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    # Wrap the command for the local Karaf client; stderr is discarded
    # (see docstring).
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
# Poll an instance's console log until cloud-init reports completion or
# failure, returning a boolean `success` in the upstream version.
# NOTE(review): source truncated -- the retry loop header, sleep interval,
# success flag handling and the final `return success` are missing; verify
# against upstream.
def wait_for_cloud_init(conn, instance):
    # ubuntu images take a long time to start
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
        instance_log = conn.compute.\
            get_server_console_output(instance)['output']
        if "Failed to run module" in instance_log:
            logger.error("Cloud init failed to run. Reason: %s",
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
        time.sleep(sleep_time)

    logger.error("Cloud init timed out"
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
# Attach a libvirt instance to the external bridge of the deployment.
# NOTE(review): source truncated -- the fuel branch body, the `bridge`
# variable assignments and the opening of the triple-quoted shell `cmd`
# string are missing; the bare shell lines below were the interior of that
# string. Verify against upstream before editing.
def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        if ! sudo brctl show |grep -q ^{bridge};then
        sudo brctl addbr {bridge}
        sudo ip link set {bridge} up
        sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
        sudo ip link set dev ovs-quagga-tap up
        sudo ip link set dev quagga-tap up
        sudo ovs-vsctl add-port br-ex ovs-quagga-tap
        sudo brctl addif {bridge} quagga-tap
        compute_node.run_cmd(cmd.format(bridge=bridge))

        compute_node.run_cmd("sudo virsh attach-interface %s"
                             " bridge %s" % (libvirt_instance_name, bridge))
# Detach a libvirt instance from the external bridge and, on Apex, tear
# down the helper linux bridge / veth pair created at attach time.
# NOTE(review): source truncated -- the fuel branch body, the `bridge`
# assignments and the opening of the triple-quoted shell `cmd` string are
# missing; the bare shell lines below were the interior of that string.
# The mac-discovery regex should ideally be a raw string (r'52:54:...').
def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    # Find the MAC of the attached interface by dumping the domain XML of
    # every running VM.
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        sudo brctl delif {bridge} quagga-tap &&
        sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
        sudo ip link set dev quagga-tap down &&
        sudo ip link set dev ovs-quagga-tap down &&
        sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
        sudo ip link set {bridge} down &&
        sudo brctl delbr {bridge}
        compute_node.run_cmd(cmd.format(bridge=bridge))
def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
                    interfaces, subnet_ids, router_ids, network_ids):
    """Delete the given neutron resources in dependency order.

    :return: False as soon as a deletion fails (where the original aborted),
             True when everything was removed.

    Restored: the truncated `.format(...)` continuations and the
    return statements.
    """
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(conn, floatingip_id):
                logger.error('Fail to delete all floating ips. '
                             'Floating ip with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(conn,
                                                    router_id, subnet_id):
                logger.error('Fail to delete all interface routers. '
                             'Interface router with id {} was not deleted.'.
                             format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(conn, router_id):
                logger.error('Fail to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(conn, subnet_id):
                logger.error('Fail to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(conn, router_id):
                logger.error('Fail to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(conn, network_id):
                logger.error('Fail to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True
def cleanup_nova(conn, instance_ids, flavor_ids=None):
    """Delete the given flavors and instances, waiting for each instance.

    :return: False as soon as an instance deletion fails, True otherwise.

    Restored: the truncated `.format(...)` continuation and returns.
    """
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            conn.compute.delete_flavor(flavor_id)
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(conn, instance_id):
                logger.error('Fail to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
                return False
            else:
                wait_for_instance_delete(conn, instance_id)
    return True
def cleanup_glance(conn, image_ids):
    """Delete the given glance images.

    :return: False as soon as a deletion fails, True otherwise.

    Restored: the truncated `.format(...)` continuation and returns.
    """
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(conn, image_id):
                logger.error('Fail to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN built from the given keyword arguments."""
    # route_distinguishers
    return neutron_client.create_bgpvpn({"bgpvpn": kwargs})
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update an existing BGPVPN with the given attributes."""
    payload = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, payload)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN with the given id."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the BGPVPN resource with the given id."""
    bgpvpn = neutron_client.show_bgpvpn(bgpvpn_id)
    return bgpvpn
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the routers currently associated with a BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the networks currently associated with a BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate a router with a BGPVPN."""
    payload = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, payload)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate a neutron network with a BGPVPN."""
    payload = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, payload)
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.
    """
    # NOTE(review): source truncated -- the `is_secure` dict init, the
    # grep tail of get_ovs_int_cmd, the inactive-node `continue`, the
    # strip()/split() tail, the else branch and the final return are
    # missing; verify against upstream.
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
                logger.error('The fail_mode for br-int was not secure '
                             'in {} node'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    """Update the neutron network/subnet/port/router quotas of a tenant.

    :return: True on success, False on failure.

    Restored: the truncated try/return statements.
    """
    try:
        conn.network.update_quota(tenant_id, networks=nw_quota,
                                  subnets=subnet_quota, ports=port_quota,
                                  routers=router_quota)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(network,"
                     " '%s', '%s', '%s', '%s, %s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False
def update_instance_quota_class(cloud, instances_quota):
    """Update the 'admin' compute quota for the number of instances.

    :return: True on success, False on failure.

    Restored: the truncated try/return statements.
    """
    try:
        cloud.set_compute_quotas('admin', instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(compute,"
                     " '%s' )]: %s" % (instances_quota, e))
        return False
def get_neutron_quota(conn, tenant_id):
    """Return the neutron quota object of a tenant, or None on error.

    Fixed: generator `.next()` is Python 2 only; the builtin `next()`
    works on both Python 2.6+ and Python 3. The truncated `try` is
    restored.
    """
    try:
        return next(conn.network.quotas(project_id=tenant_id))
    except Exception as e:
        logger.error("Error in getting network quota for tenant "
                     " '%s' )]: %s" % (tenant_id, e))
        return None
def get_nova_instances_quota(cloud):
    """Return the 'admin' compute instances quota, or None on error.

    Restored: the truncated `try` statement.
    """
    try:
        return cloud.get_compute_quotas('admin').instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        return None
def update_router_extra_route(conn, router_id, extra_routes):
    """Set the extra-route list of a router from ExtraRoute objects.

    Does nothing when `extra_routes` is empty; errors are logged, not
    raised. Restored: the truncated guard, list init and try statement.
    """
    if len(extra_routes) <= 0:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)

    try:
        conn.network.update_router(router_id, routes=routes_list)
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
def update_router_no_extra_route(conn, router_ids):
    """Clear the extra-route list of every router in `router_ids`.

    Errors are logged per router, not raised. Restored: the truncated
    `try` statement.
    """
    for router_id in router_ids:
        try:
            conn.network.update_router(router_id, routes=[])
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in lists.

    Restored: the truncated accumulator init, split() tail and return.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            # Only dump bridges that actually exist on this node.
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS flows from all bridges and nodes in lists.

    Restored: the truncated accumulator init, split() tail and return.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            # Only dump bridges that actually exist on this node.
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode we need to execute the BGP speaker
    related commands to that ODL which is the owner of the BGP entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs
    """
    # NOTE(review): source truncated -- the else branch structure, the
    # akka.conf path tail, the REST error guard, the member-name mapping
    # and the final return are missing; verify against upstream.
    if len(controllers) == 1:
        return controllers[0]
        url = ('http://admin:admin@{ip}:8081/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(ip=controllers[0].ip))

        # Paths used to copy the ODL akka.conf off each controller so the
        # cluster member name can be matched to the REST owner id.
        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
        local_tmp_akka_conf = '/tmp/akka.conf'
        json_output = requests.get(url).json()
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        for controller in controllers:
            # Stage the akka.conf where the heat-admin user can read it,
            # then fetch it locally and look for the owner id.
            controller.run_cmd('sudo cp {0} /home/heat-admin/'
                               .format(remote_odl_akka_conf))
            controller.run_cmd('sudo chmod 777 {0}'
                               .format(remote_odl_home_akka_conf))
            controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

            for line in open(local_tmp_akka_conf):
                if re.search(odl_bgp_owner, line):
def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    """Create an external MPLS-over-GRE tunnel endpoint on ODL via REST.

    Errors are logged, not raised. Restored: the truncated dict closing
    brace, `try` statement and `headers=` argument.
    """
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}}
    url = ('http://{ip}:8081/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
973 def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
974 url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
975 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
976 logger.error("url is %s" % url)
978 vrf_table = requests.get(url).json()
979 is_ipprefix_exists = False
980 for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
981 if vrf_entry['destPrefix'] == ip_prefix:
982 is_ipprefix_exists = True
984 return is_ipprefix_exists
985 except Exception as e:
986 logger.error('Failed to find ip prefix %s with error %s'