# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0

import json
import logging
import os
import re
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from requests.auth import HTTPBasicAuth

from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils

logger = logging.getLogger('sdnvpn_test_utils')

common_config = sdnvpn_config.CommonConfig()

# Default ODL RESTCONF credentials, matching the admin:admin URLs used
# further down in this module.
ODL_USER = 'admin'
ODL_PASS = 'admin'

executor = ThreadPoolExecutor(5)


class ExtraRoute(object):
    """
    Class to represent extra route for a router
    """
    def __init__(self, destination, nexthop):
        self.destination = destination
        self.nexthop = nexthop


class AllowedAddressPair(object):
    """
    Class to represent allowed address pair for a neutron port
    """
    def __init__(self, ipaddress, macaddress):
        self.ipaddress = ipaddress
        self.macaddress = macaddress
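
# Illustrative use of the two value classes above (kept as a comment so the
# module stays side-effect free on import): they simply bundle the fields
# consumed by update_router_extra_route() and
# update_port_allowed_address_pairs() further down in this module.
# Addresses are made up:
#
#   routes = [ExtraRoute('10.10.50.0/24', '10.10.30.253')]
#   update_router_extra_route(neutron_client, router_id, routes)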


def create_default_flavor():
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)


def create_net(neutron_client, name):
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if not net_id:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(name))
    return net_id


def create_subnet(neutron_client, name, cidr, net_id):
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the neutron subnet")
        raise Exception("There has been a problem when creating"
                        " the neutron subnet {}".format(name))
    return subnet_id


def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            raise Exception("There has been a problem when creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id


def get_port(neutron_client, instance_id):
    ports = os_utils.get_port_list(neutron_client)
    if ports is not None:
        for port in ports:
            if port['device_id'] == instance_id:
                return port
    return None


def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    if len(address_pairs) <= 0:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)
    json_body = {'port': {
        "allowed_address_pairs": allowed_address_pairs
    }}

    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]"
                     ": %s" % (port_id, address_pairs, e))
        return None
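
# Example call (assumes a live neutron client and an existing port id;
# values are illustrative):
#
#   pairs = [AllowedAddressPair('10.10.50.5', 'fa:16:3e:11:22:33')]
#   update_port_allowed_address_pairs(neutron_client, port_id, pairs)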


def create_instance(nova_client, name, image_id, network_id, sg_id,
                    secgroup_name=None, fixed_ip=None, compute_node='',
                    userdata=None, files=None, **kwargs):
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'], image_id, network_id, name,
        userdata=userdata, av_zone=compute_node,
        fixed_ip=fixed_ip, files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, next(iter(instance.networks.values()))[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance


def generate_ping_userdata(ips_array, ping_count=10):
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for ip in $*; do\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))
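
# Sketch of how the generated userdata is consumed (hypothetical IPs): the
# returned shell script is handed to create_instance() above, which passes
# it to nova as instance userdata:
#
#   userdata = generate_ping_userdata(['10.10.10.1', '10.10.10.2'])
#   vm = create_instance(nova_client, 'ping-vm', image_id, network_id,
#                        sg_id, userdata=userdata)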


def generate_userdata_common():
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for ip in $*; do\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_address, net_mask):
    return ("#!/bin/sh\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_address, net_mask))
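
# For illustration, generate_userdata_interface_create('eth0', 1,
# '10.10.10.2', '255.255.255.0') returns a script whose last line is:
#
#   sudo ifconfig eth0:1 10.10.10.2 netmask 255.255.255.0 up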


def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warning("Installer type %s is neither fuel nor apex. "
                       "Returning None for installer handler"
                       % installer_type)
        return None

    if installer_type in ["apex"]:
        developHandler = DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            pkey_file="/root/.ssh/id_rsa")

    if installer_type in ["fuel"]:
        developHandler = DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            'r00tme')
    return developHandler


def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(instance):
    instance_ip = next(iter(instance.networks.values()))[0]
    return instance_ip


def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = instance.get_console_output()
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
                     % instance.id)
        return False
    return True


def wait_for_instances_up(*instances):
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)


def wait_for_instances_get_dhcp(*instances):
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)


def async_Wait_for_instances(instances, tries=40):
    if len(instances) <= 0:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)

    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("one or more instances is not yet booted up")


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
    TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes


def open_icmp(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)


def open_http_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):
            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)


def open_bgp_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):
        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)


def exec_cmd(cmd, verbose):
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        output += line
        if verbose:
            logger.debug(line)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success


def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`"""
    url = ("http://" + controller_ip +
           ":8181/restconf/config/odl-fib:fibEntries/")
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
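
# Illustrative check (addresses are made up): after associating a network
# with a BGPVPN, verify that a VM IP was advertised into the ODL FIB:
#
#   if check_odl_fib('10.10.10.5', '192.168.0.10'):
#       logger.info('FIB entry present')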


def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
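
# Example invocation (illustrative): single quotes inside the command are
# fine, but top-level double quotes would break the escaping described in
# the docstring above. 'log:display' is a standard Karaf shell command:
#
#   output = run_odl_cmd(odl_node, 'log:display | grep -i bgp')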


def wait_for_cloud_init(instance):
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = instance.get_console_output()
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            break
        time.sleep(sleep_time)
        tries -= 1
    if tries == 0:
        logger.error("Cloud init timed out"
                     " for instance {}".format(instance.name))
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success


def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))


def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        # In Fuel, br-ex is a linux bridge; nothing more to clean up
        pass
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        sudo brctl delif {bridge} quagga-tap &&
        sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
        sudo ip link set dev quagga-tap down &&
        sudo ip link set dev ovs-quagga-tap down &&
        sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
        sudo ip link set {bridge} down &&
        sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))


def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):

    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logger.error('Failed to delete all floating ips. '
                             'Floating ip with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logger.error('Failed to delete all interface routers. '
                             'Interface router with id {} was not deleted.'.
                             format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logger.error('Failed to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logger.error('Failed to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logger.error('Failed to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logger.error('Failed to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True


def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            nova_client.flavors.delete(flavor_id)
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(nova_client, instance_id):
                logger.error('Failed to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
                return False
    return True


def cleanup_glance(glance_client, image_ids):
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(glance_client, image_id):
                logger.error('Failed to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True


def create_bgpvpn(neutron_client, **kwargs):
    # kwargs accepted by the BGPVPN API include route_distinguishers,
    # route_targets, import_targets, export_targets and name
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)


def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']


def create_router_association(neutron_client, bgpvpn_id, router_id):
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)
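
# Minimal end-to-end sketch using only helpers from this module (the route
# target value and the ids are illustrative):
#
#   bgpvpn = create_bgpvpn(neutron_client, name='test-vpn',
#                          route_targets='88:88')
#   bgpvpn_id = bgpvpn['bgpvpn']['id']
#   create_network_association(neutron_client, bgpvpn_id, net_id)
#   wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)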


def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep Bridge | awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'in {} node'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure


def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    json_body = {"quota": {
        "network": nw_quota,
        "subnet": subnet_quota,
        "port": port_quota,
        "router": router_quota
    }}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body=json_body)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False


def update_instance_quota_class(nova_client, instances_quota):
    try:
        nova_client.quota_classes.update("default", instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(nova_client,"
                     " '%s')]: %s" % (instances_quota, e))
        return False


def get_neutron_quota(neutron_client, tenant_id):
    try:
        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant '%s': %s"
                     % (tenant_id, e))
        return None


def get_nova_instances_quota(nova_client):
    try:
        return nova_client.quota_classes.get("default").instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        return None


def update_router_extra_route(neutron_client, router_id, extra_routes):
    if len(extra_routes) <= 0:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)
    json_body = {'router': {
        "routes": routes_list
    }}

    try:
        neutron_client.update_router(router_id, body=json_body)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        return False


def update_router_no_extra_route(neutron_client, router_ids):
    json_body = {'router': {
        "routes": []
    }}

    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)


def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).
                                  strip().split("\n"))
    return cmd_out_lines


def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS flows from all bridges and nodes in lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).
                                  strip().split("\n"))
    return cmd_out_lines
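
# Example (illustrative): collect the br-int flow dump from every compute
# node and log how many flow lines came back:
#
#   flows = get_ovs_flows(compute_nodes, ['br-int'])
#   logger.debug('%d flow lines collected', len(flows))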


def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode we need to execute the BGP speaker
    related commands to that ODL which is the owner of the BGP entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs
    """
    if len(controllers) == 1:
        return controllers[0]

    url = ('http://admin:admin@{ip}:8081/restconf/'
           'operational/entity-owners:entity-owners/entity-type/bgp'
           .format(ip=controllers[0].ip))

    remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                            'initial/akka.conf')
    remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
    local_tmp_akka_conf = '/tmp/akka.conf'

    try:
        json_output = requests.get(url).json()
    except Exception:
        logger.error('Failed to find the ODL BGP '
                     'entity owner through REST')
        return None
    odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

    for controller in controllers:
        controller.run_cmd('sudo cp {0} /home/heat-admin/'
                           .format(remote_odl_akka_conf))
        controller.run_cmd('sudo chmod 777 {0}'
                           .format(remote_odl_home_akka_conf))
        controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

        for line in open(local_tmp_akka_conf):
            if re.search(odl_bgp_owner, line):
                return controller
    return None


def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
                 }
    url = ('http://{ip}:8081/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'
           .format(ip=controllers[0].ip))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))


def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
    logger.debug("url is %s" % url)
    try:
        vrf_table = requests.get(url).json()
        is_ipprefix_exists = False
        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
            if vrf_entry['destPrefix'] == ip_prefix:
                is_ipprefix_exists = True
                break
        return is_ipprefix_exists
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False