3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
17 from concurrent.futures import ThreadPoolExecutor
18 from openstack.exceptions import ResourceNotFound, NotFoundException
19 from requests.auth import HTTPBasicAuth
21 from functest.utils import env
22 from opnfv.deployment.factory import Factory as DeploymentFactory
24 from sdnvpn.lib import config as sdnvpn_config
25 import sdnvpn.lib.openstack_utils as os_utils
27 logger = logging.getLogger('sdnvpn_test_utils')
29 common_config = sdnvpn_config.CommonConfig()
31 ODL_USER = env.get('SDN_CONTROLLER_USER')
32 ODL_PASSWORD = env.get('SDN_CONTROLLER_PASSWORD')
33 ODL_IP = env.get('SDN_CONTROLLER_IP')
34 ODL_PORT = env.get('SDN_CONTROLLER_RESTCONFPORT')
36 executor = ThreadPoolExecutor(5)
39 class ExtraRoute(object):
41 Class to represent extra route for a router
44 def __init__(self, destination, nexthop):
45 self.destination = destination
46 self.nexthop = nexthop
49 class AllowedAddressPair(object):
51 Class to represent allowed address pair for a neutron port
54 def __init__(self, ipaddress, macaddress):
55 self.ipaddress = ipaddress
56 self.macaddress = macaddress
def create_default_flavor():
    """Ensure the default test flavor exists and return it.

    Delegates to os_utils.get_or_create_flavor with the default flavor
    parameters taken from the common SDNVPN configuration.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.default_flavor,
                                         cfg.default_flavor_ram,
                                         cfg.default_flavor_disk,
                                         cfg.default_flavor_vcpus)
def create_custom_flavor():
    """Ensure the custom test flavor exists and return it.

    Delegates to os_utils.get_or_create_flavor with the custom flavor
    parameters taken from the common SDNVPN configuration.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
73 def create_net(conn, name):
74 logger.debug("Creating network %s", name)
75 net_id = os_utils.create_neutron_net(conn, name)
78 "There has been a problem when creating the neutron network")
79 raise Exception("There has been a problem when creating"
80 " the neutron network {}".format(name))
84 def create_subnet(conn, name, cidr, net_id):
85 logger.debug("Creating subnet %s in network %s with cidr %s",
87 subnet_id = os_utils.create_neutron_subnet(conn,
93 "There has been a problem when creating the neutron subnet")
94 raise Exception("There has been a problem when creating"
95 " the neutron subnet {}".format(name))
99 def create_network(conn, net, subnet1, cidr1,
100 router, subnet2=None, cidr2=None):
101 """Network assoc won't work for networks/subnets created by this function.
102 It is an ODL limitation due to it handling routers as vpns.
103 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
104 network_dic = os_utils.create_network_full(conn,
111 "There has been a problem when creating the neutron network")
112 raise Exception("There has been a problem when creating"
113 " the neutron network {}".format(net))
114 net_id = network_dic["net_id"]
115 subnet_id = network_dic["subnet_id"]
116 router_id = network_dic["router_id"]
118 if subnet2 is not None:
119 logger.debug("Creating and attaching a second subnet...")
120 subnet_id = os_utils.create_neutron_subnet(
121 conn, subnet2, cidr2, net_id)
124 "There has been a problem when creating the second subnet")
125 raise Exception("There has been a problem when creating"
126 " the second subnet {}".format(subnet2))
127 logger.debug("Subnet '%s' created successfully" % subnet_id)
128 return net_id, subnet_id, router_id
131 def get_port(conn, instance_id):
132 ports = os_utils.get_port_list(conn)
134 if port.device_id == instance_id:
139 def update_port_allowed_address_pairs(conn, port_id, address_pairs):
140 if len(address_pairs) <= 0:
142 allowed_address_pairs = []
143 for address_pair in address_pairs:
144 address_pair_dict = {'ip_address': address_pair.ipaddress,
145 'mac_address': address_pair.macaddress}
146 allowed_address_pairs.append(address_pair_dict)
149 port = conn.network.\
150 update_port(port_id, allowed_address_pairs=allowed_address_pairs)
152 except Exception as e:
153 logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
154 " %s" % (port_id, address_pairs, e))
158 def create_instance(conn,
170 if 'flavor' not in kwargs:
171 kwargs['flavor'] = common_config.default_flavor
173 logger.info("Creating instance '%s'..." % name)
175 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
176 " network=%s\n secgroup=%s \n hypervisor=%s \n"
177 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
178 % (name, kwargs['flavor'], image_id, network_id, sg_id,
179 compute_node, fixed_ip, files, userdata))
180 instance = os_utils.create_instance_and_wait_for_active(
187 av_zone=compute_node,
192 logger.error("Error while booting instance.")
193 raise Exception("Error while booting instance {}".format(name))
195 # Retrieve IP of INSTANCE
196 network_name = conn.network.get_network(network_id).name
197 instance_ip = conn.compute.get_server(instance).\
198 addresses.get(network_name)[0]['addr']
199 logger.debug("Instance '%s' booted successfully. IP='%s'." %
203 logger.debug("Adding '%s' to security group '%s'..."
204 % (name, secgroup_name))
206 logger.debug("Adding '%s' to security group '%s'..."
208 os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
213 def generate_ping_userdata(ips_array, ping_count=10):
216 ips = ("%s %s" % (ips, ip))
218 ips = ips.replace(' ', ' ')
219 return ("#!/bin/sh\n"
224 " ping -c %s $ip 2>&1 >/dev/null\n"
226 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
227 " echo ping $ip OK\n"
228 " else echo ping $ip KO\n"
236 def generate_userdata_common():
237 return ("#!/bin/sh\n"
238 "sudo mkdir -p /home/cirros/.ssh/\n"
239 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
240 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
241 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
242 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
243 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
244 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
245 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
246 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
247 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
248 "chmod 700 /home/cirros/.ssh\n"
249 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
250 "chmod 600 /home/cirros/.ssh/id_rsa\n"
254 def generate_userdata_with_ssh(ips_array):
255 u1 = generate_userdata_common()
259 ips = ("%s %s" % (ips, ip))
261 ips = ips.replace(' ', ' ')
267 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
268 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
270 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
271 " else echo $ip 'not reachable';fi;\n"
279 def generate_userdata_interface_create(interface_name, interface_number,
280 ip_Address, net_mask):
281 return ("#!/bin/sh\n"
283 "sudo useradd -m sdnvpn\n"
284 "sudo adduser sdnvpn sudo\n"
285 "sudo echo sdnvpn:opnfv | chpasswd\n"
287 "sudo ifconfig %s:%s %s netmask %s up\n"
288 % (interface_name, interface_number,
289 ip_Address, net_mask))
292 def get_installerHandler():
293 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
294 installer_ip = get_installer_ip()
296 if installer_type not in ["fuel", "apex"]:
297 logger.warn("installer type %s is neither fuel nor apex."
298 "returning None for installer handler" % installer_type)
301 if installer_type in ["apex"]:
302 installer_user = "root"
303 elif installer_type in ["fuel"]:
304 installer_user = "ubuntu"
306 developHandler = DeploymentFactory.get_handler(
310 pkey_file="/root/.ssh/id_rsa")
312 return developHandler
316 developHandler = get_installerHandler()
317 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer node IP read from the INSTALLER_IP env variable.

    Raises KeyError if INSTALLER_IP is not set, matching the original
    direct os.environ lookup.
    """
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
324 def get_instance_ip(conn, instance):
325 instance_ip = conn.compute.get_server(instance).\
326 addresses.values()[0][0]['addr']
330 def wait_for_instance(instance, pattern=".* login:", tries=40):
331 logger.info("Waiting for instance %s to boot up" % instance.id)
332 conn = os_utils.get_os_connection()
334 expected_regex = re.compile(pattern)
336 while tries > 0 and not expected_regex.search(console_log):
337 console_log = conn.compute.\
338 get_server_console_output(instance)['output']
339 time.sleep(sleep_time)
342 if not expected_regex.search(console_log):
343 logger.error("Instance %s does not boot up properly."
349 def wait_for_instances_up(*instances):
350 check = [wait_for_instance(instance) for instance in instances]
354 def wait_for_instances_get_dhcp(*instances):
355 check = [wait_for_instance(instance, "Lease of .* obtained")
356 for instance in instances]
360 def async_Wait_for_instances(instances, tries=40):
361 if len(instances) <= 0:
364 for instance in instances:
365 future = executor.submit(wait_for_instance,
369 futures.append(future)
371 for future in futures:
372 results.append(future.result())
374 logger.error("one or more instances is not yet booted up")
377 def wait_for_instance_delete(conn, instance_id, tries=30):
379 instances = [instance_id]
380 logger.debug("Waiting for instance %s to be deleted"
382 while tries > 0 and instance_id in instances:
383 instances = [instance.id for instance in
384 os_utils.get_instances(conn)]
385 time.sleep(sleep_time)
387 if instance_id in instances:
388 logger.error("Deletion of instance %s failed" %
392 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
396 logger.debug("Waiting for network %s to associate with BGPVPN %s "
397 % (bgpvpn_id, net_id))
399 while tries > 0 and net_id not in nets:
400 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
401 time.sleep(sleep_time)
403 if net_id not in nets:
404 logger.error("Association of network %s with BGPVPN %s failed" %
410 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
411 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
413 # Return True if all associations succeeded
417 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
421 logger.debug("Waiting for router %s to associate with BGPVPN %s "
422 % (bgpvpn_id, router_id))
423 while tries > 0 and router_id not in routers:
424 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
425 time.sleep(sleep_time)
427 if router_id not in routers:
428 logger.error("Association of router %s with BGPVPN %s failed" %
429 (router_id, bgpvpn_id))
434 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
435 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
437 # Return True if all associations succeeded
441 def wait_before_subtest(*args, **kwargs):
442 ''' This is a placeholder.
443 TODO: Replace delay with polling logic. '''
447 def assert_and_get_compute_nodes(conn, required_node_number=2):
448 """Get the compute nodes in the deployment
449 Exit if the deployment doesn't have enough compute nodes"""
450 compute_nodes = os_utils.get_hypervisors(conn)
452 num_compute_nodes = len(compute_nodes)
453 if num_compute_nodes < 2:
454 logger.error("There are %s compute nodes in the deployment. "
455 "Minimum number of nodes to complete the test is 2."
457 raise Exception("There are {} compute nodes in the deployment. "
458 "Minimum number of nodes to complete the test"
459 " is 2.".format(num_compute_nodes))
461 logger.debug("Compute nodes: %s" % compute_nodes)
465 def open_icmp(conn, security_group_id):
466 if os_utils.check_security_group_rules(conn,
471 if not os_utils.create_secgroup_rule(conn,
475 logger.error("Failed to create icmp security group rule...")
477 logger.info("This rule exists for security group: %s"
481 def open_http_port(conn, security_group_id):
482 if os_utils.check_security_group_rules(conn,
488 if not os_utils.create_secgroup_rule(conn,
494 logger.error("Failed to create http security group rule...")
496 logger.info("This rule exists for security group: %s"
500 def open_bgp_port(conn, security_group_id):
501 if os_utils.check_security_group_rules(conn,
507 if not os_utils.create_secgroup_rule(conn,
512 logger.error("Failed to create bgp security group rule...")
514 logger.info("This rule exists for security group: %s"
518 def exec_cmd(cmd, verbose):
520 logger.debug("Executing '%s'" % cmd)
521 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
522 stderr=subprocess.STDOUT)
524 for line in iter(p.stdout.readline, b''):
531 returncode = p.wait()
533 logger.error("Command %s failed to execute." % cmd)
536 return output, success
539 def check_odl_fib(ip):
540 """Check that there is an entry in the ODL Fib for `ip`"""
541 url = ("http://{user}:{password}@{ip}:{port}/restconf/config/"
542 "odl-fib:fibEntries/".format(user=ODL_USER,
543 password=ODL_PASSWORD, ip=ODL_IP,
545 logger.debug("Querring '%s' for FIB entries", url)
546 res = requests.get(url, auth=(ODL_USER, ODL_PASSWORD))
547 if res.status_code != 200:
548 logger.error("OpenDaylight response status code: %s", res.status_code)
550 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
552 logger.debug("OpenDaylight FIB: \n%s" % res.text)
553 return ip in res.text
556 def run_odl_cmd(odl_node, cmd):
557 '''Run a command in the OpenDaylight Karaf shell
558 This is a bit flimsy because of shell quote escaping, make sure that
559 the cmd passed does not have any top level double quotes or this
561 The /dev/null is used because client works, but outputs something
562 that contains "ERROR" and run_cmd doesn't like that.
564 karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
565 ' 2>/dev/null' % cmd)
566 return odl_node.run_cmd(karaf_cmd)
569 def wait_for_cloud_init(conn, instance):
571 # ubuntu images take a long time to start
574 logger.info("Waiting for cloud init of instance: {}"
575 "".format(instance.name))
577 instance_log = conn.compute.\
578 get_server_console_output(instance)['output']
579 if "Failed to run module" in instance_log:
581 logger.error("Cloud init failed to run. Reason: %s",
584 if re.search(r"Cloud-init v. .+ finished at", instance_log):
587 time.sleep(sleep_time)
591 logger.error("Cloud init timed out"
595 logger.info("Finished waiting for cloud init of instance {} result was {}"
596 "".format(instance.name, success))
600 def attach_instance_to_ext_br(instance, compute_node):
601 libvirt_instance_name = instance.instance_name
602 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
603 # In Apex, br-ex (or br-floating for Fuel) is an ovs bridge and virsh
604 # attach-interface won't just work. We work around it by creating a linux
605 # bridge, attaching that to br-ex (or br-floating for Fuel) with a
606 # veth pair and virsh-attaching the instance to the linux-bridge
607 if installer_type in ["fuel"]:
608 bridge = "br-floating"
609 elif installer_type in ["apex"]:
612 logger.warn("installer type %s is neither fuel nor apex."
618 if ! sudo brctl show |grep -q ^br-quagga;then
619 sudo brctl addbr br-quagga
620 sudo ip link set br-quagga up
621 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
622 sudo ip link set dev ovs-quagga-tap up
623 sudo ip link set dev quagga-tap up
624 sudo ovs-vsctl add-port {bridge} ovs-quagga-tap
625 sudo brctl addif br-quagga quagga-tap
628 compute_node.run_cmd(cmd.format(bridge=bridge))
630 compute_node.run_cmd("sudo virsh attach-interface %s"
631 " bridge br-quagga" % (libvirt_instance_name))
634 def detach_instance_from_ext_br(instance, compute_node):
635 libvirt_instance_name = instance.instance_name
636 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
637 # This function undoes all the actions performed by
638 # attach_instance_to_ext_br on Fuel and Apex installers.
639 if installer_type in ["fuel"]:
640 bridge = "br-floating"
641 elif installer_type in ["apex"]:
644 logger.warn("installer type %s is neither fuel nor apex."
647 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
648 "grep running | awk '{print $2}'); "
649 "do echo -n ; sudo virsh dumpxml $vm| "
650 "grep -oP '52:54:[\da-f:]+' ;done")
651 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
652 " --type bridge --mac %s"
653 % (libvirt_instance_name, mac))
656 sudo brctl delif br-quagga quagga-tap &&
657 sudo ovs-vsctl del-port {bridge} ovs-quagga-tap &&
658 sudo ip link set dev quagga-tap down &&
659 sudo ip link set dev ovs-quagga-tap down &&
660 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
661 sudo ip link set br-quagga down &&
662 sudo brctl delbr br-quagga
664 compute_node.run_cmd(cmd.format(bridge=bridge))
667 def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
668 interfaces, subnet_ids, router_ids, network_ids):
669 if len(floatingip_ids) != 0:
670 for floatingip_id in floatingip_ids:
671 if not os_utils.delete_floating_ip(conn, floatingip_id):
672 logger.error('Fail to delete all floating ips. '
673 'Floating ip with id {} was not deleted.'.
674 format(floatingip_id))
677 if len(bgpvpn_ids) != 0:
678 for bgpvpn_id in bgpvpn_ids:
679 delete_bgpvpn(neutron_client, bgpvpn_id)
681 if len(interfaces) != 0:
682 for router_id, subnet_id in interfaces:
683 if not os_utils.remove_interface_router(conn,
684 router_id, subnet_id):
685 logger.error('Fail to delete all interface routers. '
686 'Interface router with id {} was not deleted.'.
689 if len(router_ids) != 0:
690 for router_id in router_ids:
691 if not os_utils.remove_gateway_router(conn, router_id):
692 logger.error('Fail to delete all gateway routers. '
693 'Gateway router with id {} was not deleted.'.
696 if len(subnet_ids) != 0:
697 for subnet_id in subnet_ids:
698 if not os_utils.delete_neutron_subnet(conn, subnet_id):
699 logger.error('Fail to delete all subnets. '
700 'Subnet with id {} was not deleted.'.
704 if len(router_ids) != 0:
705 for router_id in router_ids:
706 if not os_utils.delete_neutron_router(conn, router_id):
707 logger.error('Fail to delete all routers. '
708 'Router with id {} was not deleted.'.
712 if len(network_ids) != 0:
713 for network_id in network_ids:
714 if not os_utils.delete_neutron_net(conn, network_id):
715 logger.error('Fail to delete all networks. '
716 'Network with id {} was not deleted.'.
722 def cleanup_nova(conn, instance_ids, flavor_ids=None):
723 if flavor_ids is not None and len(flavor_ids) != 0:
724 for flavor_id in flavor_ids:
725 conn.compute.delete_flavor(flavor_id)
726 if len(instance_ids) != 0:
727 for instance_id in instance_ids:
728 if not os_utils.delete_instance(conn, instance_id):
729 logger.error('Fail to delete all instances. '
730 'Instance with id {} was not deleted.'.
733 wait_for_instance_delete(conn, instance_id)
737 def cleanup_glance(conn, image_ids):
738 if len(image_ids) != 0:
739 for image_id in image_ids:
740 if not os_utils.delete_glance_image(conn, image_id):
741 logger.error('Fail to delete all images. '
742 'Image with id {} was not deleted.'.
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN resource from the given keyword attributes.

    Attributes such as route_distinguishers are passed straight through
    inside the "bgpvpn" request body.
    """
    request = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(request)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN's attributes from the given keyword arguments."""
    request = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, request)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN resource identified by bgpvpn_id."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch and return the BGPVPN resource with the given id."""
    response = neutron_client.show_bgpvpn(bgpvpn_id)
    return response
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids currently associated with the BGPVPN."""
    vpn_attrs = get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']
    return vpn_attrs['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids currently associated with the BGPVPN."""
    vpn_attrs = get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']
    return vpn_attrs['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate the given router with the BGPVPN via the neutron client."""
    request = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, request)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate the given neutron network with the BGPVPN."""
    request = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, request)
786 def is_fail_mode_secure():
788 Checks the value of the attribute fail_mode,
789 if it is set to secure. This check is performed
790 on all OVS br-int interfaces, for all OpenStack nodes.
793 openstack_nodes = get_nodes()
794 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
797 # Define OVS get fail_mode command
798 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
799 for openstack_node in openstack_nodes:
800 if not openstack_node.is_active():
803 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
804 if installer_type in ['fuel']:
806 'controller' in openstack_node.roles or
807 'opendaylight' in openstack_node.roles or
808 'installer' in openstack_node.roles
812 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
814 if 'br-int' in ovs_int_list:
815 # Execute get fail_mode command
816 br_int_fail_mode = (openstack_node.
817 run_cmd(get_ovs_fail_mode_cmd).strip())
818 if br_int_fail_mode == 'secure':
820 is_secure[openstack_node.name] = True
823 logger.error('The fail_mode for br-int was not secure '
824 'in {} node'.format(openstack_node.name))
825 is_secure[openstack_node.name] = False
829 def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
830 subnet_quota, port_quota, router_quota):
832 conn.network.update_quota(tenant_id, networks=nw_quota,
833 subnets=subnet_quota, ports=port_quota,
834 routers=router_quota)
836 except Exception as e:
837 logger.error("Error [update_nw_subnet_port_quota(network,"
838 " '%s', '%s', '%s', '%s, %s')]: %s" %
839 (tenant_id, nw_quota, subnet_quota,
840 port_quota, router_quota, e))
844 def update_instance_quota_class(cloud, instances_quota):
846 cloud.set_compute_quotas('admin', instances=instances_quota)
848 except Exception as e:
849 logger.error("Error [update_instance_quota_class(compute,"
850 " '%s' )]: %s" % (instances_quota, e))
854 def get_neutron_quota(conn, tenant_id):
856 return conn.network.get_quota(tenant_id)
857 except ResourceNotFound as e:
858 logger.error("Error in getting network quota for tenant "
859 " '%s' )]: %s" % (tenant_id, e))
863 def get_nova_instances_quota(cloud):
865 return cloud.get_compute_quotas('admin').instances
866 except Exception as e:
867 logger.error("Error in getting nova instances quota: %s" % e)
871 def update_router_extra_route(conn, router_id, extra_routes):
872 if len(extra_routes) <= 0:
875 for extra_route in extra_routes:
876 route_dict = {'destination': extra_route.destination,
877 'nexthop': extra_route.nexthop}
878 routes_list.append(route_dict)
881 conn.network.update_router(router_id, routes=routes_list)
883 except Exception as e:
884 logger.error("Error in updating router with extra route: %s" % e)
888 def update_router_no_extra_route(conn, router_ids):
889 for router_id in router_ids:
891 conn.network.update_router(router_id, routes=[])
893 except Exception as e:
894 logger.error("Error in clearing extra route: %s" % e)
897 def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
899 Gets, as input, a list of compute nodes and a list of OVS bridges
900 and returns the command console output, as a list of lines, that
901 contains all the OVS groups from all bridges and nodes in lists.
904 for compute_node in compute_node_list:
905 for ovs_br in ovs_br_list:
906 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
907 ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
908 "grep group".format(ovs_br, of_protocol))
909 cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
914 def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
916 Gets, as input, a list of compute nodes and a list of OVS bridges
917 and returns the command console output, as a list of lines, that
918 contains all the OVS flows from all bridges and nodes in lists.
921 for compute_node in compute_node_list:
922 for ovs_br in ovs_br_list:
923 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
924 ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
925 "grep table=".format(ovs_br, of_protocol))
926 cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
931 def get_node_ip_and_netmask(node, iface):
932 cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
934 mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
935 mgmt_ip = mgmt_net_cidr[0].split('/')[0]
936 mgmt_netmask = mgmt_net_cidr[0].split('/')[1]
938 return mgmt_ip, mgmt_netmask
941 def get_odl_bgp_entity_owner(odl_nodes):
942 """ Finds the ODL owner of the BGP entity in the cluster.
944 When ODL runs in clustering mode we need to execute the BGP speaker
945 related commands to that ODL which is the owner of the BGP entity.
947 :param odl_nodes: list of Opendaylight nodes
948 :return odl_node: Opendaylight node in which ODL BGP entity owner runs
950 if len(odl_nodes) == 1:
953 url = ('http://{user}:{password}@{ip}:{port}/restconf/'
954 'operational/entity-owners:entity-owners/entity-type/bgp'
955 .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
958 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
959 if installer_type in ['apex']:
960 node_user = 'heat-admin'
961 elif installer_type in ['fuel']:
964 remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
966 remote_odl_home_akka_conf = '/home/{0}/akka.conf'.format(node_user)
967 local_tmp_akka_conf = '/tmp/akka.conf'
969 json_output = requests.get(url).json()
971 logger.error('Failed to find the ODL BGP '
972 'entity owner through REST')
974 odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
976 for odl_node in odl_nodes:
977 if installer_type in ['apex']:
978 get_odl_id_cmd = 'sudo docker ps -qf name=opendaylight_api'
979 odl_id = odl_node.run_cmd(get_odl_id_cmd)
980 odl_node.run_cmd('sudo docker cp '
981 '{container_id}:{odl_akka_conf} '
983 .format(container_id=odl_id,
984 odl_akka_conf=remote_odl_akka_conf,
986 elif installer_type in ['fuel']:
987 odl_node.run_cmd('sudo cp {0} /home/{1}/'
988 .format(remote_odl_akka_conf, node_user))
989 odl_node.run_cmd('sudo chmod 777 {0}'
990 .format(remote_odl_home_akka_conf))
991 odl_node.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
993 for line in open(local_tmp_akka_conf):
994 if re.search(odl_bgp_owner, line):
999 def add_quagga_external_gre_end_point(odl_nodes, remote_tep_ip):
1000 json_body = {'input':
1001 {'destination-ip': remote_tep_ip,
1002 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
1004 url = ('http://{ip}:{port}/restconf/operations/'
1005 'itm-rpc:add-external-tunnel-endpoint'.format(ip=ODL_IP,
1007 headers = {'Content-type': 'application/yang.data+json',
1008 'Accept': 'application/yang.data+json'}
1010 requests.post(url, data=json.dumps(json_body),
1012 auth=HTTPBasicAuth(ODL_USER, ODL_PASSWORD))
1013 except Exception as e:
1014 logger.error("Failed to create external tunnel endpoint on"
1015 " ODL for external tep ip %s with error %s"
1016 % (remote_tep_ip, e))
1020 def is_fib_entry_present_on_odl(odl_nodes, ip_prefix, vrf_id):
1021 url = ('http://{user}:{password}@{ip}:{port}/restconf/config/'
1022 'odl-fib:fibEntries/vrfTables/{vrf}/'
1023 .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
1024 port=ODL_PORT, vrf=vrf_id))
1025 logger.error("url is %s" % url)
1027 vrf_table = requests.get(url).json()
1028 is_ipprefix_exists = False
1029 for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
1030 if vrf_entry['destPrefix'] == ip_prefix:
1031 is_ipprefix_exists = True
1033 return is_ipprefix_exists
1034 except Exception as e:
1035 logger.error('Failed to find ip prefix %s with error %s'
1040 def wait_stack_for_status(conn, stack_id, stack_status, limit=12):
1041 """ Waits to reach specified stack status. To be used with
1042 CREATE_COMPLETE and UPDATE_COMPLETE.
1043 Will try a specific number of attempts at 10sec intervals
1046 :param stack_id: the stack id returned by create_stack api call
1047 :param stack_status: the stack status waiting for
1048 :param limit: the maximum number of attempts
1050 logger.debug("Stack '%s' create started" % stack_id)
1052 stack_create_complete = False
1054 while attempts < limit:
1056 stack_st = conn.orchestration.get_stack(stack_id).status
1057 except NotFoundException:
1058 logger.error("Stack create failed")
1059 raise SystemError("Stack create failed")
1061 if stack_st == stack_status:
1062 stack_create_complete = True
1067 logger.debug("Stack status check: %s times" % attempts)
1068 if stack_create_complete is False:
1069 logger.error("Stack create failed")
1070 raise SystemError("Stack create failed")
1076 def delete_stack_and_wait(conn, stack_id, limit=12):
1077 """ Starts and waits for completion of delete stack
1079 Will try a specific number of attempts at 10sec intervals
1082 :param stack_id: the id of the stack to be deleted
1083 :param limit: the maximum number of attempts
1085 delete_started = False
1086 if stack_id is not None:
1087 delete_started = os_utils.delete_stack(conn, stack_id)
1089 if delete_started is True:
1090 logger.debug("Stack delete succesfully started")
1092 logger.error("Stack delete start failed")
1094 stack_delete_complete = False
1096 while attempts < limit:
1098 stack_st = conn.orchestration.get_stack(stack_id).status
1099 if stack_st == 'DELETE_COMPLETE':
1100 stack_delete_complete = True
1104 except NotFoundException:
1105 stack_delete_complete = True
1108 logger.debug("Stack status check: %s times" % attempts)
1109 if not stack_delete_complete:
1110 logger.error("Stack delete failed")
1111 raise SystemError("Stack delete failed")
1117 def get_heat_environment(testcase, common_config):
1118 """ Reads the heat parameters of a testcase into a yaml object
1120 Each testcase where Heat Orchestratoin Template (HOT) is introduced
1121 has an associated parameters section.
1122 Reads testcase.heat_parameters section and read COMMON_CONFIG.flavor
1123 and place it under parameters tree.
1125 :param testcase: the tescase for which the HOT file is fetched
1126 :param common_config: the common config section
1127 :return environment: a yaml object to be used as environment
1129 fl = common_config.default_flavor
1130 param_dict = testcase.heat_parameters
1131 param_dict['flavor'] = fl
1132 env_dict = {'parameters': param_dict}
1136 def get_vms_from_stack_outputs(conn, stack_id, vm_stack_output_keys):
1137 """ Converts a vm name from a heat stack output to a nova vm object
1139 :param stack_id: the id of the stack to fetch the vms from
1140 :param vm_stack_output_keys: a list of stack outputs with the vm names
1141 :return vms: a list of vm objects corresponding to the outputs
1144 for vmk in vm_stack_output_keys:
1145 vm_output = os_utils.get_output(conn, stack_id, vmk)
1146 if vm_output is not None:
1147 vm_name = vm_output['output_value']
1148 logger.debug("vm '%s' read from heat output" % vm_name)
1149 vm = os_utils.get_instance_by_name(conn, vm_name)