3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
18 import functest.utils.openstack_utils as os_utils
19 from opnfv.deployment.factory import Factory as DeploymentFactory
21 from sdnvpn.lib import config as sdnvpn_config
# Module-level logger and shared SDNVPN test configuration used by every
# helper below.
23 logger = logging.getLogger('sdnvpn_test_utils')
25 common_config = sdnvpn_config.CommonConfig()
def create_custom_flavor():
    """Get or create the custom nova flavor described in the common config.

    Delegates to os_utils.get_or_create_flavor with the configured flavor
    name, RAM, disk and vCPU count, and returns its result unchanged.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
# Create a neutron network called `name` and return its id.
# NOTE(review): original lines 41-42 are missing from this extract, so the
# error branch that emits the message below is only partially visible.
38 def create_net(neutron_client, name):
39 logger.debug("Creating network %s", name)
40 net_id = os_utils.create_neutron_net(neutron_client, name)
43 "There has been a problem when creating the neutron network")
# Create a subnet `name` with `cidr` inside network `net_id` and return its
# id.
# NOTE(review): original lines 50, 52-56 are missing, so the full argument
# list and the error handling are not visible in this extract.
48 def create_subnet(neutron_client, name, cidr, net_id):
49 logger.debug("Creating subnet %s in network %s with cidr %s",
51 subnet_id = os_utils.create_neutron_subnet(neutron_client,
57 "There has been a problem when creating the neutron subnet")
# Create a full network (net + subnet + router) and optionally attach a
# second subnet; returns (net_id, subnet_id, router_id).
# NOTE(review): original lines 68-73, 75, 79, 84-85, 87 are missing, so the
# create_network_full arguments and the error branches are incomplete here.
62 def create_network(neutron_client, net, subnet1, cidr1,
63 router, subnet2=None, cidr2=None):
64 """Network assoc won't work for networks/subnets created by this function.
65 It is an ODL limitation due to it handling routers as vpns.
66 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
67 network_dic = os_utils.create_network_full(neutron_client,
74 "There has been a problem when creating the neutron network")
76 net_id = network_dic["net_id"]
77 subnet_id = network_dic["subnet_id"]
78 router_id = network_dic["router_id"]
80 if subnet2 is not None:
81 logger.debug("Creating and attaching a second subnet...")
82 subnet_id = os_utils.create_neutron_subnet(
83 neutron_client, subnet2, cidr2, net_id)
86 "There has been a problem when creating the second subnet")
88 logger.debug("Subnet '%s' created successfully" % subnet_id)
89 return net_id, subnet_id, router_id
# Boot a nova instance, wait for it to become ACTIVE, then add it to a
# security group; returns the instance object.
# NOTE(review): many original lines (93-103, 106, 108, 115-120, 122-125,
# 127-128, 133-134, 137, 139) are missing, so the signature and parts of
# the body are incomplete in this extract.
# NOTE(review): instance.networks.itervalues().next() below is Python 2
# only; Python 3 would need next(iter(instance.networks.values())).
92 def create_instance(nova_client,
104 if 'flavor' not in kwargs:
105 kwargs['flavor'] = common_config.default_flavor
107 logger.info("Creating instance '%s'..." % name)
109 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
110 " network=%s\n secgroup=%s \n hypervisor=%s \n"
111 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
112 % (name, kwargs['flavor'], image_id, network_id, sg_id,
113 compute_node, fixed_ip, files, userdata))
114 instance = os_utils.create_instance_and_wait_for_active(
121 av_zone=compute_node,
126 logger.error("Error while booting instance.")
129 logger.debug("Instance '%s' booted successfully. IP='%s'." %
130 (name, instance.networks.itervalues().next()[0]))
131 # Retrieve IP of INSTANCE
132 # instance_ip = instance.networks.get(network_id)[0]
135 logger.debug("Adding '%s' to security group '%s'..."
136 % (name, secgroup_name))
138 logger.debug("Adding '%s' to security group '%s'..."
140 os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
# Build a cloud-init shell script that pings each address in ips_array
# `ping_count` times and prints "OK"/"KO" per address.
# NOTE(review): original lines 146-147, 149, 152-155, 157, 161-165 are
# missing, so the ips accumulation loop and the tail of the script are not
# visible in this extract.
145 def generate_ping_userdata(ips_array, ping_count=10):
148 ips = ("%s %s" % (ips, ip))
150 ips = ips.replace('  ', ' ')
151 return ("#!/bin/sh\n"
156 " ping -c %s $ip 2>&1 >/dev/null\n"
158 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
159 " echo ping $ip OK\n"
160 " else echo ping $ip KO\n"
# Return a cloud-init shell script that installs an SSH keypair for the
# cirros user: moves a pre-copied private key into ~/.ssh, installs a
# hard-coded public key into authorized_keys and fixes permissions.
# NOTE(review): the closing lines of the returned string literal (original
# 183+) are missing from this extract.
168 def generate_userdata_common():
169 return ("#!/bin/sh\n"
170 "sudo mkdir -p /home/cirros/.ssh/\n"
171 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
172 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
173 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
174 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
175 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
176 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
177 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
178 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
179 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
180 "chmod 700 /home/cirros/.ssh\n"
181 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
182 "chmod 600 /home/cirros/.ssh/id_rsa\n"
# Build a cloud-init script that first installs the common SSH setup
# (generate_userdata_common) and then ssh-es into each IP in ips_array to
# echo its hostname, reporting unreachable hosts.
# NOTE(review): original lines 188-190, 192, 194-198, 201, 204+ are missing
# (ips loop header, script head/tail and the final return are not visible).
186 def generate_userdata_with_ssh(ips_array):
187 u1 = generate_userdata_common()
191 ips = ("%s %s" % (ips, ip))
193 ips = ips.replace('  ', ' ')
199 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
200 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
202 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
203 " else echo $ip 'not reachable';fi;\n"
# Build a deployment handler for the current installer (fuel or apex)
# using INSTALLER_TYPE / INSTALLER_IP from the environment; returns None
# (via the warn branch) for any other installer type.
# NOTE(review): original lines 214, 218-219, 222-224, 226, 229-232 are
# missing, so the get_handler argument lists are incomplete here.
211 def get_installerHandler():
212 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
213 installer_ip = get_installer_ip()
215 if installer_type not in ["fuel", "apex"]:
216 logger.warn("installer type %s is neither fuel nor apex."
217 "returning None for installer handler" % installer_type)
220 if installer_type in ["apex"]:
221 developHandler = DeploymentFactory.get_handler(
225 pkey_file="/root/.ssh/id_rsa")
227 if installer_type in ["fuel"]:
228 developHandler = DeploymentFactory.get_handler(
233 return developHandler
# NOTE(review): this is the body of get_nodes() -- its `def` line (original
# ~236) is missing from this extract. It returns the deployment nodes from
# the installer handler.
237 developHandler = get_installerHandler()
238 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP address as a string.

    Reads the INSTALLER_IP environment variable; raises KeyError when the
    variable is not set.
    """
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
# Return the first IP address of a nova instance (first value of its
# networks mapping).
# NOTE(review): the return statement (original 247) is missing from this
# extract; itervalues()/.next() is Python 2 only.
245 def get_instance_ip(instance):
246 instance_ip = instance.networks.itervalues().next()[0]
# Poll an instance's console log until `pattern` (default: a login prompt)
# appears, sleeping between retries.
# NOTE(review): original lines 252-253, 255, 259-260, 263+ are missing --
# tries/sleep_time/console_log initialisation and the return value are not
# visible in this extract.
250 def wait_for_instance(instance, pattern=".* login:"):
251 logger.info("Waiting for instance %s to boot up" % instance.id)
254 expected_regex = re.compile(pattern)
256 while tries > 0 and not expected_regex.search(console_log):
257 console_log = instance.get_console_output()
258 time.sleep(sleep_time)
261 if not expected_regex.search(console_log):
262 logger.error("Instance %s does not boot up properly."
# Wait for every given instance to reach its login prompt.
# NOTE(review): the aggregation/return of `check` (original 270+) is
# missing from this extract -- presumably `return all(check)`.
268 def wait_for_instances_up(*instances):
269 check = [wait_for_instance(instance) for instance in instances]
# Wait for every given instance's console to show a DHCP lease acquisition.
# NOTE(review): the aggregation/return of `check` (original 276+) is
# missing from this extract.
273 def wait_for_instances_get_dhcp(*instances):
274 check = [wait_for_instance(instance, "Lease of .* obtained")
275 for instance in instances]
# Poll until net_id appears in the BGPVPN's network associations.
# NOTE(review): the debug message interpolates (bgpvpn_id, net_id) into
# "network %s ... BGPVPN %s" -- the arguments look swapped; same for the
# error message below.
# NOTE(review): tries/sleep_time/nets initialisation and the return value
# (original 280-282, 285, 289, 292+) are missing from this extract.
279 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
283 logger.debug("Waiting for network %s to associate with BGPVPN %s "
284 % (bgpvpn_id, net_id))
286 while tries > 0 and net_id not in nets:
287 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
288 time.sleep(sleep_time)
290 if net_id not in nets:
291 logger.error("Association of network %s with BGPVPN %s failed" %
# Wait for several network associations on the same BGPVPN.
# NOTE(review): the iterable the comprehension loops over and the return
# (original 299, 301+) are missing from this extract.
297 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
298 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
300 # Return True if all associations succeeded
# Poll until router_id appears in the BGPVPN's router associations.
# NOTE(review): the debug message interpolates (bgpvpn_id, router_id) into
# "router %s ... BGPVPN %s" -- the arguments look swapped.
# NOTE(review): tries/sleep_time/routers initialisation and the return
# value (original 305-307, 313, 317+) are missing from this extract.
304 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
308 logger.debug("Waiting for router %s to associate with BGPVPN %s "
309 % (bgpvpn_id, router_id))
310 while tries > 0 and router_id not in routers:
311 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
312 time.sleep(sleep_time)
314 if router_id not in routers:
315 logger.error("Association of router %s with BGPVPN %s failed" %
316 (router_id, bgpvpn_id))
# Wait for several router associations on the same BGPVPN.
# NOTE(review): the iterable the comprehension loops over and the return
# (original 323, 325+) are missing from this extract.
321 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
322 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
324 # Return True if all associations succeeded
# Fixed delay between subtests.
# NOTE(review): the body after the docstring (original 331+, presumably a
# time.sleep) is missing from this extract.
328 def wait_before_subtest(*args, **kwargs):
329 ''' This is a placeholder.
330 TODO: Replace delay with polling logic. '''
# Return the deployment's compute nodes, exiting when there are too few.
# NOTE(review): `required_node_number` is ignored -- the visible check
# hard-codes 2; it should presumably compare against the parameter.
# NOTE(review): original lines 338, 343-345, 347-348 (exit path and return)
# are missing from this extract.
334 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
335 """Get the compute nodes in the deployment
336 Exit if the deployment doesn't have enough compute nodes"""
337 compute_nodes = os_utils.get_hypervisors(nova_client)
339 num_compute_nodes = len(compute_nodes)
340 if num_compute_nodes < 2:
341 logger.error("There are %s compute nodes in the deployment. "
342 "Minimum number of nodes to complete the test is 2."
346 logger.debug("Compute nodes: %s" % compute_nodes)
# Ensure an ICMP-allow rule exists on the given security group, creating
# it when absent.
# NOTE(review): original lines 352-355, 357-359, 361, 363 are missing, so
# the rule parameters and branch structure are incomplete here.
350 def open_icmp(neutron_client, security_group_id):
351 if os_utils.check_security_group_rules(neutron_client,
356 if not os_utils.create_secgroup_rule(neutron_client,
360 logger.error("Failed to create icmp security group rule...")
362 logger.info("This rule exists for security group: %s"
# Ensure an HTTP (TCP) rule exists on the given security group, creating
# it when absent.
# NOTE(review): original lines 368-372, 374-378, 380, 382 are missing, so
# the rule parameters and branch structure are incomplete here.
366 def open_http_port(neutron_client, security_group_id):
367 if os_utils.check_security_group_rules(neutron_client,
373 if not os_utils.create_secgroup_rule(neutron_client,
379 logger.error("Failed to create http security group rule...")
381 logger.info("This rule exists for security group: %s"
# Ensure a BGP (TCP 179) rule exists on the given security group, creating
# it when absent.
# NOTE(review): original lines 387-391, 393-396, 398, 400 are missing, so
# the rule parameters and branch structure are incomplete here.
385 def open_bgp_port(neutron_client, security_group_id):
386 if os_utils.check_security_group_rules(neutron_client,
392 if not os_utils.create_secgroup_rule(neutron_client,
397 logger.error("Failed to create bgp security group rule...")
399 logger.info("This rule exists for security group: %s"
# Run a shell command, streaming its combined stdout/stderr, and return
# (output, success).
# NOTE(review): shell=True means `cmd` is interpreted by the shell -- only
# safe for trusted input.
# NOTE(review): original lines 404, 408, 410-415, 417, 419-420 are missing
# (output/success initialisation and the per-line handling are not visible).
403 def exec_cmd(cmd, verbose):
405 logger.debug("Executing '%s'" % cmd)
406 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
407 stderr=subprocess.STDOUT)
409 for line in iter(p.stdout.readline, b''):
416 returncode = p.wait()
418 logger.error("Command %s failed to execute." % cmd)
421 return output, success
# Query the OpenDaylight RESTCONF FIB and report whether `ip` appears in
# the response body.
# NOTE(review): ODL_USER / ODL_PASS are module-level names not visible in
# this extract; original lines 432, 434 are also missing.
424 def check_odl_fib(ip, controller_ip):
425 """Check that there is an entry in the ODL Fib for `ip`"""
426 url = "http://" + controller_ip + \
427 ":8181/restconf/config/odl-fib:fibEntries/"
428 logger.debug("Querring '%s' for FIB entries", url)
429 res = requests.get(url, auth=(ODL_USER, ODL_PASS))
430 if res.status_code != 200:
431 logger.error("OpenDaylight response status code: %s", res.status_code)
433 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
435 logger.debug("OpenDaylight FIB: \n%s" % res.text)
436 return ip in res.text
# Execute `cmd` inside the OpenDaylight Karaf shell on the given node and
# return the command output.
# NOTE(review): original lines 443, 446 (middle/end of the docstring) are
# missing from this extract.
439 def run_odl_cmd(odl_node, cmd):
440 '''Run a command in the OpenDaylight Karaf shell
441 This is a bit flimsy because of shell quote escaping, make sure that
442 the cmd passed does not have any top level double quotes or this
444 The /dev/null is used because client works, but outputs something
445 that contains "ERROR" and run_cmd doesn't like that.
447 karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
448 ' 2>/dev/null' % cmd)
449 return odl_node.run_cmd(karaf_cmd)
# Poll an instance's console output until cloud-init reports completion or
# failure; logs the outcome.
# NOTE(review): original lines 453, 455-456, 459, 462, 464-465, 467-468,
# 470-472, 474-476, 479+ are missing -- success/tries/sleep_time handling
# and the return value are not visible in this extract.
452 def wait_for_cloud_init(instance):
454 # ubuntu images take a long time to start
457 logger.info("Waiting for cloud init of instance: {}"
458 "".format(instance.name))
460 instance_log = instance.get_console_output()
461 if "Failed to run module" in instance_log:
463 logger.error("Cloud init failed to run. Reason: %s",
466 if re.search(r"Cloud-init v. .+ finished at", instance_log):
469 time.sleep(sleep_time)
473 logger.error("Cloud init timed out"
477 logger.info("Finished waiting for cloud init of instance {} result was {}"
478 "".format(instance.name, success))
# Attach a VM to the external bridge of its compute node. On Apex, br-ex
# is an OVS bridge, so a helper linux bridge is created and patched to
# br-ex with a veth pair before virsh-attaching the instance.
# NOTE(review): the `bridge` variable assignment lines (original 486,
# 492-494) and the shell heredoc delimiters (503-504, 506) are missing
# from this extract.
482 def attach_instance_to_ext_br(instance, compute_node):
483 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
484 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
485 if installer_type == "fuel":
487 elif installer_type == "apex":
488 # In Apex, br-ex is an ovs bridge and virsh attach-interface
489 # won't just work. We work around it by creating a linux
490 # bridge, attaching that to br-ex with a veth pair
491 # and virsh-attaching the instance to the linux-bridge
495 if ! sudo brctl show |grep -q ^{bridge};then
496 sudo brctl addbr {bridge}
497 sudo ip link set {bridge} up
498 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
499 sudo ip link set dev ovs-quagga-tap up
500 sudo ip link set dev quagga-tap up
501 sudo ovs-vsctl add-port br-ex ovs-quagga-tap
502 sudo brctl addif {bridge} quagga-tap
505 compute_node.run_cmd(cmd.format(bridge=bridge))
507 compute_node.run_cmd("sudo virsh attach-interface %s"
508 " bridge %s" % (libvirt_instance_name, bridge))
# Detach a VM from the external bridge and undo the Apex veth/linux-bridge
# plumbing set up by attach_instance_to_ext_br.
# NOTE(review): the MAC-discovery shell pipeline greps the dumpxml of every
# running domain, not just this instance -- it appears to assume a single
# matching MAC.
# NOTE(review): the `bridge` variable assignment lines (original 523,
# 529-530) and the heredoc delimiters (538) are missing from this extract.
511 def detach_instance_from_ext_br(instance, compute_node):
512 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
513 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
514 "grep running | awk '{print $2}'); "
515 "do echo -n ; sudo virsh dumpxml $vm| "
516 "grep -oP '52:54:[\da-f:]+' ;done")
517 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
518 " --type bridge --mac %s"
519 % (libvirt_instance_name, mac))
521 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
522 if installer_type == "fuel":
524 elif installer_type == "apex":
525 # In Apex, br-ex is an ovs bridge and virsh attach-interface
526 # won't just work. We work around it by creating a linux
527 # bridge, attaching that to br-ex with a veth pair
528 # and virsh-attaching the instance to the linux-bridge
531 sudo brctl delif {bridge} quagga-tap &&
532 sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
533 sudo ip link set dev quagga-tap down &&
534 sudo ip link set dev ovs-quagga-tap down &&
535 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
536 sudo ip link set {bridge} down &&
537 sudo brctl delbr {bridge}
539 compute_node.run_cmd(cmd.format(bridge=bridge))
# Tear down neutron resources in dependency order: floating IPs, BGPVPNs,
# router interfaces, router gateways, subnets, routers, networks.
# NOTE(review): this function logs through the root `logging` module while
# the rest of the file uses the module-level `logger` -- inconsistent.
# NOTE(review): several original lines (544, 551-552, 556, 563-564,
# 570-571, 577-579, 585-587, 593+) are missing, including format() targets
# for some error messages and the return value.
542 def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
543 subnet_ids, router_ids, network_ids):
545 if len(floatingip_ids) != 0:
546 for floatingip_id in floatingip_ids:
547 if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
548 logging.error('Fail to delete all floating ips. '
549 'Floating ip with id {} was not deleted.'.
550 format(floatingip_id))
553 if len(bgpvpn_ids) != 0:
554 for bgpvpn_id in bgpvpn_ids:
555 delete_bgpvpn(neutron_client, bgpvpn_id)
557 if len(interfaces) != 0:
558 for router_id, subnet_id in interfaces:
559 if not os_utils.remove_interface_router(neutron_client,
560 router_id, subnet_id):
561 logging.error('Fail to delete all interface routers. '
562 'Interface router with id {} was not deleted.'.
565 if len(router_ids) != 0:
566 for router_id in router_ids:
567 if not os_utils.remove_gateway_router(neutron_client, router_id):
568 logging.error('Fail to delete all gateway routers. '
569 'Gateway router with id {} was not deleted.'.
572 if len(subnet_ids) != 0:
573 for subnet_id in subnet_ids:
574 if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
575 logging.error('Fail to delete all subnets. '
576 'Subnet with id {} was not deleted.'.
580 if len(router_ids) != 0:
581 for router_id in router_ids:
582 if not os_utils.delete_neutron_router(neutron_client, router_id):
583 logging.error('Fail to delete all routers. '
584 'Router with id {} was not deleted.'.
588 if len(network_ids) != 0:
589 for network_id in network_ids:
590 if not os_utils.delete_neutron_net(neutron_client, network_id):
591 logging.error('Fail to delete all networks. '
592 'Network with id {} was not deleted.'.
# Delete the given flavors (optional) and instances.
# NOTE(review): original lines 604, 610+ are missing, including format()
# targets for the error messages and the return value.
# NOTE(review): nova flavors.delete typically returns None, so the
# `if not ...` error branch may fire even on success -- verify upstream.
598 def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
599 if flavor_ids is not None and len(flavor_ids) != 0:
600 for flavor_id in flavor_ids:
601 if not nova_client.flavors.delete(flavor_id):
602 logging.error('Fail to delete flavor. '
603 'Flavor with id {} was not deleted.'.
605 if len(instance_ids) != 0:
606 for instance_id in instance_ids:
607 if not os_utils.delete_instance(nova_client, instance_id):
608 logging.error('Fail to delete all instances. '
609 'Instance with id {} was not deleted.'.
# Delete the given glance images.
# NOTE(review): original lines 621+ are missing, including the format()
# target for the error message and the return value.
615 def cleanup_glance(glance_client, image_ids):
616 if len(image_ids) != 0:
617 for image_id in image_ids:
618 if not os_utils.delete_glance_image(glance_client, image_id):
619 logging.error('Fail to delete all images. '
620 'Image with id {} was not deleted.'.
# Create a BGPVPN with the given attributes (e.g. route_distinguishers,
# route_targets) wrapped under the "bgpvpn" key as the API expects.
# NOTE(review): original line 628 is missing from this extract.
626 def create_bgpvpn(neutron_client, **kwargs):
627 # route_distinguishers
629 json_body = {"bgpvpn": kwargs}
630 return neutron_client.create_bgpvpn(json_body)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id.

    Keyword arguments are wrapped under the "bgpvpn" key as the API
    expects; returns the client's response unchanged.
    """
    payload = {"bgpvpn": dict(kwargs)}
    return neutron_client.update_bgpvpn(bgpvpn_id, payload)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id; returns the client's
    response unchanged.
    """
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch the details of the BGPVPN identified by bgpvpn_id.

    Returns the client's show_bgpvpn response unchanged (a dict with a
    top-level "bgpvpn" key, per the API).
    """
    details = neutron_client.show_bgpvpn(bgpvpn_id)
    return details
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the list of router ids associated with the given BGPVPN."""
    vpn_details = get_bgpvpn(neutron_client, bgpvpn_id)
    return vpn_details['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the list of network ids associated with the given BGPVPN."""
    vpn_details = get_bgpvpn(neutron_client, bgpvpn_id)
    return vpn_details['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate a router with a BGPVPN.

    Wraps router_id in the "router_association" body the API expects and
    returns the client's response unchanged.
    """
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate a neutron network with a BGPVPN.

    Wraps the network id in the "network_association" body the API expects
    and returns the client's response unchanged.
    """
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
# Check that every active OpenStack node's br-int OVS bridge has
# fail_mode=secure; records True/False per node name and logs failures.
# NOTE(review): original lines 665, 669-670, 673-674, 679-680, 682, 688,
# 690-691, 695+ are missing -- the is_secure initialisation, the grep part
# of get_ovs_int_cmd, the strip/split of its output and the return value
# are not visible in this extract.
664 def is_fail_mode_secure():
666 Checks the value of the attribute fail_mode,
667 if it is set to secure. This check is performed
668 on all OVS br-int interfaces, for all OpenStack nodes.
671 openstack_nodes = get_nodes()
672 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
675 # Define OVS get fail_mode command
676 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
677 for openstack_node in openstack_nodes:
678 if not openstack_node.is_active():
681 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
683 if 'br-int' in ovs_int_list:
684 # Execute get fail_mode command
685 br_int_fail_mode = (openstack_node.
686 run_cmd(get_ovs_fail_mode_cmd).strip())
687 if br_int_fail_mode == 'secure':
689 is_secure[openstack_node.name] = True
692 logging.error('The fail_mode for br-int was not secure '
693 'in {} node'.format(openstack_node.name))
694 is_secure[openstack_node.name] = False
# Update a tenant's neutron quotas for networks, subnets and ports; logs
# and swallows any failure.
# NOTE(review): original lines 701, 703-706, 708-709, 714+ are missing --
# parts of the quota body, the try: line and the return value are not
# visible in this extract.
698 def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
699 subnet_quota, port_quota):
700 json_body = {"quota": {
702 "subnet": subnet_quota,
707 neutron_client.update_quota(tenant_id=tenant_id,
710 except Exception as e:
711 logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
712 " '%s', '%s', '%s', '%s')]: %s" %
713 (tenant_id, nw_quota, subnet_quota, port_quota, e))
# Update the default nova quota class's instance limit; logs and swallows
# any failure.
# NOTE(review): original lines 718 (try:), 720, 724+ are missing from this
# extract, including the return value.
717 def update_instance_quota_class(nova_client, instances_quota):
719 nova_client.quota_classes.update("default", instances=instances_quota)
721 except Exception as e:
722 logger.error("Error [update_instance_quota_class(nova_client,"
723 " '%s' )]: %s" % (instances_quota, e))
# Return the neutron quota dict for a tenant; logs on failure.
# NOTE(review): original lines 728 (try:), 733+ are missing from this
# extract, so the failure-path return value is not visible.
727 def get_neutron_quota(neutron_client, tenant_id):
729 return neutron_client.show_quota(tenant_id=tenant_id)['quota']
730 except Exception as e:
731 logger.error("Error in getting neutron quota for tenant "
732 " '%s' )]: %s" % (tenant_id, e))
# Return the default nova quota class's instance limit; logs on failure.
# NOTE(review): original lines 737 (try:), 741+ are missing from this
# extract, so the failure-path return value is not visible.
736 def get_nova_instances_quota(nova_client):
738 return nova_client.quota_classes.get("default").instances
739 except Exception as e:
740 logger.error("Error in getting nova instances quota: %s" % e)
# Collect OVS group entries (ovs-ofctl dump-groups) from every bridge in
# ovs_br_list present on every node in compute_node_list; returns the
# accumulated output lines.
# NOTE(review): original lines 745, 749-750, 757+ are missing -- the
# cmd_out_lines initialisation, the split() of the command output and the
# return statement are not visible in this extract.
744 def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
746 Gets, as input, a list of compute nodes and a list of OVS bridges
747 and returns the command console output, as a list of lines, that
748 contains all the OVS groups from all bridges and nodes in lists.
751 for compute_node in compute_node_list:
752 for ovs_br in ovs_br_list:
753 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
754 ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
755 "grep group".format(ovs_br, of_protocol))
756 cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
761 def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
763 Gets, as input, a list of compute nodes and a list of OVS bridges
764 and returns the command console output, as a list of lines, that
765 contains all the OVS flows from all bridges and nodes in lists.
768 for compute_node in compute_node_list:
769 for ovs_br in ovs_br_list:
770 if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
771 ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
772 "grep table=".format(ovs_br, of_protocol))
773 cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().