3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
18 import functest.utils.openstack_utils as os_utils
19 from opnfv.deployment.factory import Factory as DeploymentFactory
21 from sdnvpn.lib import config as sdnvpn_config
23 logger = logging.getLogger('sndvpn_test_utils')
25 common_config = sdnvpn_config.CommonConfig()
def create_custom_flavor():
    """Get or create the custom flavor described in the common config.

    Delegates to functest's os_utils.get_or_create_flavor with the
    name/ram/disk/vcpus values from sdnvpn's CommonConfig and returns
    that call's result unchanged (as provided by functest's openstack
    utils — verify the exact shape against functest if callers unpack it).
    """
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)
# Create a neutron network called `name` via functest's os_utils, logging
# the attempt; the visible error string is emitted when creation fails.
# NOTE(review): extraction artifact — each line starts with its original
# source line number, and the numbering gap (40 -> 43) means lines are
# missing from this extract (the failure check and presumably the return
# of net_id). Verify against the full file.
38 def create_net(neutron_client, name):
39 logger.debug("Creating network %s", name)
40 net_id = os_utils.create_neutron_net(neutron_client, name)
43 "There has been a problem when creating the neutron network")
# Create a neutron subnet `name` with `cidr` inside network `net_id`.
# NOTE(review): leading numerals are source line numbers from extraction;
# the numbering gaps (49 -> 51 -> 57) show missing lines (the log
# arguments, the remaining create_neutron_subnet arguments, the failure
# check and presumably the return of subnet_id).
48 def create_subnet(neutron_client, name, cidr, net_id):
49 logger.debug("Creating subnet %s in network %s with cidr %s",
51 subnet_id = os_utils.create_neutron_subnet(neutron_client,
57 "There has been a problem when creating the neutron subnet")
# Create a network, its first subnet and a router in one call via
# os_utils.create_network_full; optionally create and attach a second
# subnet. Returns (net_id, subnet_id, router_id) — note subnet_id is
# rebound to the SECOND subnet's id when subnet2 is given.
# NOTE(review): leading numerals are source line numbers; the numbering
# gaps (67 -> 74, 83 -> 86) show missing lines (the remaining
# create_network_full arguments and both failure checks).
62 def create_network(neutron_client, net, subnet1, cidr1,
63 router, subnet2=None, cidr2=None):
64 """Network assoc won't work for networks/subnets created by this function.
65 It is an ODL limitation due to it handling routers as vpns.
66 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
67 network_dic = os_utils.create_network_full(neutron_client,
74 "There has been a problem when creating the neutron network")
76 net_id = network_dic["net_id"]
77 subnet_id = network_dic["subnet_id"]
78 router_id = network_dic["router_id"]
80 if subnet2 is not None:
81 logger.debug("Creating and attaching a second subnet...")
82 subnet_id = os_utils.create_neutron_subnet(
83 neutron_client, subnet2, cidr2, net_id)
86 "There has been a problem when creating the second subnet")
88 logger.debug("Subnet '%s' created successfully" % subnet_id)
89 return net_id, subnet_id, router_id
# Boot a nova instance (defaulting kwargs['flavor'] to the configured
# default flavor), wait for it to become ACTIVE via os_utils, then add it
# to a security group.
# NOTE(review): leading numerals are source line numbers; the large gaps
# (92 -> 104, 114 -> 121, 121 -> 126, ...) mean the parameter list, most
# create_instance_and_wait_for_active arguments and the error/secgroup
# branches are only partially visible here — verify against the full file.
# NOTE(review): instance.networks.itervalues().next() is Python-2-only;
# Python 3 would need next(iter(instance.networks.values())).
92 def create_instance(nova_client,
104 if 'flavor' not in kwargs:
105 kwargs['flavor'] = common_config.default_flavor
107 logger.info("Creating instance '%s'..." % name)
109 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
110 " network=%s\n secgroup=%s \n hypervisor=%s \n"
111 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
112 % (name, kwargs['flavor'], image_id, network_id, sg_id,
113 compute_node, fixed_ip, files, userdata))
114 instance = os_utils.create_instance_and_wait_for_active(
121 av_zone=compute_node,
126 logger.error("Error while booting instance.")
129 logger.debug("Instance '%s' booted successfully. IP='%s'." %
130 (name, instance.networks.itervalues().next()[0]))
131 # Retrieve IP of INSTANCE
132 # instance_ip = instance.networks.get(network_id)[0]
135 logger.debug("Adding '%s' to security group '%s'..."
136 % (name, secgroup_name))
138 logger.debug("Adding '%s' to security group '%s'..."
140 os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
# Build a cloud-init userdata shell script that pings each address in
# ips_array `ping_count` times and echoes "<ip> OK" / "<ip> KO".
# NOTE(review): leading numerals are source line numbers; the gaps
# (145 -> 148, 148 -> 150, 151 -> 156, after 160) hide the `ips`
# accumulator initialisation and loop, parts of the generated script and
# the closing of the returned string.
145 def generate_ping_userdata(ips_array, ping_count=10):
148 ips = ("%s %s" % (ips, ip))
150 ips = ips.replace(' ', ' ')
151 return ("#!/bin/sh\n"
156 " ping -c %s $ip 2>&1 >/dev/null\n"
158 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
159 " echo ping $ip OK\n"
160 " else echo ping $ip KO\n"
# Return a cloud-init shell script that prepares /home/cirros/.ssh for
# the cirros user: moves a pre-injected id_rsa into place, writes a fixed
# public key into authorized_keys and fixes ownership and permissions.
# NOTE(review): leading numerals are source line numbers; the closing of
# the returned string expression (after original line 182) is not visible
# in this extract.
168 def generate_userdata_common():
169 return ("#!/bin/sh\n"
170 "sudo mkdir -p /home/cirros/.ssh/\n"
171 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
172 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
173 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
174 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
175 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
176 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
177 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
178 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
179 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
180 "chmod 700 /home/cirros/.ssh\n"
181 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
182 "chmod 600 /home/cirros/.ssh/id_rsa\n"
# Build userdata that first applies generate_userdata_common() and then
# ssh-es from the instance to each ip in ips_array, echoing the remote
# hostname (or 'not reachable').
# NOTE(review): leading numerals are source line numbers; the gaps
# (187 -> 191, 193 -> 199, after 203) hide the `ips` accumulator
# initialisation and loop, the start of the second script fragment and the
# final concatenation/return of u1 with it.
186 def generate_userdata_with_ssh(ips_array):
187 u1 = generate_userdata_common()
191 ips = ("%s %s" % (ips, ip))
193 ips = ips.replace(' ', ' ')
199 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
200 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
202 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
203 " else echo $ip 'not reachable';fi;\n"
# Return a deployment handler built by DeploymentFactory.get_handler for
# the installer named in $INSTALLER_TYPE (fuel or apex); warns — and
# presumably returns None, the return is on a missing line — for any other
# installer type.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the early return and most get_handler arguments (only the apex
# pkey_file is visible).
# NOTE(review): logger.warn is deprecated in favour of logger.warning,
# and the warn message is missing a space/newline between "apex." and
# "returning" — cannot be fixed here without touching runtime strings.
211 def get_installerHandler():
212 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
213 installer_ip = get_installer_ip()
215 if installer_type not in ["fuel", "apex"]:
216 logger.warn("installer type %s is neither fuel nor apex."
217 "returning None for installer handler" % installer_type)
220 if installer_type in ["apex"]:
221 developHandler = DeploymentFactory.get_handler(
225 pkey_file="/root/.ssh/id_rsa")
227 if installer_type in ["fuel"]:
228 developHandler = DeploymentFactory.get_handler(
233 return developHandler
# Fetch all deployment nodes through the installer-specific handler.
# NOTE(review): the enclosing `def` (presumably `def get_nodes():`,
# around original line 236) is missing from this extract — only the body
# is visible here. Verify against the full file.
237 developHandler = get_installerHandler()
238 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP address taken from the environment.

    :return: the value of $INSTALLER_IP as a string
    :raises KeyError: if INSTALLER_IP is not set in the environment
    """
    return str(os.environ['INSTALLER_IP'])
# Return the first IP of the instance's first network.
# NOTE(review): the `return instance_ip` line (original 247) is missing
# from this extract; also itervalues()/.next() is Python-2-only syntax.
245 def get_instance_ip(instance):
246 instance_ip = instance.networks.itervalues().next()[0]
# Poll the instance's console output until a ".* login:" prompt appears,
# sleeping between retries; logs an error when the prompt never shows up.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the tries/sleep_time/console_log initialisation, the tries decrement
# inside the loop, the rest of the error message and the boolean result
# that is presumably returned.
250 def wait_for_instance(instance):
251 logger.info("Waiting for instance %s to get a DHCP lease and "
252 "prompt for login..." % instance.id)
253 # The sleep this function replaced waited for 80s
256 pattern = ".* login:"
257 expected_regex = re.compile(pattern)
259 while tries > 0 and not expected_regex.search(console_log):
260 console_log = instance.get_console_output()
261 time.sleep(sleep_time)
264 if not expected_regex.search(console_log):
265 logger.error("Instance %s seems not to boot up properly."
# Wait for every instance passed as a positional argument; the aggregate
# result (presumably `return all(check)`, original 273) is on a line
# missing from this extract.
271 def wait_for_instances_up(*args):
272 check = [wait_for_instance(instance) for instance in args]
# Poll get_bgpvpn_networks until net_id appears in the BGPVPN's network
# list, sleeping between retries; logs an error on timeout.
# NOTE(review): leading numerals are source line numbers; the
# tries/sleep_time/nets initialisation, the tries decrement, the rest of
# the error message and the return value are on missing lines.
# NOTE(review): the debug message at 280-281 interpolates
# (bgpvpn_id, net_id) although its wording names the network first —
# the arguments look swapped (the error message at 288 uses the other
# order). Likely a bug; cannot be fixed safely from this partial extract.
276 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
280 logger.debug("Waiting for network %s to associate with BGPVPN %s "
281 % (bgpvpn_id, net_id))
283 while tries > 0 and net_id not in nets:
284 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
285 time.sleep(sleep_time)
287 if net_id not in nets:
288 logger.error("Association of network %s with BGPVPN %s failed" %
# Wait for several network associations on the same BGPVPN; the loop
# clause of the comprehension and the aggregated return (per the comment,
# True only when all associations succeeded) are on missing lines.
294 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
295 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
297 # Return True if all associations succeeded
# Poll get_bgpvpn_routers until router_id appears in the BGPVPN's router
# list, sleeping between retries; logs an error on timeout.
# NOTE(review): leading numerals are source line numbers; initialisation
# of tries/sleep_time/routers, the tries decrement and the return value
# are on missing lines.
# NOTE(review): the debug message at 305-306 interpolates
# (bgpvpn_id, router_id) while its wording names the router first — the
# error message at 312-313 uses (router_id, bgpvpn_id), so the debug
# arguments look swapped. Likely a bug; not fixable safely from here.
301 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
305 logger.debug("Waiting for router %s to associate with BGPVPN %s "
306 % (bgpvpn_id, router_id))
307 while tries > 0 and router_id not in routers:
308 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
309 time.sleep(sleep_time)
311 if router_id not in routers:
312 logger.error("Association of router %s with BGPVPN %s failed" %
313 (router_id, bgpvpn_id))
# Wait for several router associations on the same BGPVPN; the loop
# clause of the comprehension and the aggregated return (per the comment,
# True only when all associations succeeded) are on missing lines.
318 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
319 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
321 # Return True if all associations succeeded
# Fixed delay between sub-tests (arguments are accepted and ignored); the
# actual time.sleep call (original ~328) is on a line missing from this
# extract.
325 def wait_before_subtest(*args, **kwargs):
326 ''' This is a placeholder.
327 TODO: Replace delay with polling logic. '''
# Return the hypervisor list, exiting when the deployment has fewer
# compute nodes than needed.
# NOTE(review): the comparison hard-codes 2 and ignores the
# required_node_number parameter — likely a bug, but the numbering gaps
# (334 -> 336, 339 -> 343) hide the exit path and message tail, so it
# cannot be fixed safely from this partial extract.
# NOTE(review): leading numerals are source line numbers from extraction.
331 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
332 """Get the compute nodes in the deployment
333 Exit if the deployment doesn't have enough compute nodes"""
334 compute_nodes = os_utils.get_hypervisors(nova_client)
336 num_compute_nodes = len(compute_nodes)
337 if num_compute_nodes < 2:
338 logger.error("There are %s compute nodes in the deployment. "
339 "Minimum number of nodes to complete the test is 2."
343 logger.debug("Compute nodes: %s" % compute_nodes)
# Ensure an ICMP ingress rule exists on the security group: create it
# when check_security_group_rules reports it absent, otherwise just log
# that it already exists.
# NOTE(review): leading numerals are source line numbers; the gaps
# (348 -> 353 -> 357 -> 359) hide the rule parameters
# (protocol/direction) and any error/exit handling.
347 def open_icmp(neutron_client, security_group_id):
348 if os_utils.check_security_group_rules(neutron_client,
353 if not os_utils.create_secgroup_rule(neutron_client,
357 logger.error("Failed to create icmp security group rule...")
359 logger.info("This rule exists for security group: %s"
# Ensure an HTTP (TCP port 80, by the message) ingress rule exists on the
# security group, creating it only when absent.
# NOTE(review): leading numerals are source line numbers; the gaps
# (364 -> 370 -> 376 -> 378) hide the rule parameters and any error/exit
# handling — structure mirrors open_icmp above.
363 def open_http_port(neutron_client, security_group_id):
364 if os_utils.check_security_group_rules(neutron_client,
370 if not os_utils.create_secgroup_rule(neutron_client,
376 logger.error("Failed to create http security group rule...")
378 logger.info("This rule exists for security group: %s"
# Ensure a BGP (TCP port 179, by the message) ingress rule exists on the
# security group, creating it only when absent.
# NOTE(review): leading numerals are source line numbers; the gaps
# (383 -> 389 -> 394 -> 396) hide the rule parameters and any error/exit
# handling — structure mirrors open_icmp above.
382 def open_bgp_port(neutron_client, security_group_id):
383 if os_utils.check_security_group_rules(neutron_client,
389 if not os_utils.create_secgroup_rule(neutron_client,
394 logger.error("Failed to create bgp security group rule...")
396 logger.info("This rule exists for security group: %s"
# Run a shell command with subprocess, stream its stdout line by line and
# return a (output, success) pair.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the output/success initialisation, the per-line (verbose) handling and
# the success update based on returncode.
# NOTE(review): shell=True executes through the shell — acceptable only
# because commands here are built internally, never from untrusted input.
400 def exec_cmd(cmd, verbose):
402 logger.debug("Executing '%s'" % cmd)
403 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
404 stderr=subprocess.STDOUT)
406 for line in iter(p.stdout.readline, b''):
413 returncode = p.wait()
415 logger.error("Command %s failed to execute." % cmd)
418 return output, success
# Query the OpenDaylight restconf FIB table over HTTP and report whether
# `ip` occurs anywhere in the response body (plain substring check).
# NOTE(review): ODL_USER/ODL_PASS and the `requests` import live at
# module level outside this extract; the non-200 early `return False`
# (original ~429) is on a missing line. The "Querring" typo is inside a
# runtime log string and is deliberately left untouched here.
421 def check_odl_fib(ip, controller_ip):
422 """Check that there is an entry in the ODL Fib for `ip`"""
423 url = "http://" + controller_ip + \
424 ":8181/restconf/config/odl-fib:fibEntries/"
425 logger.debug("Querring '%s' for FIB entries", url)
426 res = requests.get(url, auth=(ODL_USER, ODL_PASS))
427 if res.status_code != 200:
428 logger.error("OpenDaylight response status code: %s", res.status_code)
430 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
432 logger.debug("OpenDaylight FIB: \n%s" % res.text)
433 return ip in res.text
def run_odl_cmd(odl_node, cmd):
    """Run a command in the OpenDaylight Karaf shell.

    This is a bit flimsy because of shell quote escaping: make sure the
    cmd passed does not contain any top-level double quotes or the
    generated command line breaks.

    stderr is redirected to /dev/null because the karaf client works but
    emits noise containing "ERROR" which upsets run_cmd.

    :param odl_node: node object exposing run_cmd(str)
    :param cmd: Karaf shell command (no top-level double quotes)
    :return: whatever odl_node.run_cmd returns for the karaf invocation
    """
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
# Poll the instance console until cloud-init reports it finished
# ("Cloud-init v. ... finished at"), failing fast when a module error is
# seen; logs the final outcome and presumably returns `success`.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the retries/sleep_time/success setup, the loop header, the
# failure-reason extraction, the break/return plumbing and the tail of
# the timeout message.
449 def wait_for_cloud_init(instance):
451 # ubuntu images take a long time to start
454 logger.info("Waiting for cloud init of instance: {}"
455 "".format(instance.name))
457 instance_log = instance.get_console_output()
458 if "Failed to run module" in instance_log:
460 logger.error("Cloud init failed to run. Reason: %s",
463 if re.search(r"Cloud-init v. .+ finished at", instance_log):
466 time.sleep(sleep_time)
470 logger.error("Cloud init timed out"
474 logger.info("Finished waiting for cloud init of instance {} result was {}"
475 "".format(instance.name, success))
# Attach a running instance to the external bridge with virsh.
# For fuel the bridge is assigned directly (on a missing line); for apex,
# br-ex is an OVS bridge and virsh attach-interface won't just work, so a
# linux bridge is created and patched to br-ex with a veth pair first.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the fuel-branch bridge assignment and the `cmd = """...` opening of the
# shell script. Lines 492-499 below are the interior of that
# triple-quoted shell string, so no comments are inserted among them.
479 def attach_instance_to_ext_br(instance, compute_node):
480 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
481 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
482 if installer_type == "fuel":
484 elif installer_type == "apex":
485 # In Apex, br-ex is an ovs bridge and virsh attach-interface
486 # won't just work. We work around it by creating a linux
487 # bridge, attaching that to br-ex with a veth pair
488 # and virsh-attaching the instance to the linux-bridge
492 if ! sudo brctl show |grep -q ^{bridge};then
493 sudo brctl addbr {bridge}
494 sudo ip link set {bridge} up
495 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
496 sudo ip link set dev ovs-quagga-tap up
497 sudo ip link set dev quagga-tap up
498 sudo ovs-vsctl add-port br-ex ovs-quagga-tap
499 sudo brctl addif {bridge} quagga-tap
502 compute_node.run_cmd(cmd.format(bridge=bridge))
504 compute_node.run_cmd("sudo virsh attach-interface %s"
505 " bridge %s" % (libvirt_instance_name, bridge))
# Detach the instance's bridge interface via virsh and tear down the
# apex-specific linux-bridge/veth plumbing created by
# attach_instance_to_ext_br.
# NOTE(review): the virsh dumpxml pipeline greps a 52:54:* MAC out of
# EVERY running domain, so with multiple instances `mac` may contain
# several addresses — verify against the full file.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the fuel-branch bridge assignment and the `cmd = """...` opening. Lines
# 528-534 are the interior of that triple-quoted shell string, so no
# comments are inserted among them.
508 def detach_instance_from_ext_br(instance, compute_node):
509 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
510 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
511 "grep running | awk '{print $2}'); "
512 "do echo -n ; sudo virsh dumpxml $vm| "
513 "grep -oP '52:54:[\da-f:]+' ;done")
514 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
515 " --type bridge --mac %s"
516 % (libvirt_instance_name, mac))
518 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
519 if installer_type == "fuel":
521 elif installer_type == "apex":
522 # In Apex, br-ex is an ovs bridge and virsh attach-interface
523 # won't just work. We work around it by creating a linux
524 # bridge, attaching that to br-ex with a veth pair
525 # and virsh-attaching the instance to the linux-bridge
528 sudo brctl delif {bridge} quagga-tap &&
529 sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
530 sudo ip link set dev quagga-tap down &&
531 sudo ip link set dev ovs-quagga-tap down &&
532 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
533 sudo ip link set {bridge} down &&
534 sudo brctl delbr {bridge}
536 compute_node.run_cmd(cmd.format(bridge=bridge))
# Best-effort teardown of neutron resources in dependency order: floating
# ips, bgpvpns, router interfaces, router gateways, subnets, routers and
# finally networks. Each failed delete is logged and iteration continues.
# NOTE(review): these branches call logging.error on the root logger
# instead of the module-level `logger` — inconsistent with the rest of
# the file, left untouched here.
# NOTE(review): leading numerals are source line numbers; the gaps hide
# the .format(...) argument lines of several error messages and any final
# return value.
539 def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
540 subnet_ids, router_ids, network_ids):
542 if len(floatingip_ids) != 0:
543 for floatingip_id in floatingip_ids:
544 if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
545 logging.error('Fail to delete all floating ips. '
546 'Floating ip with id {} was not deleted.'.
547 format(floatingip_id))
550 if len(bgpvpn_ids) != 0:
551 for bgpvpn_id in bgpvpn_ids:
552 delete_bgpvpn(neutron_client, bgpvpn_id)
554 if len(interfaces) != 0:
555 for router_id, subnet_id in interfaces:
556 if not os_utils.remove_interface_router(neutron_client,
557 router_id, subnet_id):
558 logging.error('Fail to delete all interface routers. '
559 'Interface router with id {} was not deleted.'.
562 if len(router_ids) != 0:
563 for router_id in router_ids:
564 if not os_utils.remove_gateway_router(neutron_client, router_id):
565 logging.error('Fail to delete all gateway routers. '
566 'Gateway router with id {} was not deleted.'.
569 if len(subnet_ids) != 0:
570 for subnet_id in subnet_ids:
571 if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
572 logging.error('Fail to delete all subnets. '
573 'Subnet with id {} was not deleted.'.
577 if len(router_ids) != 0:
578 for router_id in router_ids:
579 if not os_utils.delete_neutron_router(neutron_client, router_id):
580 logging.error('Fail to delete all routers. '
581 'Router with id {} was not deleted.'.
585 if len(network_ids) != 0:
586 for network_id in network_ids:
587 if not os_utils.delete_neutron_net(neutron_client, network_id):
588 logging.error('Fail to delete all networks. '
589 'Network with id {} was not deleted.'.
# Best-effort teardown of nova resources: instances first, then glance
# images; failures are logged and iteration continues.
# NOTE(review): uses root-logger logging.error instead of the module
# `logger` (same inconsistency as cleanup_neutron); leading numerals are
# source line numbers, and the gaps hide the .format(...) lines and any
# return value.
595 def cleanup_nova(nova_client, instance_ids, image_ids):
596 if len(instance_ids) != 0:
597 for instance_id in instance_ids:
598 if not os_utils.delete_instance(nova_client, instance_id):
599 logging.error('Fail to delete all instances. '
600 'Instance with id {} was not deleted.'.
604 if len(image_ids) != 0:
605 for image_id in image_ids:
606 if not os_utils.delete_glance_image(nova_client, image_id):
607 logging.error('Fail to delete all images. '
608 'Image with id {} was not deleted.'.
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN with the given attributes.

    Typical kwargs include name, route_distinguishers and route_targets;
    they are forwarded verbatim inside the "bgpvpn" request body.

    :param neutron_client: neutron client with BGPVPN extension support
    :param kwargs: BGPVPN attributes for the request body
    :return: the response of neutron_client.create_bgpvpn
    """
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id.

    :param neutron_client: neutron client with BGPVPN extension support
    :param bgpvpn_id: id of the BGPVPN to update
    :param kwargs: attributes forwarded verbatim inside the "bgpvpn" body
    :return: the response of neutron_client.update_bgpvpn
    """
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id.

    :return: the response of neutron_client.delete_bgpvpn
    """
    return neutron_client.delete_bgpvpn(bgpvpn_id)
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch the BGPVPN identified by bgpvpn_id.

    :return: the response of neutron_client.show_bgpvpn (a dict with a
        "bgpvpn" key, as relied on by the helpers below)
    """
    return neutron_client.show_bgpvpn(bgpvpn_id)
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids associated with the given BGPVPN."""
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids associated with the given BGPVPN."""
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate a router with a BGPVPN.

    :return: the response of neutron_client.create_router_association
    """
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate a neutron network with a BGPVPN.

    :return: the response of neutron_client.create_network_association
    """
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)
652 def is_fail_mode_secure():
654 Checks the value of the attribute fail_mode,
655 if it is set to secure. This check is performed
656 on all OVS br-int interfaces, for all OpenStack nodes.
659 openstack_nodes = get_nodes()
660 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
663 # Define OVS get fail_mode command
664 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
665 for openstack_node in openstack_nodes:
666 if not openstack_node.is_active():
669 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
671 if 'br-int' in ovs_int_list:
672 # Execute get fail_mode command
673 br_int_fail_mode = (openstack_node.
674 run_cmd(get_ovs_fail_mode_cmd).strip())
675 if br_int_fail_mode == 'secure':
677 is_secure[openstack_node.name] = True
680 logging.error('The fail_mode for br-int was not secure '
681 'in {} node'.format(openstack_node.name))
682 is_secure[openstack_node.name] = False