3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
8 # http://www.apache.org/licenses/LICENSE-2.0
18 import functest.utils.openstack_utils as os_utils
19 from opnfv.deployment.factory import Factory as DeploymentFactory
21 from sdnvpn.lib import config as sdnvpn_config
# Module-wide logger for these SDNVPN test helpers.
# NOTE(review): the logger name 'sndvpn_test_utils' looks like a typo of
# 'sdnvpn_test_utils' — confirm before renaming; external logging
# configuration may key on the exact string.
23 logger = logging.getLogger('sndvpn_test_utils')
# Shared test configuration (flavor names/sizes, defaults, ...) loaded once
# at import time from sdnvpn_config.
25 common_config = sdnvpn_config.CommonConfig()
def create_custom_flavor():
    """Ensure the custom test flavor from the common config exists.

    Delegates to ``os_utils.get_or_create_flavor`` with the flavor name,
    RAM, disk and vCPU count taken from ``common_config`` and returns
    whatever that helper returns.
    """
    cfg = common_config
    flavor_spec = (cfg.custom_flavor_name,
                   cfg.custom_flavor_ram,
                   cfg.custom_flavor_disk,
                   cfg.custom_flavor_vcpus)
    return os_utils.get_or_create_flavor(*flavor_spec)
# Create a neutron network named *name* and return its id.
38 def create_net(neutron_client, name):
39 logger.debug("Creating network %s", name)
40 net_id = os_utils.create_neutron_net(neutron_client, name)
# NOTE(review): the conditional that owns the error message below is not
# visible in this chunk; presumably net_id is falsy on failure — confirm.
43 "There has been a problem when creating the neutron network")
# Create a neutron subnet with the given CIDR inside network *net_id*
# and return its id.
48 def create_subnet(neutron_client, name, cidr, net_id):
49 logger.debug("Creating subnet %s in network %s with cidr %s",
51 subnet_id = os_utils.create_neutron_subnet(neutron_client,
# NOTE(review): the failure check that owns this error string is not
# visible here — confirm the error path in the full source.
57 "There has been a problem when creating the neutron subnet")
# Create a full network topology (network + subnet + router) and return
# (net_id, subnet_id, router_id). Optionally attach a second subnet.
62 def create_network(neutron_client, net, subnet1, cidr1,
63 router, subnet2=None, cidr2=None):
64 """Network assoc won't work for networks/subnets created by this function.
65 It is an ODL limitation due to it handling routers as vpns.
66 See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
67 network_dic = os_utils.create_network_full(neutron_client,
74 "There has been a problem when creating the neutron network")
76 net_id = network_dic["net_id"]
77 subnet_id = network_dic["subnet_id"]
78 router_id = network_dic["router_id"]
# Optional second subnet attached to the same network; on success the
# returned subnet_id is that of the SECOND subnet, not the first.
80 if subnet2 is not None:
81 logger.debug("Creating and attaching a second subnet...")
82 subnet_id = os_utils.create_neutron_subnet(
83 neutron_client, subnet2, cidr2, net_id)
86 "There has been a problem when creating the second subnet")
88 logger.debug("Subnet '%s' created successfully" % subnet_id)
89 return net_id, subnet_id, router_id
# Boot a nova instance, wait until it is ACTIVE, then add it to a security
# group. NOTE(review): most of the signature (name, image_id, network_id,
# sg_id, compute_node, fixed_ip, files, userdata, **kwargs) is not visible
# in this chunk — confirm against the full source.
92 def create_instance(nova_client,
# Fall back to the configured default flavor when the caller gave none.
104 if 'flavor' not in kwargs:
105 kwargs['flavor'] = common_config.default_flavor
107 logger.info("Creating instance '%s'..." % name)
109 "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
110 " network=%s\n secgroup=%s \n hypervisor=%s \n"
111 " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
112 % (name, kwargs['flavor'], image_id, network_id, sg_id,
113 compute_node, fixed_ip, files, userdata))
114 instance = os_utils.create_instance_and_wait_for_active(
121 av_zone=compute_node,
126 logger.error("Error while booting instance.")
# NOTE(review): networks.itervalues().next() is Python-2-only syntax;
# use next(iter(instance.networks.values())) if this moves to Python 3.
129 logger.debug("Instance '%s' booted successfully. IP='%s'." %
130 (name, instance.networks.itervalues().next()[0]))
131 # Retrieve IP of INSTANCE
132 # instance_ip = instance.networks.get(network_id)[0]
135 logger.debug("Adding '%s' to security group '%s'..."
136 % (name, secgroup_name))
138 logger.debug("Adding '%s' to security group '%s'..."
140 os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
# Build a cloud-init userdata shell script that pings every address in
# *ips_array* (ping_count echo requests each) and prints "ping <ip> OK"
# or "ping <ip> KO" per target, for later console-log scraping.
145 def generate_ping_userdata(ips_array, ping_count=10):
148 ips = ("%s %s" % (ips, ip))
150 ips = ips.replace('  ', ' ')
151 return ("#!/bin/sh\n"
156 " ping -c %s $ip 2>&1 >/dev/null\n"
158 " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
159 " echo ping $ip OK\n"
160 " else echo ping $ip KO\n"
# Return the common cloud-init userdata shell script: installs a fixed
# SSH keypair for the 'cirros' user so test instances can SSH each other.
# NOTE(review): the embedded public key is a shared test fixture, not a
# secret — do not reuse outside disposable test environments.
168 def generate_userdata_common():
169 return ("#!/bin/sh\n"
170 "sudo mkdir -p /home/cirros/.ssh/\n"
171 "sudo chown cirros:cirros /home/cirros/.ssh/\n"
172 "sudo chown cirros:cirros /home/cirros/id_rsa\n"
173 "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
174 "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
175 "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
176 "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
177 "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
178 "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
179 "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
180 "chmod 700 /home/cirros/.ssh\n"
181 "chmod 644 /home/cirros/.ssh/authorized_keys\n"
182 "chmod 600 /home/cirros/.ssh/id_rsa\n"
# Build userdata that first applies the common SSH setup, then loops over
# *ips_array* SSH-ing to each address and echoing "<ip> <hostname>" or
# "<ip> not reachable" for console-log verification.
186 def generate_userdata_with_ssh(ips_array):
187 u1 = generate_userdata_common()
191 ips = ("%s %s" % (ips, ip))
193 ips = ips.replace('  ', ' ')
199 " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
200 "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
202 " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
203 " else echo $ip 'not reachable';fi;\n"
# Build and return a deployment handler for the current installer, based
# on the INSTALLER_TYPE and INSTALLER_IP environment variables.
# Raises ValueError for installers other than "fuel" and "apex".
211 def get_installerHandler():
212 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
213 installer_ip = get_installer_ip()
215 if installer_type not in ["fuel", "apex"]:
216 raise ValueError("%s is not supported" % installer_type)
# Apex handler authenticates with the root SSH private key.
218 if installer_type in ["apex"]:
219 developHandler = DeploymentFactory.get_handler(
223 pkey_file="/root/.ssh/id_rsa")
# NOTE(review): the fuel handler's credentials are on lines not visible
# in this chunk — confirm against the full source.
225 if installer_type in ["fuel"]:
226 developHandler = DeploymentFactory.get_handler(
231 return developHandler
# NOTE(review): the enclosing `def get_nodes():` line is not visible in
# this chunk; these lines fetch the node list via the installer handler.
235 developHandler = get_installerHandler()
236 return developHandler.get_nodes()
def get_installer_ip():
    """Return the installer IP address from the INSTALLER_IP env var.

    Raises KeyError when INSTALLER_IP is not set in the environment.
    """
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
# Return the first IP address of the instance's first network.
# NOTE(review): itervalues()/.next() is Python-2-only; the `return`
# statement is on a line not visible in this chunk.
243 def get_instance_ip(instance):
244 instance_ip = instance.networks.itervalues().next()[0]
# Poll an instance's console log until the "... login:" prompt appears,
# indicating boot (and DHCP lease) completed; logs an error on timeout.
# NOTE(review): tries/sleep_time are initialized on lines not visible here.
248 def wait_for_instance(instance):
249 logger.info("Waiting for instance %s to get a DHCP lease and "
250 "prompt for login..." % instance.id)
251 # The sleep this function replaced waited for 80s
254 pattern = ".* login:"
255 expected_regex = re.compile(pattern)
# Keep polling the console output until the prompt shows or tries run out.
257 while tries > 0 and not expected_regex.search(console_log):
258 console_log = instance.get_console_output()
259 time.sleep(sleep_time)
262 if not expected_regex.search(console_log):
263 logger.error("Instance %s seems not to boot up properly."
# Wait for every instance passed as a positional argument; collects the
# per-instance results. NOTE(review): the aggregation/return of `check`
# is on a line not visible in this chunk.
269 def wait_for_instances_up(*args):
270 check = [wait_for_instance(instance) for instance in args]
# Poll until *net_id* shows up among the networks associated with the
# given BGPVPN; logs an error when the association never materializes.
# NOTE(review): tries/sleep_time/nets initialization is not visible here.
274 def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
278 logger.debug("Waiting for network %s to associate with BGPVPN %s "
279 % (bgpvpn_id, net_id))
281 while tries > 0 and net_id not in nets:
282 nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
283 time.sleep(sleep_time)
285 if net_id not in nets:
286 logger.error("Association of network %s with BGPVPN %s failed" %
# Wait for each network id in *args to associate with the BGPVPN.
292 def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
293 check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
295 # Return True if all associations succeeded
# Poll until *router_id* shows up among the routers associated with the
# given BGPVPN; logs an error when the association never materializes.
# NOTE(review): tries/sleep_time/routers initialization is not visible here.
299 def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
303 logger.debug("Waiting for router %s to associate with BGPVPN %s "
304 % (bgpvpn_id, router_id))
305 while tries > 0 and router_id not in routers:
306 routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
307 time.sleep(sleep_time)
309 if router_id not in routers:
310 logger.error("Association of router %s with BGPVPN %s failed" %
311 (router_id, bgpvpn_id))
# Wait for each router id in *args to associate with the BGPVPN.
316 def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
317 check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
319 # Return True if all associations succeeded
# Fixed delay between subtests; the actual sleep is on a line not visible
# in this chunk.
323 def wait_before_subtest(*args, **kwargs):
324 ''' This is a placeholder.
325 TODO: Replace delay with polling logic. '''
# Return the hypervisor (compute node) list, logging an error when fewer
# than the required number exist.
# NOTE(review): required_node_number is accepted but the visible check
# hard-codes 2 — confirm whether the parameter should be used instead.
329 def assert_and_get_compute_nodes(nova_client, required_node_number=2):
330 """Get the compute nodes in the deployment
331 Exit if the deployment doesn't have enough compute nodes"""
332 compute_nodes = os_utils.get_hypervisors(nova_client)
334 num_compute_nodes = len(compute_nodes)
335 if num_compute_nodes < 2:
336 logger.error("There are %s compute nodes in the deployment. "
337 "Minimum number of nodes to complete the test is 2."
341 logger.debug("Compute nodes: %s" % compute_nodes)
# Ensure the security group allows ICMP: create the rule unless an
# equivalent one already exists (then just log that it does).
345 def open_icmp(neutron_client, security_group_id):
346 if os_utils.check_security_group_rules(neutron_client,
351 if not os_utils.create_secgroup_rule(neutron_client,
355 logger.error("Failed to create icmp security group rule...")
357 logger.info("This rule exists for security group: %s"
# Ensure the security group allows HTTP (TCP) traffic: create the rule
# unless an equivalent one already exists (then just log that it does).
361 def open_http_port(neutron_client, security_group_id):
362 if os_utils.check_security_group_rules(neutron_client,
368 if not os_utils.create_secgroup_rule(neutron_client,
374 logger.error("Failed to create http security group rule...")
376 logger.info("This rule exists for security group: %s"
# Ensure the security group allows BGP (TCP 179) traffic: create the rule
# unless an equivalent one already exists (then just log that it does).
380 def open_bgp_port(neutron_client, security_group_id):
381 if os_utils.check_security_group_rules(neutron_client,
387 if not os_utils.create_secgroup_rule(neutron_client,
392 logger.error("Failed to create bgp security group rule...")
394 logger.info("This rule exists for security group: %s"
# Run a shell command, streaming its combined stdout/stderr, and return
# (output, success). NOTE(review): shell=True executes the string through
# the shell — only safe for trusted, test-generated commands.
398 def exec_cmd(cmd, verbose):
400 logger.debug("Executing '%s'" % cmd)
401 p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
402 stderr=subprocess.STDOUT)
# Read line-by-line until EOF (iter sentinel is the empty bytes string).
404 for line in iter(p.stdout.readline, b''):
411 returncode = p.wait()
413 logger.error("Command %s failed to execute." % cmd)
416 return output, success
# Query the OpenDaylight RESTCONF FIB table and report whether *ip* has
# an entry, via a plain substring match on the response body.
# NOTE(review): "Querring" in the debug message is a typo of "Querying";
# left unchanged here because it is a runtime log string.
419 def check_odl_fib(ip, controller_ip):
420 """Check that there is an entry in the ODL Fib for `ip`"""
421 url = "http://" + controller_ip + \
422 ":8181/restconf/config/odl-fib:fibEntries/"
423 logger.debug("Querring '%s' for FIB entries", url)
424 res = requests.get(url, auth=(ODL_USER, ODL_PASS))
425 if res.status_code != 200:
426 logger.error("OpenDaylight response status code: %s", res.status_code)
428 logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
430 logger.debug("OpenDaylight FIB: \n%s" % res.text)
431 return ip in res.text
# Execute *cmd* inside the OpenDaylight Karaf shell on *odl_node* and
# return its output; stderr is discarded because the client prints noise
# that the node's run_cmd would misread as a failure.
434 def run_odl_cmd(odl_node, cmd):
435 '''Run a command in the OpenDaylight Karaf shell
436 This is a bit flimsy because of shell quote escaping, make sure that
437 the cmd passed does not have any top level double quotes or this
439 The /dev/null is used because client works, but outputs something
440 that contains "ERROR" and run_cmd doesn't like that.
442 karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
443 ' 2>/dev/null' % cmd)
444 return odl_node.run_cmd(karaf_cmd)
# Poll an instance's console log until cloud-init reports completion
# ("Cloud-init v. ... finished at") or a module failure; returns a
# success flag. NOTE(review): loop bounds/sleep_time and the assignments
# to `success` are on lines not visible in this chunk.
447 def wait_for_cloud_init(instance):
449 # ubuntu images take a long time to start
452 logger.info("Waiting for cloud init of instance: {}"
453 "".format(instance.name))
455 instance_log = instance.get_console_output()
456 if "Failed to run module" in instance_log:
458 logger.error("Cloud init failed to run. Reason: %s",
461 if re.search(r"Cloud-init v. .+ finished at", instance_log):
464 time.sleep(sleep_time)
468 logger.error("Cloud init timed out"
472 logger.info("Finished waiting for cloud init of instance {} result was {}"
473 "".format(instance.name, success))
# Attach a running instance's libvirt domain to the external bridge on its
# compute node. On Fuel the bridge is used directly; on Apex a Linux
# bridge is created and veth-plugged into the OVS br-ex first, because
# virsh attach-interface cannot target an OVS bridge directly.
# NOTE(review): the `bridge = ...` assignments and the start of the shell
# heredoc are on lines not visible in this chunk.
477 def attach_instance_to_ext_br(instance, compute_node):
478 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
479 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
480 if installer_type == "fuel":
482 elif installer_type == "apex":
483 # In Apex, br-ex is an ovs bridge and virsh attach-interface
484 # won't just work. We work around it by creating a linux
485 # bridge, attaching that to br-ex with a veth pair
486 # and virsh-attaching the instance to the linux-bridge
490 if ! sudo brctl show |grep -q ^{bridge};then
491 sudo brctl addbr {bridge}
492 sudo ip link set {bridge} up
493 sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
494 sudo ip link set dev ovs-quagga-tap up
495 sudo ip link set dev quagga-tap up
496 sudo ovs-vsctl add-port br-ex ovs-quagga-tap
497 sudo brctl addif {bridge} quagga-tap
500 compute_node.run_cmd(cmd.format(bridge=bridge))
502 compute_node.run_cmd("sudo virsh attach-interface %s"
503 " bridge %s" % (libvirt_instance_name, bridge))
# Undo attach_instance_to_ext_br: detach the domain's bridge interface
# (found by scraping its 52:54:* MAC from virsh dumpxml) and, on Apex,
# tear down the helper Linux bridge and veth pair.
# NOTE(review): the MAC-scraping command assumes exactly one running
# domain / matching MAC on the node; the `bridge = ...` assignments are
# on lines not visible in this chunk.
507 def detach_instance_from_ext_br(instance, compute_node):
508 libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
509 mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
510 "grep running | awk '{print $2}'); "
511 "do echo -n ; sudo virsh dumpxml $vm| "
512 "grep -oP '52:54:[\da-f:]+' ;done")
513 compute_node.run_cmd("sudo virsh detach-interface --domain %s"
514 " --type bridge --mac %s"
515 % (libvirt_instance_name, mac))
517 installer_type = str(os.environ['INSTALLER_TYPE'].lower())
518 if installer_type == "fuel":
520 elif installer_type == "apex":
521 # In Apex, br-ex is an ovs bridge and virsh attach-interface
522 # won't just work. We work around it by creating a linux
523 # bridge, attaching that to br-ex with a veth pair
524 # and virsh-attaching the instance to the linux-bridge
527 sudo brctl delif {bridge} quagga-tap &&
528 sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
529 sudo ip link set dev quagga-tap down &&
530 sudo ip link set dev ovs-quagga-tap down &&
531 sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
532 sudo ip link set {bridge} down &&
533 sudo brctl delbr {bridge}
535 compute_node.run_cmd(cmd.format(bridge=bridge))
# Best-effort teardown of neutron test resources, in dependency order:
# floating IPs, BGPVPNs, router interfaces, router gateways, subnets,
# routers, networks. Each failed deletion is logged, not raised.
# NOTE(review): uses the root `logging` module for errors instead of the
# module `logger` — likely unintentional but left unchanged here.
537 def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
538 subnet_ids, router_ids, network_ids):
540 if len(floatingip_ids) != 0:
541 for floatingip_id in floatingip_ids:
542 if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
543 logging.error('Fail to delete all floating ips. '
544 'Floating ip with id {} was not deleted.'.
545 format(floatingip_id))
548 if len(bgpvpn_ids) != 0:
549 for bgpvpn_id in bgpvpn_ids:
550 delete_bgpvpn(neutron_client, bgpvpn_id)
552 if len(interfaces) != 0:
553 for router_id, subnet_id in interfaces:
554 if not os_utils.remove_interface_router(neutron_client,
555 router_id, subnet_id):
556 logging.error('Fail to delete all interface routers. '
557 'Interface router with id {} was not deleted.'.
# Remove router gateways before deleting the routers themselves.
560 if len(router_ids) != 0:
561 for router_id in router_ids:
562 if not os_utils.remove_gateway_router(neutron_client, router_id):
563 logging.error('Fail to delete all gateway routers. '
564 'Gateway router with id {} was not deleted.'.
567 if len(subnet_ids) != 0:
568 for subnet_id in subnet_ids:
569 if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
570 logging.error('Fail to delete all subnets. '
571 'Subnet with id {} was not deleted.'.
575 if len(router_ids) != 0:
576 for router_id in router_ids:
577 if not os_utils.delete_neutron_router(neutron_client, router_id):
578 logging.error('Fail to delete all routers. '
579 'Router with id {} was not deleted.'.
583 if len(network_ids) != 0:
584 for network_id in network_ids:
585 if not os_utils.delete_neutron_net(neutron_client, network_id):
586 logging.error('Fail to delete all networks. '
587 'Network with id {} was not deleted.'.
# Best-effort teardown of nova test resources: instances first, then the
# glance images they were booted from. Failed deletions are logged only.
593 def cleanup_nova(nova_client, instance_ids, image_ids):
594 if len(instance_ids) != 0:
595 for instance_id in instance_ids:
596 if not os_utils.delete_instance(nova_client, instance_id):
597 logging.error('Fail to delete all instances. '
598 'Instance with id {} was not deleted.'.
602 if len(image_ids) != 0:
603 for image_id in image_ids:
604 if not os_utils.delete_glance_image(nova_client, image_id):
605 logging.error('Fail to delete all images. '
606 'Image with id {} was not deleted.'.
# Create a BGPVPN resource; keyword arguments (name, route_targets,
# route_distinguishers, ...) are passed straight through as the JSON
# "bgpvpn" body of the Neutron BGPVPN API request.
612 def create_bgpvpn(neutron_client, **kwargs):
613 # route_distinguishers
615 json_body = {"bgpvpn": kwargs}
616 return neutron_client.create_bgpvpn(json_body)
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by *bgpvpn_id*.

    Keyword arguments are wrapped into the ``{"bgpvpn": {...}}`` body
    expected by the Neutron BGPVPN API extension.
    """
    payload = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, payload)
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN with the given id via the Neutron client."""
    result = neutron_client.delete_bgpvpn(bgpvpn_id)
    return result
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Fetch the BGPVPN resource with the given id (show_bgpvpn wrapper)."""
    response = neutron_client.show_bgpvpn(bgpvpn_id)
    return response
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the list of router ids associated with the given BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['routers']
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the list of network ids associated with the given BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['networks']
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate router *router_id* with the BGPVPN *bgpvpn_id*."""
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate network *neutron_network_id* with the BGPVPN *bgpvpn_id*."""
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
# Check every OpenStack node that hosts an OVS br-int bridge and record,
# per node name, whether br-int's fail_mode is 'secure'.
# NOTE(review): this function continues past the end of this chunk (the
# `is_secure` init and final return are not visible) — documented from the
# visible portion only.
650 def is_fail_mode_secure():
652 Checks the value of the attribute fail_mode,
653 if it is set to secure. This check is performed
654 on all OVS br-int interfaces, for all OpenStack nodes.
657 openstack_nodes = get_nodes()
658 get_ovs_int_cmd = ("sudo ovs-vsctl show | "
661 # Define OVS get fail_mode command
662 get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
663 for openstack_node in openstack_nodes:
# Skip nodes that are down/unreachable.
664 if not openstack_node.is_active():
667 ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
669 if 'br-int' in ovs_int_list:
670 # Execute get fail_mode command
671 br_int_fail_mode = (openstack_node.
672 run_cmd(get_ovs_fail_mode_cmd).strip())
673 if br_int_fail_mode == 'secure':
675 is_secure[openstack_node.name] = True
678 logging.error('The fail_mode for br-int was not secure '
679 'in {} node'.format(openstack_node.name))
680 is_secure[openstack_node.name] = False