Add support for fuel installer
sdnvpn/lib/utils.py
#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import json
import logging
import os
import time
import requests
import re
import subprocess
from concurrent.futures import ThreadPoolExecutor
from openstack.exceptions import ResourceNotFound, NotFoundException
from requests.auth import HTTPBasicAuth

from functest.utils import env
from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils

logger = logging.getLogger('sdnvpn_test_utils')

common_config = sdnvpn_config.CommonConfig()

ODL_USER = env.get('SDN_CONTROLLER_USER')
ODL_PASSWORD = env.get('SDN_CONTROLLER_PASSWORD')
ODL_IP = env.get('SDN_CONTROLLER_IP')
ODL_PORT = env.get('SDN_CONTROLLER_RESTCONFPORT')

executor = ThreadPoolExecutor(5)


class ExtraRoute(object):
    """
    Class to represent an extra route for a router
    """

    def __init__(self, destination, nexthop):
        self.destination = destination
        self.nexthop = nexthop


class AllowedAddressPair(object):
    """
    Class to represent an allowed address pair for a neutron port
    """

    def __init__(self, ipaddress, macaddress):
        self.ipaddress = ipaddress
        self.macaddress = macaddress


def create_default_flavor():
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)


def create_net(conn, name):
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(conn, name)
    if not net_id:
        logger.error(
            "There has been a problem while creating the neutron network")
        raise Exception("There has been a problem while creating"
                        " the neutron network {}".format(name))
    return net_id


def create_subnet(conn, name, cidr, net_id):
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(conn,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem while creating the neutron subnet")
        raise Exception("There has been a problem while creating"
                        " the neutron subnet {}".format(name))
    return subnet_id


def create_network(conn, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(conn,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem while creating the neutron network")
        raise Exception("There has been a problem while creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            conn, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem while creating the second subnet")
            raise Exception("There has been a problem while creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id

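# Illustrative usage sketch (not called from this module): given an
# openstacksdk connection `conn`, a dual-subnet network can be created as
#
#   net_id, subnet_id, router_id = create_network(
#       conn, 'net-1', 'subnet-1', '10.10.10.0/24', 'router-1',
#       subnet2='subnet-2', cidr2='10.10.20.0/24')
#
# Note that when subnet2 is given, the returned subnet_id refers to the
# second subnet. All names and CIDRs above are examples only.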

def get_port(conn, instance_id):
    ports = os_utils.get_port_list(conn)
    for port in ports:
        if port.device_id == instance_id:
            return port
    return None


def update_port_allowed_address_pairs(conn, port_id, address_pairs):
    if len(address_pairs) <= 0:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)

    try:
        port = conn.network.\
            update_port(port_id, allowed_address_pairs=allowed_address_pairs)
        return port.id
    except Exception as e:
        logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None

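# Illustrative sketch: AllowedAddressPair objects are plain containers, so a
# VIP address could be whitelisted on a port like this (the address and MAC
# below are made up):
#
#   pairs = [AllowedAddressPair('10.10.10.50', 'fa:16:3e:00:00:01')]
#   update_port_allowed_address_pairs(conn, port_id, pairs)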

def create_instance(conn,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node=None,
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor
    # Avoid a mutable default argument; fall back to an empty file list
    if files is None:
        files = []

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        # Retrieve IP of INSTANCE
        network_name = conn.network.get_network(network_id).name
        instance_ip = conn.compute.get_server(instance).\
            addresses.get(network_name)[0]['addr']
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance_ip))

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)

    return instance

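# Illustrative sketch: only the connection, name, image, network and security
# group are mandatory; the flavor defaults to common_config.default_flavor.
# All ids and the availability zone format below are hypothetical:
#
#   vm = create_instance(conn, 'sdnvpn-vm-1', image_id, network_id, sg_id,
#                        secgroup_name='sg-sdnvpn',
#                        compute_node='nova:compute-0',
#                        userdata=generate_ping_userdata(['10.10.10.2']))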

def generate_ping_userdata(ips_array, ping_count=10):
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip >/dev/null 2>&1\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))

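# For reference, generate_ping_userdata(['10.10.10.2'], ping_count=5) renders
# roughly the following script (the ip list becomes the shell's positional
# parameters, which the for loop then iterates over forever):
#
#   #!/bin/sh
#   set 10.10.10.2
#   while true; do
#    for i do
#     ip=$i
#     ping -c 5 $ip >/dev/null 2>&1
#     ...
#    done
#    sleep 1
#   done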

def generate_userdata_common():
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for i do\n"
          "  ip=$i\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_address, net_mask):
    return ("#!/bin/sh\n"
            "set -xe\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sleep 20\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_address, net_mask))


def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warning("installer type %s is neither fuel nor apex. "
                       "Returning None for installer handler"
                       % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            installer_user = "root"
        elif installer_type in ["fuel"]:
            installer_user = "ubuntu"

        developHandler = DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            installer_user,
            pkey_file="/root/.ssh/id_rsa")

        return developHandler

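# Usage note: get_installerHandler() relies on the INSTALLER_TYPE and
# INSTALLER_IP environment variables, e.g. (values are examples only):
#
#   export INSTALLER_TYPE=fuel
#   export INSTALLER_IP=10.20.0.2
#
# and returns an opnfv.deployment handler from which the nodes can be listed.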

def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(conn, instance):
    # dict.values() is not subscriptable in Python 3, hence the list() call
    instance_ip = list(conn.compute.get_server(instance)
                       .addresses.values())[0][0]['addr']
    return instance_ip


def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    conn = os_utils.get_os_connection()
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = conn.compute.\
            get_server_console_output(instance)['output']
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s does not boot up properly."
                     % instance.id)
        return False
    return True


def wait_for_instances_up(*instances):
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)


def wait_for_instances_get_dhcp(*instances):
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)


def async_Wait_for_instances(instances, tries=40):
    if len(instances) <= 0:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)
    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("One or more instances are not yet booted up")


def wait_for_instance_delete(conn, instance_id, tries=30):
    sleep_time = 2
    instances = [instance_id]
    logger.debug("Waiting for instance %s to be deleted"
                 % (instance_id))
    while tries > 0 and instance_id in instances:
        instances = [instance.id for instance in
                     os_utils.get_instances(conn)]
        time.sleep(sleep_time)
        tries -= 1
    if instance_id in instances:
        logger.error("Deletion of instance %s failed" %
                     (instance_id))


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(conn, required_node_number=2):
    """Get the compute nodes in the deployment
    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(conn)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes


def open_icmp(conn, security_group_id):
    # check_security_group_rules returns True when no matching rule exists
    # yet, i.e. when the rule still has to be created
    if os_utils.check_security_group_rules(conn,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):

        if not os_utils.create_secgroup_rule(conn,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_http_port(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):

        if not os_utils.create_secgroup_rule(conn,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):

            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_bgp_port(conn, security_group_id):
    if os_utils.check_security_group_rules(conn,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):

        if not os_utils.create_secgroup_rule(conn,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def exec_cmd(cmd, verbose):
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        # stdout yields bytes; decode before appending to the str buffer
        output += line.decode('utf-8', errors='replace')

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success

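# Illustrative sketch: exec_cmd returns the combined stdout/stderr text plus
# a success flag, so a caller can do, for instance:
#
#   output, success = exec_cmd('ovs-vsctl show', verbose=True)
#   if not success:
#       logger.error('ovs-vsctl failed: %s' % output)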

def check_odl_fib(ip):
    """Check that there is an entry in the ODL FIB for `ip`"""
    url = ("http://{user}:{password}@{ip}:{port}/restconf/config/"
           "odl-fib:fibEntries/".format(user=ODL_USER,
                                        password=ODL_PASSWORD, ip=ODL_IP,
                                        port=ODL_PORT))
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASSWORD))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text

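# Illustrative sketch: with the SDN_CONTROLLER_* environment variables
# pointing at ODL, a test can assert that a VM address was advertised, e.g.:
#
#   assert check_odl_fib('10.10.10.2'), 'FIB entry missing for 10.10.10.2'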

def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping; make sure that
    the cmd passed does not contain any top-level double quotes or this
    function will break.
    Output is redirected to /dev/null because the client works but prints
    something containing "ERROR", which run_cmd does not tolerate.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)

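# Illustrative sketch, minding the quoting caveat in the docstring above
# (no top-level double quotes in cmd); 'fib-show' stands in for whatever
# Karaf CLI command is available on the deployment:
#
#   output = run_odl_cmd(odl_node, 'fib-show')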

def wait_for_cloud_init(conn, instance):
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = conn.compute.\
            get_server_console_output(instance)['output']
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    if tries == 0:
        logger.error("Cloud init timed out. Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {};"
                " result was {}".format(instance.name, success))
    return success


def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    # In Apex, br-ex (or br-floating for Fuel) is an ovs bridge and virsh
    # attach-interface won't just work. We work around it by creating a linux
    # bridge, attaching that to br-ex (or br-floating for Fuel) with a
    # veth pair and virsh-attaching the instance to the linux-bridge
    if installer_type in ["fuel"]:
        bridge = "br-floating"
    elif installer_type in ["apex"]:
        bridge = "br-ex"
    else:
        logger.warning("installer type %s is neither fuel nor apex."
                       % installer_type)
        return

    cmd = """
    set -e
    if ! sudo brctl show |grep -q ^br-quagga;then
      sudo brctl addbr br-quagga
      sudo ip link set br-quagga up
      sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
      sudo ip link set dev ovs-quagga-tap up
      sudo ip link set dev quagga-tap up
      sudo ovs-vsctl add-port {bridge} ovs-quagga-tap
      sudo brctl addif br-quagga quagga-tap
    fi
    """
    compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge br-quagga" % (libvirt_instance_name))


def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    # This function undoes all the actions performed by
    # attach_instance_to_ext_br on Fuel and Apex installers.
    if installer_type in ["fuel"]:
        bridge = "br-floating"
    elif installer_type in ["apex"]:
        bridge = "br-ex"
    else:
        logger.warning("installer type %s is neither fuel nor apex."
                       % installer_type)
        return
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               r"grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    cmd = """
        sudo brctl delif br-quagga quagga-tap &&
        sudo ovs-vsctl del-port {bridge} ovs-quagga-tap &&
        sudo ip link set dev quagga-tap down &&
        sudo ip link set dev ovs-quagga-tap down &&
        sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
        sudo ip link set br-quagga down &&
        sudo brctl delbr br-quagga
    """
    compute_node.run_cmd(cmd.format(bridge=bridge))


def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
                    interfaces, subnet_ids, router_ids, network_ids):
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(conn, floatingip_id):
                logger.error('Failed to delete all floating IPs. '
                             'Floating IP with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(conn,
                                                    router_id, subnet_id):
                logger.error('Failed to delete all router interfaces. '
                             'Interface of router with id {} was not '
                             'deleted.'.format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(conn, router_id):
                logger.error('Failed to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(conn, subnet_id):
                logger.error('Failed to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(conn, router_id):
                logger.error('Failed to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(conn, network_id):
                logger.error('Failed to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True


def cleanup_nova(conn, instance_ids, flavor_ids=None):
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            conn.compute.delete_flavor(flavor_id)
    if len(instance_ids) != 0:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(conn, instance_id):
                logger.error('Failed to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
            else:
                wait_for_instance_delete(conn, instance_id)
    return True


def cleanup_glance(conn, image_ids):
    if len(image_ids) != 0:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(conn, image_id):
                logger.error('Failed to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True


def create_bgpvpn(neutron_client, **kwargs):
    # Typical kwargs are route_distinguishers and route_targets
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)

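# Illustrative sketch of a BGPVPN create call; the kwargs are passed through
# verbatim as the "bgpvpn" request body (all values below are examples only):
#
#   bgpvpn = create_bgpvpn(neutron_client,
#                          name='sdnvpn-1',
#                          route_distinguishers=['64512:1'],
#                          route_targets='64512:1')
#   bgpvpn_id = bgpvpn['bgpvpn']['id']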

def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']


def create_router_association(neutron_client, bgpvpn_id, router_id):
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)


def is_fail_mode_secure():
    """
    Checks whether the fail_mode attribute of the OVS br-int bridge
    is set to 'secure' on every active OpenStack node.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type in ['fuel']:
            if (
                'controller' in openstack_node.roles or
                'opendaylight' in openstack_node.roles or
                'installer' in openstack_node.roles
            ):
                continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'in {} node'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure

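# For reference, is_fail_mode_secure() returns a dict keyed by node name,
# e.g. {'compute-0': True, 'compute-1': False}, so a caller can check
# all(is_fail_mode_secure().values()).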

def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    try:
        conn.network.update_quota(tenant_id, networks=nw_quota,
                                  subnets=subnet_quota, ports=port_quota,
                                  routers=router_quota)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(network,"
                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False


def update_instance_quota_class(cloud, instances_quota):
    try:
        cloud.set_compute_quotas('admin', instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(compute,"
                     " '%s')]: %s" % (instances_quota, e))
        return False


def get_neutron_quota(conn, tenant_id):
    try:
        return conn.network.get_quota(tenant_id)
    except ResourceNotFound as e:
        logger.error("Error in getting network quota for tenant"
                     " '%s': %s" % (tenant_id, e))
        raise


def get_nova_instances_quota(cloud):
    try:
        return cloud.get_compute_quotas('admin').instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise


def update_router_extra_route(conn, router_id, extra_routes):
    if len(extra_routes) <= 0:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)

    try:
        conn.network.update_router(router_id, routes=routes_list)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise


def update_router_no_extra_route(conn, router_ids):
    for router_id in router_ids:
        try:
            conn.network.update_router(router_id, routes=[])
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
    # Return only after every router in the list has been processed
    return True


def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Takes a list of compute nodes and a list of OVS bridges and returns,
    as a list of lines, the console output containing all the OVS groups
    from every bridge on every node in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines


def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Takes a list of compute nodes and a list of OVS bridges and returns,
    as a list of lines, the console output containing all the OVS flows
    from every bridge on every node in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines

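# Illustrative sketch: dumping the br-int flows of all compute nodes with the
# default OpenFlow13 protocol and filtering by table (table number is only an
# example):
#
#   flows = get_ovs_flows(compute_nodes, ['br-int'])
#   table_21_flows = [f for f in flows if 'table=21' in f]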

def get_node_ip_and_netmask(node, iface):
    cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
          .format(iface=iface)
    mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
    mgmt_ip = mgmt_net_cidr[0].split('/')[0]
    mgmt_netmask = mgmt_net_cidr[0].split('/')[1]

    return mgmt_ip, mgmt_netmask


def get_odl_bgp_entity_owner(odl_nodes):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode we need to execute the BGP speaker
    related commands on the ODL node which owns the BGP entity.

    :param odl_nodes: list of OpenDaylight nodes
    :return odl_node: OpenDaylight node on which the BGP entity owner runs
    """
    if len(odl_nodes) == 1:
        return odl_nodes[0]
    else:
        url = ('http://{user}:{password}@{ip}:{port}/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
                       port=ODL_PORT))

        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type in ['apex']:
            node_user = 'heat-admin'
        elif installer_type in ['fuel']:
            node_user = 'ubuntu'

        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                                'initial/akka.conf')
        remote_odl_home_akka_conf = '/home/{0}/akka.conf'.format(node_user)
        local_tmp_akka_conf = '/tmp/akka.conf'
        try:
            json_output = requests.get(url).json()
        except Exception:
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
            return None
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        for odl_node in odl_nodes:
            if installer_type in ['apex']:
                get_odl_id_cmd = 'sudo docker ps -qf name=opendaylight_api'
                odl_id = odl_node.run_cmd(get_odl_id_cmd)
                odl_node.run_cmd('sudo docker cp '
                                 '{container_id}:{odl_akka_conf} '
                                 '/home/{user}/'
                                 .format(container_id=odl_id,
                                         odl_akka_conf=remote_odl_akka_conf,
                                         user=node_user))
            elif installer_type in ['fuel']:
                odl_node.run_cmd('sudo cp {0} /home/{1}/'
                                 .format(remote_odl_akka_conf, node_user))
            odl_node.run_cmd('sudo chmod 777 {0}'
                             .format(remote_odl_home_akka_conf))
            odl_node.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

            # Use a context manager so the file handle is closed properly
            with open(local_tmp_akka_conf) as akka_conf:
                for line in akka_conf:
                    if re.search(odl_bgp_owner, line):
                        return odl_node
        return None


def add_quagga_external_gre_end_point(odl_nodes, remote_tep_ip):
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
                 }
    url = ('http://{ip}:{port}/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'.format(ip=ODL_IP,
                                                         port=ODL_PORT))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth(ODL_USER, ODL_PASSWORD))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
    return None


def is_fib_entry_present_on_odl(odl_nodes, ip_prefix, vrf_id):
    url = ('http://{user}:{password}@{ip}:{port}/restconf/config/'
           'odl-fib:fibEntries/vrfTables/{vrf}/'
           .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
                   port=ODL_PORT, vrf=vrf_id))
    logger.debug("url is %s" % url)
    try:
        vrf_table = requests.get(url).json()
        is_ipprefix_exists = False
        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
            if vrf_entry['destPrefix'] == ip_prefix:
                is_ipprefix_exists = True
                break
        return is_ipprefix_exists
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False


def wait_stack_for_status(conn, stack_id, stack_status, limit=12):
    """ Waits for the stack to reach the specified status. To be used with
    CREATE_COMPLETE and UPDATE_COMPLETE.
    Will try a specific number of attempts at 10sec intervals
    (default 2min)

    :param stack_id: the stack id returned by the create_stack api call
    :param stack_status: the stack status to wait for
    :param limit: the maximum number of attempts
    """
    logger.debug("Waiting for stack '%s' to reach status '%s'"
                 % (stack_id, stack_status))

    stack_create_complete = False
    attempts = 0
    while attempts < limit:
        try:
            stack_st = conn.orchestration.get_stack(stack_id).status
        except NotFoundException:
            logger.error("Stack create failed")
            raise SystemError("Stack create failed")
        if stack_st == stack_status:
            stack_create_complete = True
            break
        attempts += 1
        time.sleep(10)

    logger.debug("Stack status check: %s times" % attempts)
    if stack_create_complete is False:
        logger.error("Stack create failed")
        raise SystemError("Stack create failed")

    return True

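# Illustrative sketch, assuming an os_utils.create_stack helper exists
# alongside the os_utils.delete_stack used below (arguments elided):
#
#   stack_id = os_utils.create_stack(conn, ...)
#   wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE')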

def delete_stack_and_wait(conn, stack_id, limit=12):
    """ Starts and waits for completion of delete stack

    Will try a specific number of attempts at 10sec intervals
    (default 2min)

    :param stack_id: the id of the stack to be deleted
    :param limit: the maximum number of attempts
    """
    delete_started = False
    if stack_id is not None:
        delete_started = os_utils.delete_stack(conn, stack_id)

    if delete_started is True:
        logger.debug("Stack delete successfully started")
    else:
        logger.error("Stack delete start failed")

    stack_delete_complete = False
    attempts = 0
    while attempts < limit:
        try:
            stack_st = conn.orchestration.get_stack(stack_id).status
            if stack_st == 'DELETE_COMPLETE':
                stack_delete_complete = True
                break
            attempts += 1
            time.sleep(10)
        except NotFoundException:
            stack_delete_complete = True
            break

    logger.debug("Stack status check: %s times" % attempts)
    if not stack_delete_complete:
        logger.error("Stack delete failed")
        raise SystemError("Stack delete failed")

    return True


def get_heat_environment(testcase, common_config):
    """ Reads the heat parameters of a testcase into a yaml object

    Each testcase where a Heat Orchestration Template (HOT) is introduced
    has an associated parameters section.
    Reads the testcase.heat_parameters section, reads COMMON_CONFIG.flavor
    and places it under the parameters tree.

    :param testcase: the testcase for which the HOT file is fetched
    :param common_config: the common config section
    :return environment: a yaml object to be used as environment
    """
    fl = common_config.default_flavor
    param_dict = testcase.heat_parameters
    param_dict['flavor'] = fl
    env_dict = {'parameters': param_dict}
    return env_dict

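# For reference, given a testcase whose heat_parameters are, hypothetically,
# {'secgroup_name': 'sg-1'} and a default flavor 'm1.tiny', this returns:
#
#   {'parameters': {'secgroup_name': 'sg-1', 'flavor': 'm1.tiny'}}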

def get_vms_from_stack_outputs(conn, stack_id, vm_stack_output_keys):
    """ Converts a vm name from a heat stack output to a nova vm object

    :param stack_id: the id of the stack to fetch the vms from
    :param vm_stack_output_keys: a list of stack outputs with the vm names
    :return vms: a list of vm objects corresponding to the outputs
    """
    vms = []
    for vmk in vm_stack_output_keys:
        vm_output = os_utils.get_output(conn, stack_id, vmk)
        if vm_output is not None:
            vm_name = vm_output['output_value']
            logger.debug("vm '%s' read from heat output" % vm_name)
            vm = os_utils.get_instance_by_name(conn, vm_name)
            if vm is not None:
                vms.append(vm)
    return vms