# Merge "Updated from global requirements"
# [sdnvpn.git] / sdnvpn / lib / utils.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2017 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 import json
11 import logging
12 import os
13 import time
14 import requests
15 import re
16 import subprocess
17 from concurrent.futures import ThreadPoolExecutor
18 from requests.auth import HTTPBasicAuth
19
20 from opnfv.deployment.factory import Factory as DeploymentFactory
21
22 from sdnvpn.lib import config as sdnvpn_config
23 import sdnvpn.lib.openstack_utils as os_utils
24
25 logger = logging.getLogger('sdnvpn_test_utils')
26
27 common_config = sdnvpn_config.CommonConfig()
28
29 ODL_USER = 'admin'
30 ODL_PASS = 'admin'
31
32 executor = ThreadPoolExecutor(5)
33
34
class ExtraRoute(object):
    """Value object describing one extra route of a neutron router.

    An extra route is the pair (destination CIDR, nexthop IP).
    """

    def __init__(self, destination, nexthop):
        # Network (CIDR) reachable through the nexthop address.
        self.destination = destination
        # IP address of the next hop towards the destination.
        self.nexthop = nexthop
43
44
class AllowedAddressPair(object):
    """Value object describing one allowed address pair of a neutron port.

    An allowed address pair is the pair (IP address, MAC address).
    """

    def __init__(self, ipaddress, macaddress):
        # IP address allowed on the port.
        self.ipaddress = ipaddress
        # MAC address allowed on the port.
        self.macaddress = macaddress
53
54
def create_default_flavor():
    """Get or create the default test flavor from the common config."""
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.default_flavor,
                                         cfg.default_flavor_ram,
                                         cfg.default_flavor_disk,
                                         cfg.default_flavor_vcpus)
60
61
def create_custom_flavor():
    """Get or create the custom test flavor from the common config."""
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
67
68
def create_net(conn, name):
    """Create a neutron network and return its id.

    :param conn: openstack SDK connection
    :param name: name of the network to create
    :raises Exception: when the network could not be created
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(conn, name)
    if net_id:
        return net_id
    logger.error(
        "There has been a problem when creating the neutron network")
    raise Exception("There has been a problem when creating"
                    " the neutron network {}".format(name))
78
79
def create_subnet(conn, name, cidr, net_id):
    """Create a neutron subnet inside a network and return its id.

    :param conn: openstack SDK connection
    :param name: name of the subnet
    :param cidr: CIDR of the subnet
    :param net_id: id of the network to attach the subnet to
    :raises Exception: when the subnet could not be created
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(conn,
                                               name,
                                               cidr,
                                               net_id)
    if subnet_id:
        return subnet_id
    logger.error(
        "There has been a problem when creating the neutron subnet")
    raise Exception("There has been a problem when creating"
                    " the neutron subnet {}".format(name))
93
94
def create_network(conn, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Create a network with one or two subnets attached to a router.

    Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962

    :return: tuple (net_id, subnet_id, router_id); when a second subnet
             is requested, subnet_id is the id of that second subnet.
    :raises Exception: when any resource could not be created
    """
    network_dic = os_utils.create_network_full(conn,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        raise Exception("There has been a problem when creating"
                        " the neutron network {}".format(net))

    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is None:
        return net_id, subnet_id, router_id

    logger.debug("Creating and attaching a second subnet...")
    subnet_id = os_utils.create_neutron_subnet(
        conn, subnet2, cidr2, net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the second subnet")
        raise Exception("There has been a problem when creating"
                        " the second subnet {}".format(subnet2))
    logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
125
126
def get_port(conn, instance_id):
    """Return the first neutron port owned by the given instance.

    :return: the port object, or None when no port matches.
    """
    return next((port for port in os_utils.get_port_list(conn)
                 if port.device_id == instance_id), None)
133
134
def update_port_allowed_address_pairs(conn, port_id, address_pairs):
    """Set the allowed-address-pairs attribute of a neutron port.

    :param conn: openstack SDK connection
    :param port_id: id of the port to update
    :param address_pairs: list of AllowedAddressPair objects; the call
                          is a no-op when the list is empty
    :return: the port id on success, None on failure or empty input
    """
    # Idiom fix: rely on truthiness instead of `len(x) <= 0`.
    if not address_pairs:
        return
    # Translate AllowedAddressPair objects into the dict shape the
    # neutron API expects (built via comprehension instead of append loop).
    allowed_address_pairs = [
        {'ip_address': pair.ipaddress, 'mac_address': pair.macaddress}
        for pair in address_pairs
    ]

    try:
        port = conn.network.\
            update_port(port_id, allowed_address_pairs=allowed_address_pairs)
        return port.id
    except Exception as e:
        logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
                     " %s" % (port_id, address_pairs, e))
        return None
152
153
def create_instance(conn,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node=None,
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot an instance, wait until it is active and attach it to sg_id.

    :param conn: openstack SDK connection
    :param name: name for the new instance
    :param image_id: glance image to boot from
    :param network_id: neutron network to plug the instance into
    :param sg_id: id of the security group to attach
    :param secgroup_name: optional security group name (only used for
                          the log message; sg_id is what gets attached)
    :param fixed_ip: optional fixed IP for the instance
    :param compute_node: optional availability zone / host pinning
    :param userdata: optional cloud-init userdata script
    :param files: optional list of files to inject
    :param kwargs: extra arguments; 'flavor' defaults to the common
                   config default flavor
    :return: the created instance
    :raises Exception: when the instance failed to boot
    """
    # Bug fix: `files=[]` was a shared mutable default argument; use
    # None as the sentinel and create a fresh list per call.
    if files is None:
        files = []
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        # Retrieve IP of INSTANCE
        network_name = conn.network.get_network(network_id).name
        instance_ip = conn.compute.get_server(instance).\
            addresses.get(network_name)[0]['addr']
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance_ip))

    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)

    return instance
207
208
def generate_ping_userdata(ips_array, ping_count=10):
    """Build a cloud-init shell script that pings every given IP forever.

    :param ips_array: list of IP addresses to ping
    :param ping_count: echo requests per ping invocation
    :return: the userdata script as a string
    """
    # Join with a leading space per IP so the script reads "set a b c".
    ips = "".join(" %s" % ip for ip in ips_array)
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))
230
231
def generate_userdata_common():
    """Return a cloud-init script installing a pre-shared ssh key for the
    cirros user, so that test VMs can ssh to each other.
    """
    script_lines = (
        "#!/bin/sh",
        "sudo mkdir -p /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/id_rsa",
        "mv /home/cirros/id_rsa /home/cirros/.ssh/",
        ("sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
         "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
         "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
         "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
         "cirros@test1>/home/cirros/.ssh/authorized_keys"),
        "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys",
        "chmod 700 /home/cirros/.ssh",
        "chmod 644 /home/cirros/.ssh/authorized_keys",
        "chmod 600 /home/cirros/.ssh/id_rsa",
    )
    return "\n".join(script_lines) + "\n"
248
249
def generate_userdata_with_ssh(ips_array):
    """Return userdata that installs the shared cirros key and then
    repeatedly ssh-es to every IP in ips_array, printing its hostname.
    """
    key_setup = generate_userdata_common()

    # Join with a leading space per IP so the script reads "set a b c".
    ips = "".join(" %s" % ip for ip in ips_array)
    ssh_loop = ("#!/bin/sh\n"
                "set%s\n"
                "while true; do\n"
                " for i do\n"
                "  ip=$i\n"
                "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
                "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
                "  RES=$?\n"
                "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
                "  else echo $ip 'not reachable';fi;\n"
                " done\n"
                " sleep 1\n"
                "done\n"
                % ips)
    return key_setup + ssh_loop
273
274
def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    """Return userdata that creates the sdnvpn user and brings up an
    aliased interface (e.g. eth0:1) with the given address and netmask.
    """
    template = ("#!/bin/sh\n"
                "set -xe\n"
                "sudo useradd -m sdnvpn\n"
                "sudo adduser sdnvpn sudo\n"
                "sudo echo sdnvpn:opnfv | chpasswd\n"
                "sleep 20\n"
                "sudo ifconfig %s:%s %s netmask %s up\n")
    return template % (interface_name, interface_number,
                       ip_Address, net_mask)
286
287
def get_installerHandler():
    """Return a deployment handler for the current installer.

    Reads INSTALLER_TYPE from the environment; returns None (with a
    warning) for installers other than fuel or apex.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type == "apex":
        # Apex deployments are reached over ssh with a private key.
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            pkey_file="/root/.ssh/id_rsa")
    if installer_type == "fuel":
        # Fuel deployments use password authentication.
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            'r00tme')
    logger.warn("installer type %s is neither fuel nor apex."
                "returning None for installer handler" % installer_type)
    return None
311
312
def get_nodes():
    """Return the nodes known to the current installer's handler."""
    handler = get_installerHandler()
    return handler.get_nodes()
316
317
def get_installer_ip():
    """Return the installer IP from the INSTALLER_IP env variable."""
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
320
321
def get_instance_ip(conn, instance):
    """Return the first IP address of the instance's first network.

    :param conn: openstack SDK connection
    :param instance: server object (or id) accepted by compute.get_server
    :return: the address string
    """
    addresses = conn.compute.get_server(instance).addresses
    # Bug fix: `dict.values()[0]` only works on Python 2 (Python 3 dict
    # views are not subscriptable); next(iter(...)) works on both.
    first_network = next(iter(addresses.values()))
    return first_network[0]['addr']
326
327
def wait_for_instance(instance, pattern=".* login:", tries=40):
    """Poll the console log of an instance until `pattern` appears.

    Polls every 2 seconds, up to `tries` times.

    :return: True when the pattern showed up in time, False otherwise.
    """
    logger.info("Waiting for instance %s to boot up" % instance.id)
    conn = os_utils.get_os_connection()
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0:
        if expected_regex.search(console_log):
            break
        console_log = conn.compute.\
            get_server_console_output(instance)['output']
        time.sleep(sleep_time)
        tries -= 1

    if expected_regex.search(console_log):
        return True
    logger.error("Instance %s does not boot up properly."
                 % instance.id)
    return False
345
346
def wait_for_instances_up(*instances):
    """Wait for every instance to show a login prompt; True if all did."""
    # Build the full list first so every instance is waited on, even if
    # an earlier one failed.
    results = [wait_for_instance(vm) for vm in instances]
    return all(results)
350
351
def wait_for_instances_get_dhcp(*instances):
    """Wait for every instance to obtain a DHCP lease; True if all did."""
    # Build the full list first so every instance is waited on, even if
    # an earlier one failed.
    results = [wait_for_instance(vm, "Lease of .* obtained")
               for vm in instances]
    return all(results)
356
357
def async_Wait_for_instances(instances, tries=40):
    """Wait in parallel for all instances to reach a login prompt.

    :param instances: list of instances to wait for; no-op when empty
    :param tries: console polls per instance (2 seconds apart)
    """
    # Idiom fix: truthiness instead of `len(x) <= 0`.
    if not instances:
        return
    # Fan out the console polling through the module-level executor.
    futures = [executor.submit(wait_for_instance,
                               instance,
                               ".* login:",
                               tries)
               for instance in instances]
    results = [future.result() for future in futures]
    # Idiom fix: `not all(results)` instead of `False in results`.
    if not all(results):
        logger.error("one or more instances is not yet booted up")
373
374
def wait_for_instance_delete(conn, instance_id, tries=30):
    """Block until instance_id disappears from the instance list.

    Polls every 2 seconds, up to `tries` times; logs an error when the
    instance is still present afterwards.
    """
    sleep_time = 2
    remaining = [instance_id]
    logger.debug("Waiting for instance %s to be deleted"
                 % (instance_id))
    while tries > 0 and instance_id in remaining:
        remaining = [vm.id for vm in os_utils.get_instances(conn)]
        time.sleep(sleep_time)
        tries -= 1
    if instance_id in remaining:
        logger.error("Deletion of instance %s failed" %
                     (instance_id))
388
389
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until net_id is associated with the given BGPVPN.

    Polls every second, up to 30 times.

    :return: True when the association appeared, False otherwise.
    """
    tries = 30
    sleep_time = 1
    nets = []
    # Bug fix: the two log arguments were swapped (the bgpvpn id was
    # printed as the network and vice versa).
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
406
407
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for several network associations on one BGPVPN.

    :return: True only when every association succeeded.
    """
    results = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)
               for net_id in args]
    return all(results)
413
414
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until router_id is associated with the given BGPVPN.

    Polls every second, up to 30 times.

    :return: True when the association appeared, False otherwise.
    """
    tries = 30
    sleep_time = 1
    routers = []
    # Bug fix: the two log arguments were swapped (the bgpvpn id was
    # printed as the router and vice versa).
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
430
431
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for several router associations on one BGPVPN.

    :return: True only when every association succeeded.
    """
    results = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, rtr_id)
               for rtr_id in args]
    return all(results)
437
438
def wait_before_subtest(*args, **kwargs):
    """Fixed settling delay run between subtests.

    This is a placeholder.
    TODO: Replace delay with polling logic.
    """
    delay_seconds = 30
    time.sleep(delay_seconds)
443
444
def assert_and_get_compute_nodes(conn, required_node_number=2):
    """Get the compute nodes in the deployment.

    :param conn: openstack SDK connection
    :param required_node_number: minimum number of compute nodes needed
    :return: the list of compute nodes
    :raises Exception: when the deployment has fewer compute nodes than
                       required
    """
    compute_nodes = os_utils.get_hypervisors(conn)

    num_compute_nodes = len(compute_nodes)
    # Bug fix: the threshold and the messages were hard-coded to 2 and
    # silently ignored the required_node_number parameter.
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
461
462
def open_icmp(conn, security_group_id):
    """Ensure an ingress icmp rule on the given security group.

    NOTE(review): check_security_group_rules presumably returns True when
    the rule is absent and can be added -- confirm in os_utils.
    """
    rule_addable = os_utils.check_security_group_rules(conn,
                                                       security_group_id,
                                                       'ingress',
                                                       'icmp')
    if not rule_addable:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return

    created = os_utils.create_secgroup_rule(conn,
                                            security_group_id,
                                            'ingress',
                                            'icmp')
    if not created:
        logger.error("Failed to create icmp security group rule...")
477
478
def open_http_port(conn, security_group_id):
    """Ensure an ingress tcp/80 rule on the given security group.

    NOTE(review): check_security_group_rules presumably returns True when
    the rule is absent and can be added -- confirm in os_utils.
    """
    rule_addable = os_utils.check_security_group_rules(conn,
                                                       security_group_id,
                                                       'ingress',
                                                       'tcp',
                                                       80, 80)
    if not rule_addable:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return

    created = os_utils.create_secgroup_rule(conn,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            80, 80)
    if not created:
        logger.error("Failed to create http security group rule...")
496
497
def open_bgp_port(conn, security_group_id):
    """Ensure an ingress tcp/179 (BGP) rule on the given security group.

    NOTE(review): check_security_group_rules presumably returns True when
    the rule is absent and can be added -- confirm in os_utils.
    """
    rule_addable = os_utils.check_security_group_rules(conn,
                                                       security_group_id,
                                                       'ingress',
                                                       'tcp',
                                                       179, 179)
    if not rule_addable:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return

    created = os_utils.create_secgroup_rule(conn,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            179, 179)
    if not created:
        logger.error("Failed to create bgp security group rule...")
514
515
def exec_cmd(cmd, verbose):
    """Run a shell command and capture its combined stdout/stderr.

    :param cmd: shell command line to execute
    :param verbose: when True, log the captured output at debug level
    :return: tuple (output, success); success is False when the command
             exited non-zero
    """
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        # Bug fix: readline on a pipe yields bytes; `str += bytes`
        # raises TypeError on Python 3, so decode each line first.
        output += line.decode(errors='replace')

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
535
536
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL Fib for `ip`"""
    url = ("http://" + controller_ip +
           ":8181/restconf/config/odl-fib:fibEntries/")
    logger.debug("Querring '%s' for FIB entries", url)
    response = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if response.status_code != 200:
        logger.error("OpenDaylight response status code: %s",
                     response.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % controller_ip)
    logger.debug("OpenDaylight FIB: \n%s" % response.text)
    return ip in response.text
550
551
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell
    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    client = '/opt/opendaylight/bin/client -h 127.0.0.1'
    karaf_cmd = '%s "%s" 2>/dev/null' % (client, cmd)
    return odl_node.run_cmd(karaf_cmd)
563
564
def wait_for_cloud_init(conn, instance):
    """Poll an instance's console log until cloud-init finishes.

    Polls up to 20 times, 30 seconds apart, looking either for the
    cloud-init completion banner (success) or a failed-module marker
    (failure).

    :param conn: openstack SDK connection
    :param instance: server whose console output is inspected
    :return: True when cloud-init finished cleanly, False when a module
             failed to run or the wait timed out
    """
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = conn.compute.\
            get_server_console_output(instance)['output']
        if "Failed to run module" in instance_log:
            # A cloud-init module reported a failure; stop waiting.
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            # Completion banner found; cloud-init ran to the end.
            success = True
            break
        time.sleep(sleep_time)
        tries = tries - 1

    # tries == 0 means the loop exhausted its polls without spotting
    # either the failure marker or the completion banner.
    if tries == 0:
        logger.error("Cloud init timed out"
                     ". Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {} result was {}"
                "".format(instance.name, success))
    return success
594
595
def attach_instance_to_ext_br(instance, compute_node):
    """Attach an instance's libvirt domain to the external bridge.

    On fuel the instance is attached straight to br-ex; on apex a linux
    bridge (br-quagga) is created and veth-linked to br-ex first, since
    virsh attach-interface cannot attach directly to an ovs bridge.

    :param instance: server exposing `instance_name` (libvirt domain name)
    :param compute_node: node handler with run_cmd()
    :raises Exception: for installer types other than fuel/apex
    """
    libvirt_instance_name = instance.instance_name
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
    else:
        # Bug fix: previously an unsupported installer fell through with
        # `bridge` undefined and crashed with NameError below.
        raise Exception("Unsupported installer type: %s" % installer_type)

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))
623
624
def detach_instance_from_ext_br(instance, compute_node):
    """Detach an instance from the external bridge and tear the bridge
    plumbing down.

    Reverses attach_instance_to_ext_br: detaches the interface from the
    libvirt domain and, on apex, removes the br-quagga linux bridge and
    its veth link to br-ex.

    :param instance: server exposing `instance_name` (libvirt domain name)
    :param compute_node: node handler with run_cmd()
    """
    libvirt_instance_name = instance.instance_name
    # NOTE(review): this shell loop greps the 52:54:* MACs of EVERY
    # running domain on the host, not just this instance -- it appears
    # to assume only one relevant VM/interface is present; verify.
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               "grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        # On fuel the instance was attached straight to br-ex; nothing
        # else to clean up.
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))
654
655
def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
                    interfaces, subnet_ids, router_ids, network_ids):
    """Tear down the neutron resources created by a test run.

    Deletion proceeds in dependency order: floating IPs, BGPVPNs,
    router interfaces, router gateways, subnets, routers, networks.

    :param conn: openstack SDK connection
    :param neutron_client: client used for the BGPVPN calls
    :param floatingip_ids: floating ips to delete
    :param bgpvpn_ids: bgpvpns to delete
    :param interfaces: iterable of (router_id, subnet_id) pairs to detach
    :param subnet_ids: subnets to delete
    :param router_ids: routers whose gateways are cleared, then deleted
    :param network_ids: networks to delete
    :return: False as soon as a floating ip, subnet, router or network
             deletion fails; True otherwise.  NOTE(review): interface
             and gateway removal failures are only logged and do not
             abort the cleanup -- confirm this asymmetry is intended.
    """
    if len(floatingip_ids) != 0:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(conn, floatingip_id):
                logger.error('Fail to delete all floating ips. '
                             'Floating ip with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if len(bgpvpn_ids) != 0:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if len(interfaces) != 0:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(conn,
                                                    router_id, subnet_id):
                logger.error('Fail to delete all interface routers. '
                             'Interface router with id {} was not deleted.'.
                             format(router_id))

    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(conn, router_id):
                logger.error('Fail to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if len(subnet_ids) != 0:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(conn, subnet_id):
                logger.error('Fail to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    # Routers are iterated twice on purpose: gateways are cleared above,
    # the routers themselves are deleted here.
    if len(router_ids) != 0:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(conn, router_id):
                logger.error('Fail to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if len(network_ids) != 0:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(conn, network_id):
                logger.error('Fail to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True
709
710
def cleanup_nova(conn, instance_ids, flavor_ids=None):
    """Delete the given flavors and instances.

    Instance deletion failures are only logged; the function always
    returns True.
    """
    # `flavor_ids or []` covers both None and the empty list.
    for flavor_id in flavor_ids or []:
        conn.compute.delete_flavor(flavor_id)
    for instance_id in instance_ids:
        if os_utils.delete_instance(conn, instance_id):
            wait_for_instance_delete(conn, instance_id)
        else:
            logger.error('Fail to delete all instances. '
                         'Instance with id {} was not deleted.'.
                         format(instance_id))
    return True
724
725
def cleanup_glance(conn, image_ids):
    """Delete the given glance images.

    :return: False at the first deletion failure, True otherwise.
    """
    for image_id in image_ids:
        if not os_utils.delete_glance_image(conn, image_id):
            logger.error('Fail to delete all images. '
                         'Image with id {} was not deleted.'.
                         format(image_id))
            return False
    return True
735
736
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from the given attributes.

    kwargs may include e.g. route_distinguishers and route_targets.
    """
    return neutron_client.create_bgpvpn({"bgpvpn": kwargs})
742
743
def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update the BGPVPN identified by bgpvpn_id with the given attrs."""
    return neutron_client.update_bgpvpn(bgpvpn_id, {"bgpvpn": kwargs})
747
748
def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete the BGPVPN identified by bgpvpn_id."""
    return neutron_client.delete_bgpvpn(bgpvpn_id)
751
752
def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the BGPVPN resource identified by bgpvpn_id."""
    return neutron_client.show_bgpvpn(bgpvpn_id)
755
756
def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids associated with the given BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['routers']
759
760
def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids associated with the given BGPVPN."""
    bgpvpn = get_bgpvpn(neutron_client, bgpvpn_id)
    return bgpvpn['bgpvpn']['networks']
763
764
def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate a router with the given BGPVPN."""
    body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, body)
768
769
def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate a neutron network with the given BGPVPN."""
    body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, body)
773
774
def is_fail_mode_secure():
    """
    Checks the value of the attribute fail_mode,
    if it is set to secure. This check is performed
    on all OVS br-int interfaces, for all OpenStack nodes.

    :return: dict mapping node name to True/False (secure or not)
    """
    is_secure = {}
    list_bridges_cmd = ("sudo ovs-vsctl show | "
                        "grep -i bridge | "
                        "awk '{print $2}'")
    # Define OVS get fail_mode command
    fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for node in get_nodes():
        if not node.is_active():
            continue

        bridges = node.run_cmd(list_bridges_cmd).strip().split('\n')
        if 'br-int' not in bridges:
            continue
        # Execute get fail_mode command
        br_int_fail_mode = node.run_cmd(fail_mode_cmd).strip()
        if br_int_fail_mode == 'secure':
            # success
            is_secure[node.name] = True
        else:
            # failure
            logger.error('The fail_mode for br-int was not secure '
                         'in {} node'.format(node.name))
            is_secure[node.name] = False
    return is_secure
807
808
def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    """Update the network, subnet, port and router quotas of a tenant.

    :param conn: openstack connection exposing network.update_quota
    :param tenant_id: project/tenant whose quotas are updated
    :return: True on success, False on any error (which is logged)
    """
    try:
        conn.network.update_quota(tenant_id, networks=nw_quota,
                                  subnets=subnet_quota, ports=port_quota,
                                  routers=router_quota)
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(network,"
                     " '%s', '%s', '%s', '%s, %s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False
    return True
822
823
def update_instance_quota_class(cloud, instances_quota):
    """Set the 'instances' compute quota of the admin project.

    :param cloud: cloud object exposing set_compute_quotas
    :param instances_quota: new instance count limit
    :return: True on success, False on any error (which is logged)
    """
    try:
        cloud.set_compute_quotas('admin', instances=instances_quota)
    except Exception as e:
        logger.error("Error [update_instance_quota_class(compute,"
                     " '%s' )]: %s" % (instances_quota, e))
        return False
    return True
832
833
def get_neutron_quota(conn, tenant_id):
    """Return the network quota record for the given tenant.

    :param conn: openstack connection exposing network.quotas()
    :param tenant_id: project/tenant id to query
    :return: the first quota record yielded by the SDK for the tenant
    :raises: re-raises any SDK error after logging it
    """
    try:
        # Use the builtin next() instead of .next(): generator objects
        # have no .next() method on Python 3, so the original raised
        # AttributeError there; next() works on both Python 2 and 3.
        return next(conn.network.quotas(project_id=tenant_id))
    except Exception as e:
        logger.error("Error in getting network quota for tenant "
                     " '%s' )]: %s" % (tenant_id, e))
        raise
841
842
def get_nova_instances_quota(cloud):
    """Return the 'instances' compute quota of the admin project.

    :param cloud: cloud object exposing get_compute_quotas
    :raises: re-raises any error from the SDK after logging it
    """
    try:
        admin_quotas = cloud.get_compute_quotas('admin')
        return admin_quotas.instances
    except Exception as err:
        logger.error("Error in getting nova instances quota: %s" % err)
        raise
849
850
def update_router_extra_route(conn, router_id, extra_routes):
    """Set the given extra routes on a router.

    :param conn: openstack connection exposing network.update_router
    :param router_id: id of the router to update
    :param extra_routes: list of objects with .destination and .nexthop
    :return: True on success, None when extra_routes is empty
    :raises: re-raises any SDK error after logging it
    """
    if not extra_routes:
        return
    routes = [{'destination': route.destination,
               'nexthop': route.nexthop}
              for route in extra_routes]
    try:
        conn.network.update_router(router_id, routes=routes)
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise
    return True
866
867
def update_router_no_extra_route(conn, router_ids):
    """Clear the extra routes from every router in router_ids.

    :param conn: openstack connection exposing network.update_router
    :param router_ids: iterable of router ids to clear
    :return: True after all routers have been processed
    """
    for router_id in router_ids:
        try:
            conn.network.update_router(router_id, routes=[])
        except Exception as e:
            # Best-effort: log and keep clearing the remaining routers.
            logger.error("Error in clearing extra route: %s" % e)
    # Bug fix: the original returned True inside the loop, so only the
    # FIRST router ever had its extra routes cleared.
    return True
875
876
def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Collect the OVS group entries of every given bridge on every
    given compute node.

    :param compute_node_list: nodes to query
    :param ovs_br_list: bridge names to inspect on each node
    :param of_protocol: OpenFlow protocol version passed to ovs-ofctl
    :return: list of console output lines containing the group dumps
    """
    output_lines = []
    for node in compute_node_list:
        for bridge in ovs_br_list:
            # Only dump groups for bridges actually present on the node.
            if bridge not in node.run_cmd("sudo ovs-vsctl show"):
                continue
            dump_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                        "grep group".format(bridge, of_protocol))
            output_lines.extend(
                node.run_cmd(dump_cmd).strip().split("\n"))
    return output_lines
892
893
def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Collect the OVS flow entries of every given bridge on every
    given compute node.

    :param compute_node_list: nodes to query
    :param ovs_br_list: bridge names to inspect on each node
    :param of_protocol: OpenFlow protocol version passed to ovs-ofctl
    :return: list of console output lines containing the flow dumps
    """
    output_lines = []
    for node in compute_node_list:
        for bridge in ovs_br_list:
            # Only dump flows for bridges actually present on the node.
            if bridge not in node.run_cmd("sudo ovs-vsctl show"):
                continue
            dump_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                        "grep table=".format(bridge, of_protocol))
            output_lines.extend(
                node.run_cmd(dump_cmd).strip().split("\n"))
    return output_lines
909
910
def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode we need to execute the BGP speaker
    related commands to that ODL which is the owner of the BGP entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs,
                        or None when the owner cannot be determined
    """
    if len(controllers) == 1:
        return controllers[0]
    else:
        url = ('http://admin:admin@{ip}:8081/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(ip=controllers[0].ip))

        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                                'initial/akka.conf')
        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
        local_tmp_akka_conf = '/tmp/akka.conf'
        try:
            json_output = requests.get(url).json()
        except Exception:
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
            return None
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        for controller in controllers:
            # Copy akka.conf somewhere heat-admin can read it, then fetch
            # it locally so the owner id can be searched for.
            controller.run_cmd('sudo cp {0} /home/heat-admin/'
                               .format(remote_odl_akka_conf))
            controller.run_cmd('sudo chmod 777 {0}'
                               .format(remote_odl_home_akka_conf))
            controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

            # Bug fix: use a context manager so the file handle is closed
            # on every iteration (the original leaked one open file per
            # controller checked).
            with open(local_tmp_akka_conf) as akka_conf:
                for line in akka_conf:
                    if re.search(odl_bgp_owner, line):
                        return controller
        return None
951
952
def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    """Create an MPLS-over-GRE external tunnel endpoint on ODL.

    Issues the itm-rpc:add-external-tunnel-endpoint RPC against the
    first controller. Failures are logged, never raised.

    :param controllers: list of OS controllers; the first one is used
    :param remote_tep_ip: ip of the remote tunnel endpoint
    :return: None
    """
    payload = {'input':
               {'destination-ip': remote_tep_ip,
                'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
               }
    rpc_url = ('http://{ip}:8081/restconf/operations/'
               'itm-rpc:add-external-tunnel-endpoint'
               .format(ip=controllers[0].ip))
    http_headers = {'Content-type': 'application/yang.data+json',
                    'Accept': 'application/yang.data+json'}
    try:
        requests.post(rpc_url, data=json.dumps(payload),
                      headers=http_headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
    return None
971
972
def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
    """Check whether ODL's FIB has an entry for ip_prefix in vrf_id.

    :param controllers: list of OS controllers; the first one is queried
    :param ip_prefix: destination prefix to look for (e.g. '10.0.0.0/24')
    :param vrf_id: id of the VRF table to inspect
    :return: True when the prefix is found, False otherwise (including
             on any REST/parsing error, which is logged)
    """
    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
    # Bug fix: this is a trace message, not an error condition; the
    # original logged it at error severity on every call.
    logger.debug("url is %s" % url)
    try:
        vrf_table = requests.get(url).json()
        return any(vrf_entry['destPrefix'] == ip_prefix
                   for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry'])
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False