#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import json
import logging
import os
import time
import requests
import re
import subprocess
from concurrent.futures import ThreadPoolExecutor
from requests.auth import HTTPBasicAuth

from opnfv.deployment.factory import Factory as DeploymentFactory

from sdnvpn.lib import config as sdnvpn_config
import sdnvpn.lib.openstack_utils as os_utils

logger = logging.getLogger('sdnvpn_test_utils')

common_config = sdnvpn_config.CommonConfig()

ODL_USER = 'admin'
ODL_PASS = 'admin'

executor = ThreadPoolExecutor(5)


class ExtraRoute(object):
    """
    Class to represent an extra route for a router
    """

    def __init__(self, destination, nexthop):
        self.destination = destination
        self.nexthop = nexthop


class AllowedAddressPair(object):
    """
    Class to represent an allowed address pair for a neutron port
    """

    def __init__(self, ipaddress, macaddress):
        self.ipaddress = ipaddress
        self.macaddress = macaddress


def create_default_flavor():
    return os_utils.get_or_create_flavor(common_config.default_flavor,
                                         common_config.default_flavor_ram,
                                         common_config.default_flavor_disk,
                                         common_config.default_flavor_vcpus)


def create_custom_flavor():
    return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
                                         common_config.custom_flavor_ram,
                                         common_config.custom_flavor_disk,
                                         common_config.custom_flavor_vcpus)


def create_net(neutron_client, name):
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if not net_id:
        logger.error(
            "There has been a problem while creating the neutron network")
        raise Exception("There has been a problem while creating"
                        " the neutron network {}".format(name))
    return net_id


def create_subnet(neutron_client, name, cidr, net_id):
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem while creating the neutron subnet")
        raise Exception("There has been a problem while creating"
                        " the neutron subnet {}".format(name))
    return subnet_id


def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem while creating the neutron network")
        raise Exception("There has been a problem while creating"
                        " the neutron network {}".format(net))
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]

    if subnet2 is not None:
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem while creating the second subnet")
            raise Exception("There has been a problem while creating"
                            " the second subnet {}".format(subnet2))
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
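# Example (illustrative sketch only; the neutron client, names and CIDR
# below are assumptions, not part of this module):
#
#   net_id, subnet_id, router_id = create_network(
#       neutron_client, 'sdnvpn-net-1', 'sdnvpn-subnet-1',
#       '10.10.10.0/24', 'sdnvpn-router-1')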


def get_port(neutron_client, instance_id):
    ports = os_utils.get_port_list(neutron_client)
    if ports is not None:
        for port in ports:
            if port['device_id'] == instance_id:
                return port
    return None


def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
    if not address_pairs:
        return
    allowed_address_pairs = []
    for address_pair in address_pairs:
        address_pair_dict = {'ip_address': address_pair.ipaddress,
                             'mac_address': address_pair.macaddress}
        allowed_address_pairs.append(address_pair_dict)
    json_body = {'port': {
        "allowed_address_pairs": allowed_address_pairs
    }}

    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_port_allowed_address_pairs("
                     "neutron_client, '%s', '%s')]: %s"
                     % (port_id, address_pairs, e))
        return None
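# Example sketch pairing AllowedAddressPair with the call above (the
# port id and the addresses are hypothetical):
#
#   pairs = [AllowedAddressPair('10.10.10.250', 'fa:16:3e:00:00:01')]
#   update_port_allowed_address_pairs(neutron_client, port_id, pairs)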


def create_instance(conn,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node=None,
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    if 'flavor' not in kwargs:
        kwargs['flavor'] = common_config.default_flavor
    # Avoid a mutable default argument for the injected files
    if files is None:
        files = []

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        raise Exception("Error while booting instance {}".format(name))
    else:
        # Retrieve the IP of the instance
        network_name = conn.network.get_network(network_id).name
        instance_ip = conn.compute.get_server(instance).\
            addresses.get(network_name)[0]['addr']
        logger.debug("Instance '%s' booted successfully. IP='%s'." %
                     (name, instance_ip))

    logger.debug("Adding '%s' to security group '%s'..."
                 % (name, secgroup_name if secgroup_name else sg_id))
    os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)

    return instance
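# Minimal usage sketch (the image, network and security-group ids are
# assumed to exist already; the flavor falls back to
# common_config.default_flavor):
#
#   vm = create_instance(conn, 'sdnvpn-vm-1', image_id, network_id,
#                        sg_id, userdata=generate_ping_userdata(
#                            ['10.10.10.5']))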


def generate_ping_userdata(ips_array, ping_count=10):
    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  ping -c %s $ip 2>&1 >/dev/null\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            "   echo ping $ip OK\n"
            "  else echo ping $ip KO\n"
            "  fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % (ips, ping_count))
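# For example, generate_ping_userdata(['10.10.10.5', '10.10.11.5'])
# renders a cloud-init script that endlessly pings both addresses and
# prints "ping <ip> OK" or "ping <ip> KO" once per second.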


def generate_userdata_common():
    return ("#!/bin/sh\n"
            "sudo mkdir -p /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/\n"
            "sudo chown cirros:cirros /home/cirros/id_rsa\n"
            "mv /home/cirros/id_rsa /home/cirros/.ssh/\n"
            "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
            "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
            "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
            "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
            "cirros@test1>/home/cirros/.ssh/authorized_keys\n"
            "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n"
            "chmod 700 /home/cirros/.ssh\n"
            "chmod 644 /home/cirros/.ssh/authorized_keys\n"
            "chmod 600 /home/cirros/.ssh/id_rsa\n"
            )


def generate_userdata_with_ssh(ips_array):
    u1 = generate_userdata_common()

    ips = ""
    for ip in ips_array:
        ips = ("%s %s" % (ips, ip))

    ips = ips.replace('  ', ' ')
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for i do\n"
          "  ip=$i\n"
          "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          "  RES=$?\n"
          "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          "  else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)


def generate_userdata_interface_create(interface_name, interface_number,
                                       ip_Address, net_mask):
    return ("#!/bin/sh\n"
            "set -xe\n"
            "sudo useradd -m sdnvpn\n"
            "sudo adduser sdnvpn sudo\n"
            "sudo echo sdnvpn:opnfv | chpasswd\n"
            "sleep 20\n"
            "sudo ifconfig %s:%s %s netmask %s up\n"
            % (interface_name, interface_number,
               ip_Address, net_mask))


def get_installerHandler():
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()

    if installer_type not in ["fuel", "apex"]:
        logger.warning("Installer type %s is neither fuel nor apex. "
                       "Returning None for the installer handler."
                       % installer_type)
        return None
    else:
        if installer_type in ["apex"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                pkey_file="/root/.ssh/id_rsa")

        if installer_type in ["fuel"]:
            developHandler = DeploymentFactory.get_handler(
                installer_type,
                installer_ip,
                'root',
                'r00tme')
        return developHandler


def get_nodes():
    developHandler = get_installerHandler()
    return developHandler.get_nodes()


def get_installer_ip():
    return str(os.environ['INSTALLER_IP'])


def get_instance_ip(conn, instance):
    # list() keeps this working on Python 3, where dict.values()
    # is not subscriptable
    server = conn.compute.get_server(instance)
    instance_ip = list(server.addresses.values())[0][0]['addr']
    return instance_ip


def wait_for_instance(instance, pattern=".* login:", tries=40):
    logger.info("Waiting for instance %s to boot up" % instance.id)
    conn = os_utils.get_os_connection()
    sleep_time = 2
    expected_regex = re.compile(pattern)
    console_log = ""
    while tries > 0 and not expected_regex.search(console_log):
        console_log = conn.compute.\
            get_server_console_output(instance)['output']
        time.sleep(sleep_time)
        tries -= 1

    if not expected_regex.search(console_log):
        logger.error("Instance %s did not boot up properly."
                     % instance.id)
        return False
    return True


def wait_for_instances_up(*instances):
    check = [wait_for_instance(instance) for instance in instances]
    return all(check)


def wait_for_instances_get_dhcp(*instances):
    check = [wait_for_instance(instance, "Lease of .* obtained")
             for instance in instances]
    return all(check)


def async_Wait_for_instances(instances, tries=40):
    if not instances:
        return
    futures = []
    for instance in instances:
        future = executor.submit(wait_for_instance,
                                 instance,
                                 ".* login:",
                                 tries)
        futures.append(future)
    results = []
    for future in futures:
        results.append(future.result())
    if False in results:
        logger.error("One or more instances are not booted up yet")


def wait_for_instance_delete(conn, instance_id, tries=30):
    sleep_time = 2
    instances = [instance_id]
    logger.debug("Waiting for instance %s to be deleted"
                 % (instance_id))
    while tries > 0 and instance_id in instances:
        instances = [instance.id for instance in
                     os_utils.get_instances(conn)]
        time.sleep(sleep_time)
        tries -= 1
    if instance_id in instances:
        logger.error("Deletion of instance %s failed" %
                     (instance_id))


def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    tries = 30
    sleep_time = 1
    nets = []
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    tries = 30
    sleep_time = 1
    routers = []
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True


def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, id)
             for id in args]
    # Return True if all associations succeeded
    return all(check)


def wait_before_subtest(*args, **kwargs):
    ''' This is a placeholder.
        TODO: Replace delay with polling logic. '''
    time.sleep(30)


def assert_and_get_compute_nodes(conn, required_node_number=2):
    """Get the compute nodes in the deployment
    Raise if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(conn)

    num_compute_nodes = len(compute_nodes)
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        raise Exception("There are {} compute nodes in the deployment. "
                        "Minimum number of nodes to complete the test"
                        " is {}.".format(num_compute_nodes,
                                         required_node_number))

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes


def open_icmp(neutron_client, security_group_id):
    # check_security_group_rules returns True when no matching rule
    # exists yet
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'icmp'):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'icmp'):
            logger.error("Failed to create icmp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_http_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           80, 80):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             80, 80):

            logger.error("Failed to create http security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def open_bgp_port(neutron_client, security_group_id):
    if os_utils.check_security_group_rules(neutron_client,
                                           security_group_id,
                                           'ingress',
                                           'tcp',
                                           179, 179):

        if not os_utils.create_secgroup_rule(neutron_client,
                                             security_group_id,
                                             'ingress',
                                             'tcp',
                                             179, 179):
            logger.error("Failed to create bgp security group rule...")
    else:
        logger.info("This rule already exists for security group: %s"
                    % security_group_id)


def exec_cmd(cmd, verbose):
    success = True
    logger.debug("Executing '%s'" % cmd)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    output = ""
    for line in iter(p.stdout.readline, b''):
        # decode so this works on both Python 2 and 3, where the pipe
        # yields bytes
        output += line.decode('utf-8', 'replace')

    if verbose:
        logger.debug(output)

    p.stdout.close()
    returncode = p.wait()
    if returncode != 0:
        logger.error("Command %s failed to execute." % cmd)
        success = False

    return output, success
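# Example (illustrative; any shell command string works):
#
#   output, success = exec_cmd('ovs-vsctl show', verbose=True)
#   if not success:
#       logger.error("Could not query OVS")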


def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`"""
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
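# Example (the addresses are hypothetical): verify that a VM IP was
# advertised into the FIB of the controller at 192.168.0.2:
#
#   assert check_odl_fib('10.10.10.5', '192.168.0.2')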


def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell.
    This is a bit flimsy because of shell quote escaping: make sure that
    the cmd passed does not have any top-level double quotes or this
    function will break.
    The /dev/null is used because the client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    karaf_cmd = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                 ' 2>/dev/null' % cmd)
    return odl_node.run_cmd(karaf_cmd)
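# Example sketch: any Karaf shell command without top-level double
# quotes can be passed, e.g. dumping the recent controller log:
#
#   log_tail = run_odl_cmd(odl_node, 'log:display')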


def wait_for_cloud_init(conn, instance):
    success = True
    # ubuntu images take a long time to start
    tries = 20
    sleep_time = 30
    logger.info("Waiting for cloud init of instance: {}"
                "".format(instance.name))
    while tries > 0:
        instance_log = conn.compute.\
            get_server_console_output(instance)['output']
        if "Failed to run module" in instance_log:
            success = False
            logger.error("Cloud init failed to run. Reason: %s",
                         instance_log)
            break
        if re.search(r"Cloud-init v. .+ finished at", instance_log):
            success = True
            break
        time.sleep(sleep_time)
        tries -= 1

    if tries == 0:
        logger.error("Cloud init timed out. Reason: %s",
                     instance_log)
        success = False
    logger.info("Finished waiting for cloud init of instance {};"
                " result was {}".format(instance.name, success))
    return success


def attach_instance_to_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh attach-interface
        # won't just work. We work around it by creating a linux
        # bridge, attaching that to br-ex with a veth pair
        # and virsh-attaching the instance to the linux-bridge
        bridge = "br-quagga"
        cmd = """
        set -e
        if ! sudo brctl show |grep -q ^{bridge};then
          sudo brctl addbr {bridge}
          sudo ip link set {bridge} up
          sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
          sudo ip link set dev ovs-quagga-tap up
          sudo ip link set dev quagga-tap up
          sudo ovs-vsctl add-port br-ex ovs-quagga-tap
          sudo brctl addif {bridge} quagga-tap
        fi
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))

    compute_node.run_cmd("sudo virsh attach-interface %s"
                         " bridge %s" % (libvirt_instance_name, bridge))


def detach_instance_from_ext_br(instance, compute_node):
    libvirt_instance_name = instance.instance_name
    mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
                               "grep running | awk '{print $2}'); "
                               "do echo -n ; sudo virsh dumpxml $vm| "
                               r"grep -oP '52:54:[\da-f:]+' ;done")
    compute_node.run_cmd("sudo virsh detach-interface --domain %s"
                         " --type bridge --mac %s"
                         % (libvirt_instance_name, mac))

    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    if installer_type == "fuel":
        bridge = "br-ex"
    elif installer_type == "apex":
        # In Apex, br-ex is an ovs bridge and virsh detach-interface
        # won't just work. The attach workaround created a linux
        # bridge patched to br-ex with a veth pair, so tear that
        # down again here
        bridge = "br-quagga"
        cmd = """
            sudo brctl delif {bridge} quagga-tap &&
            sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
            sudo ip link set dev quagga-tap down &&
            sudo ip link set dev ovs-quagga-tap down &&
            sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
            sudo ip link set {bridge} down &&
            sudo brctl delbr {bridge}
        """
        compute_node.run_cmd(cmd.format(bridge=bridge))


def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
                    subnet_ids, router_ids, network_ids):

    if floatingip_ids:
        for floatingip_id in floatingip_ids:
            if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
                logger.error('Failed to delete all floating IPs. '
                             'Floating IP with id {} was not deleted.'.
                             format(floatingip_id))
                return False

    if bgpvpn_ids:
        for bgpvpn_id in bgpvpn_ids:
            delete_bgpvpn(neutron_client, bgpvpn_id)

    if interfaces:
        for router_id, subnet_id in interfaces:
            if not os_utils.remove_interface_router(neutron_client,
                                                    router_id, subnet_id):
                logger.error('Failed to delete all router interfaces. '
                             'Router interface with id {} was not deleted.'.
                             format(router_id))

    # Remove the router gateways before deleting the routers themselves
    if router_ids:
        for router_id in router_ids:
            if not os_utils.remove_gateway_router(neutron_client, router_id):
                logger.error('Failed to delete all gateway routers. '
                             'Gateway router with id {} was not deleted.'.
                             format(router_id))

    if subnet_ids:
        for subnet_id in subnet_ids:
            if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
                logger.error('Failed to delete all subnets. '
                             'Subnet with id {} was not deleted.'.
                             format(subnet_id))
                return False

    if router_ids:
        for router_id in router_ids:
            if not os_utils.delete_neutron_router(neutron_client, router_id):
                logger.error('Failed to delete all routers. '
                             'Router with id {} was not deleted.'.
                             format(router_id))
                return False

    if network_ids:
        for network_id in network_ids:
            if not os_utils.delete_neutron_net(neutron_client, network_id):
                logger.error('Failed to delete all networks. '
                             'Network with id {} was not deleted.'.
                             format(network_id))
                return False
    return True


def cleanup_nova(conn, instance_ids, flavor_ids=None):
    if flavor_ids is not None and len(flavor_ids) != 0:
        for flavor_id in flavor_ids:
            conn.compute.delete_flavor(flavor_id)
    if instance_ids:
        for instance_id in instance_ids:
            if not os_utils.delete_instance(conn, instance_id):
                logger.error('Failed to delete all instances. '
                             'Instance with id {} was not deleted.'.
                             format(instance_id))
            else:
                wait_for_instance_delete(conn, instance_id)
    return True


def cleanup_glance(conn, image_ids):
    if image_ids:
        for image_id in image_ids:
            if not os_utils.delete_glance_image(conn, image_id):
                logger.error('Failed to delete all images. '
                             'Image with id {} was not deleted.'.
                             format(image_id))
                return False
    return True


def create_bgpvpn(neutron_client, **kwargs):
    # kwargs are passed through to the BGPVPN API, e.g.
    # route_distinguishers and route_targets
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)
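# Example sketch with typical kwargs (the values are placeholders):
#
#   bgpvpn = create_bgpvpn(neutron_client,
#                          name='sdnvpn-1',
#                          route_distinguishers=['100:100'],
#                          route_targets=['88:88'])
#   bgpvpn_id = bgpvpn['bgpvpn']['id']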


def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']


def create_router_association(neutron_client, bgpvpn_id, router_id):
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)


def is_fail_mode_secure():
    """
    Checks whether the fail_mode attribute is set to secure on the
    br-int OVS bridge of every active OpenStack node.
    """
    is_secure = {}
    openstack_nodes = get_nodes()
    get_ovs_int_cmd = ("sudo ovs-vsctl show | "
                       "grep -i bridge | "
                       "awk '{print $2}'")
    # Define OVS get fail_mode command
    get_ovs_fail_mode_cmd = ("sudo ovs-vsctl get-fail-mode br-int")
    for openstack_node in openstack_nodes:
        if not openstack_node.is_active():
            continue

        ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
                        strip().split('\n'))
        if 'br-int' in ovs_int_list:
            # Execute get fail_mode command
            br_int_fail_mode = (openstack_node.
                                run_cmd(get_ovs_fail_mode_cmd).strip())
            if br_int_fail_mode == 'secure':
                # success
                is_secure[openstack_node.name] = True
            else:
                # failure
                logger.error('The fail_mode for br-int was not secure '
                             'on node {}'.format(openstack_node.name))
                is_secure[openstack_node.name] = False
    return is_secure
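# The returned dict maps node names to booleans, so a deployment-wide
# check can be written as (sketch):
#
#   if not all(is_fail_mode_secure().values()):
#       logger.error('br-int is not in secure fail mode everywhere')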


def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
                                subnet_quota, port_quota, router_quota):
    json_body = {"quota": {
        "network": nw_quota,
        "subnet": subnet_quota,
        "port": port_quota,
        "router": router_quota
    }}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body=json_body)
        return True
    except Exception as e:
        logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
                     " '%s', '%s', '%s', '%s', '%s')]: %s" %
                     (tenant_id, nw_quota, subnet_quota,
                      port_quota, router_quota, e))
        return False


def update_instance_quota_class(cloud, instances_quota):
    try:
        cloud.set_compute_quotas('admin', instances=instances_quota)
        return True
    except Exception as e:
        logger.error("Error [update_instance_quota_class(cloud,"
                     " '%s')]: %s" % (instances_quota, e))
        return False


def get_neutron_quota(neutron_client, tenant_id):
    try:
        return neutron_client.show_quota(tenant_id=tenant_id)['quota']
    except Exception as e:
        logger.error("Error in getting neutron quota for tenant"
                     " '%s': %s" % (tenant_id, e))
        raise


def get_nova_instances_quota(cloud):
    try:
        return cloud.get_compute_quotas('admin').instances
    except Exception as e:
        logger.error("Error in getting nova instances quota: %s" % e)
        raise


def update_router_extra_route(neutron_client, router_id, extra_routes):
    if not extra_routes:
        return
    routes_list = []
    for extra_route in extra_routes:
        route_dict = {'destination': extra_route.destination,
                      'nexthop': extra_route.nexthop}
        routes_list.append(route_dict)
    json_body = {'router': {
        "routes": routes_list
    }}

    try:
        neutron_client.update_router(router_id, body=json_body)
        return True
    except Exception as e:
        logger.error("Error in updating router with extra route: %s" % e)
        raise
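# Example sketch pairing ExtraRoute with the call above (the
# destination and nexthop values are hypothetical):
#
#   routes = [ExtraRoute('10.20.30.0/24', '10.10.10.254')]
#   update_router_extra_route(neutron_client, router_id, routes)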


def update_router_no_extra_route(neutron_client, router_ids):
    json_body = {'router': {
        "routes": [
        ]}}

    for router_id in router_ids:
        try:
            neutron_client.update_router(router_id, body=json_body)
        except Exception as e:
            logger.error("Error in clearing extra route: %s" % e)
    # Report success only after all routers have been updated
    return True


def get_ovs_groups(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS groups from all bridges and nodes in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_groups_cmd = ("sudo ovs-ofctl dump-groups {} -O {} | "
                                  "grep group".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_groups_cmd).strip().
                                  split("\n"))
    return cmd_out_lines


def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
    """
    Gets, as input, a list of compute nodes and a list of OVS bridges
    and returns the command console output, as a list of lines, that
    contains all the OVS flows from all bridges and nodes in the lists.
    """
    cmd_out_lines = []
    for compute_node in compute_node_list:
        for ovs_br in ovs_br_list:
            if ovs_br in compute_node.run_cmd("sudo ovs-vsctl show"):
                ovs_flows_cmd = ("sudo ovs-ofctl dump-flows {} -O {} | "
                                 "grep table=".format(ovs_br, of_protocol))
                cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
                                  split("\n"))
    return cmd_out_lines
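# Example sketch (the node list comes from get_nodes(); bridge names
# are deployment-specific):
#
#   computes = [node for node in get_nodes() if node.is_compute()]
#   flows = get_ovs_flows(computes, ['br-int'])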


def get_odl_bgp_entity_owner(controllers):
    """ Finds the ODL owner of the BGP entity in the cluster.

    When ODL runs in clustering mode we need to execute the BGP speaker
    related commands to that ODL which is the owner of the BGP entity.

    :param controllers: list of OS controllers
    :return controller: OS controller in which ODL BGP entity owner runs
    """
    if len(controllers) == 1:
        return controllers[0]
    else:
        url = ('http://admin:admin@{ip}:8081/restconf/'
               'operational/entity-owners:entity-owners/entity-type/bgp'
               .format(ip=controllers[0].ip))

        remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
                                'initial/akka.conf')
        remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
        local_tmp_akka_conf = '/tmp/akka.conf'
        try:
            json_output = requests.get(url).json()
        except Exception:
            logger.error('Failed to find the ODL BGP '
                         'entity owner through REST')
            return None
        odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']

        for controller in controllers:

            controller.run_cmd('sudo cp {0} /home/heat-admin/'
                               .format(remote_odl_akka_conf))
            controller.run_cmd('sudo chmod 777 {0}'
                               .format(remote_odl_home_akka_conf))
            controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)

            for line in open(local_tmp_akka_conf):
                if re.search(odl_bgp_owner, line):
                    return controller
        return None


def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
    json_body = {'input':
                 {'destination-ip': remote_tep_ip,
                  'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
                 }
    url = ('http://{ip}:8081/restconf/operations/'
           'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
    headers = {'Content-type': 'application/yang.data+json',
               'Accept': 'application/yang.data+json'}
    try:
        requests.post(url, data=json.dumps(json_body),
                      headers=headers,
                      auth=HTTPBasicAuth('admin', 'admin'))
    except Exception as e:
        logger.error("Failed to create external tunnel endpoint on"
                     " ODL for external tep ip %s with error %s"
                     % (remote_tep_ip, e))
    return None


def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
    url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
           'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
    logger.debug("Checking FIB entries at %s" % url)
    try:
        vrf_table = requests.get(url).json()
        is_ipprefix_exists = False
        for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
            if vrf_entry['destPrefix'] == ip_prefix:
                is_ipprefix_exists = True
                break
        return is_ipprefix_exists
    except Exception as e:
        logger.error('Failed to find ip prefix %s with error %s'
                     % (ip_prefix, e))
    return False